RIFT OSM R1 Initial Submission 86/286/1
author: Jeremy Mordkoff <jeremy.mordkoff@riftio.com>
Wed, 7 Sep 2016 22:56:51 +0000 (18:56 -0400)
committer: Jeremy Mordkoff <jeremy.mordkoff@riftio.com>
Wed, 7 Sep 2016 22:56:51 +0000 (18:56 -0400)
Signed-off-by: Jeremy Mordkoff <jeremy.mordkoff@riftio.com>
630 files changed:
.cpack-workaround [new file with mode: 0644]
.gitignore [new file with mode: 0644]
BUILD.sh [new file with mode: 0755]
CMakeLists.txt [new file with mode: 0644]
Makefile [new file with mode: 0644]
README [new file with mode: 0644]
build-dependencies [new file with mode: 0644]
common/CMakeLists.txt [new file with mode: 0644]
common/plugins/CMakeLists.txt [new file with mode: 0644]
common/plugins/rwcntmgrtasklet/CMakeLists.txt [new file with mode: 0644]
common/plugins/rwcntmgrtasklet/Makefile [new file with mode: 0644]
common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/__init__.py [new file with mode: 0644]
common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/rwcntmgrtasklet.py [new file with mode: 0755]
common/plugins/rwcntmgrtasklet/rwcntmgrtasklet.py [new file with mode: 0755]
common/plugins/yang/CMakeLists.txt [new file with mode: 0644]
common/plugins/yang/rw-cloud.tailf.yang [new file with mode: 0644]
common/plugins/yang/rw-cloud.yang [new file with mode: 0644]
common/plugins/yang/rw-config-agent.tailf.yang [new file with mode: 0644]
common/plugins/yang/rw-config-agent.yang [new file with mode: 0644]
common/plugins/yang/rw-sdn.tailf.yang [new file with mode: 0644]
common/plugins/yang/rw-sdn.yang [new file with mode: 0644]
common/python/CMakeLists.txt [new file with mode: 0644]
common/python/rift/mano/__init__.py [new file with mode: 0644]
common/python/rift/mano/cloud/__init__.py [new file with mode: 0644]
common/python/rift/mano/cloud/accounts.py [new file with mode: 0644]
common/python/rift/mano/cloud/config.py [new file with mode: 0644]
common/python/rift/mano/cloud/operdata.py [new file with mode: 0644]
common/python/rift/mano/config_agent/__init__.py [new file with mode: 0644]
common/python/rift/mano/config_agent/config.py [new file with mode: 0644]
common/python/rift/mano/config_agent/operdata.py [new file with mode: 0644]
common/python/rift/mano/config_data/__init__.py [new file with mode: 0644]
common/python/rift/mano/config_data/config.py [new file with mode: 0644]
common/python/rift/mano/config_data/test/__init__.py [new file with mode: 0644]
common/python/rift/mano/config_data/test/test_converter.py [new file with mode: 0644]
common/python/rift/mano/dts/__init__.py [new file with mode: 0644]
common/python/rift/mano/dts/core.py [new file with mode: 0644]
common/python/rift/mano/dts/subscriber/__init__.py [new file with mode: 0644]
common/python/rift/mano/dts/subscriber/core.py [new file with mode: 0644]
common/python/rift/mano/dts/subscriber/ns_subscriber.py [new file with mode: 0644]
common/python/rift/mano/dts/subscriber/store.py [new file with mode: 0644]
common/python/rift/mano/dts/subscriber/test/utest_subscriber_dts.py [new file with mode: 0644]
common/python/rift/mano/dts/subscriber/vnf_subscriber.py [new file with mode: 0644]
common/python/rift/mano/ncclient.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/__init__.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/common/__init__.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/common/exception.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/common/utils.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/compare_desc.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/conf/__init__.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/conf/config.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/conf/translator.conf [new file with mode: 0644]
common/python/rift/mano/tosca_translator/custom/__init__.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/custom/rwmano/__init__.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/rwmano/__init__.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/rwmano/syntax/__init__.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/rwmano/syntax/mano_output.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/rwmano/syntax/mano_parameter.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/rwmano/syntax/mano_resource.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/rwmano/syntax/mano_template.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/rwmano/tosca/__init__.py [new file with mode: 0755]
common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_compute.py [new file with mode: 0755]
common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_config_primitives.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_initial_config.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_network.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_port.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_scaling_group.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/rwmano/tosca_translator.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/rwmano/translate_inputs.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/rwmano/translate_node_templates.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/rwmano/translate_outputs.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/shell.py [new file with mode: 0644]
common/python/rift/mano/tosca_translator/test/data/ping_pong_csar.zip [new file with mode: 0644]
common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/Definitions/ping_pong_nsd.yaml [new file with mode: 0644]
common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/Definitions/riftio_custom_types.yaml [new file with mode: 0644]
common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/TOSCA-Metadata/TOSCA.meta [new file with mode: 0644]
common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2 [new file with mode: 0644]
common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2 [new file with mode: 0644]
common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/images/README [new file with mode: 0644]
common/python/rift/mano/tosca_translator/test/data/tosca_helloworld.yaml [new file with mode: 0644]
common/python/rift/mano/tosca_translator/test/data/tosca_helloworld_invalid.yaml [new file with mode: 0644]
common/python/rift/mano/tosca_translator/test/tosca_translator_ut.py [new file with mode: 0755]
common/python/rift/mano/tosca_translator/tosca-translator [new file with mode: 0755]
common/python/rift/mano/tosca_translator/translator_logging.conf [new file with mode: 0644]
common/python/rift/mano/utils/__init.py__ [new file with mode: 0644]
common/python/rift/mano/utils/compare_desc.py [new file with mode: 0644]
common/python/rift/mano/utils/juju_api.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/__init__.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/common/__init__.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/common/exception.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/common/utils.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/compare_desc.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/conf/__init__.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/conf/config.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/conf/translator.conf [new file with mode: 0644]
common/python/rift/mano/yang_translator/custom/__init__.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/custom/rwmano/__init__.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/rwmano/__init__.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/rwmano/syntax/__init__.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/rwmano/syntax/tosca_resource.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/rwmano/syntax/tosca_template.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/rwmano/translate_descriptors.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/rwmano/yang/__init__.py [new file with mode: 0755]
common/python/rift/mano/yang_translator/rwmano/yang/yang_nsd.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/rwmano/yang/yang_vdu.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/rwmano/yang/yang_vld.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/rwmano/yang/yang_vnfd.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/rwmano/yang_translator.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/shell.py [new file with mode: 0644]
common/python/rift/mano/yang_translator/test/data/ping_pong_tosca.yaml [new file with mode: 0644]
common/python/rift/mano/yang_translator/test/data/yang_helloworld.json [new file with mode: 0644]
common/python/rift/mano/yang_translator/test/data/yang_helloworld_invalid.json [new file with mode: 0644]
common/python/rift/mano/yang_translator/test/yang_translator_ut.py [new file with mode: 0755]
common/python/rift/mano/yang_translator/translator_logging.conf [new file with mode: 0644]
common/python/rift/mano/yang_translator/yang-translator [new file with mode: 0755]
common/python/test/CMakeLists.txt [new file with mode: 0644]
common/python/test/utest_config_data.py [new file with mode: 0644]
common/python/test/utest_juju_api.py [new file with mode: 0755]
common/rw_gen_package.py [new file with mode: 0755]
confd_client/CMakeLists.txt [new file with mode: 0644]
confd_client/Makefile [new file with mode: 0644]
confd_client/README [new file with mode: 0644]
confd_client/confd_client.c [new file with mode: 0644]
confd_client/test.sh [new file with mode: 0755]
examples/CMakeLists.txt [new file with mode: 0644]
examples/Makefile [new file with mode: 0644]
examples/ping_pong_ns/CMakeLists.txt [new file with mode: 0644]
examples/ping_pong_ns/Makefile [new file with mode: 0644]
examples/ping_pong_ns/config_desc.py [new file with mode: 0755]
examples/ping_pong_ns/generate_packages.sh.in [new file with mode: 0755]
examples/ping_pong_ns/ping_pong_ns/__init__.py [new file with mode: 0644]
examples/ping_pong_ns/ping_pong_ns/ping.py [new file with mode: 0644]
examples/ping_pong_ns/ping_pong_ns/ping.service [new file with mode: 0644]
examples/ping_pong_ns/ping_pong_ns/pong.py [new file with mode: 0644]
examples/ping_pong_ns/ping_pong_ns/pong.service [new file with mode: 0644]
examples/ping_pong_ns/ping_pong_ns/prepare_ping_pong_qcow.sh [new file with mode: 0755]
examples/ping_pong_ns/ping_pong_ns/start_ping [new file with mode: 0755]
examples/ping_pong_ns/ping_pong_ns/start_ping.py [new file with mode: 0644]
examples/ping_pong_ns/ping_pong_ns/start_pong [new file with mode: 0755]
examples/ping_pong_ns/ping_pong_ns/start_pong.py [new file with mode: 0644]
examples/ping_pong_ns/ping_pong_ns/test/test.sh [new file with mode: 0644]
examples/ping_pong_ns/ping_pong_ns/user-data [new file with mode: 0644]
examples/ping_pong_ns/ping_pong_ns/util/__init__.py [new file with mode: 0644]
examples/ping_pong_ns/ping_pong_ns/util/util.py [new file with mode: 0644]
examples/ping_pong_ns/ping_pong_nsd.py [new symlink]
examples/ping_pong_ns/rift/mano/__init__.py [new file with mode: 0644]
examples/ping_pong_ns/rift/mano/examples/__init__.py [new file with mode: 0644]
examples/ping_pong_ns/rift/mano/examples/ping_config.py [new file with mode: 0755]
examples/ping_pong_ns/rift/mano/examples/ping_config_ut.sh [new file with mode: 0755]
examples/ping_pong_ns/rift/mano/examples/ping_pong_nsd.py [new file with mode: 0755]
examples/ping_pong_ns/rift/mano/examples/start_traffic.py [new file with mode: 0755]
examples/ping_pong_ns/rift_logo.png [new file with mode: 0644]
examples/ping_pong_ns/stand_up_ping_pong [new file with mode: 0644]
foss.txt [new file with mode: 0644]
manifest/LICENSE [new file with mode: 0644]
models/CMakeLists.txt [new file with mode: 0644]
models/openmano/CMakeLists.txt [new file with mode: 0644]
models/openmano/bin/CMakeLists.txt [new file with mode: 0644]
models/openmano/bin/openmano [new file with mode: 0755]
models/openmano/bin/openmano_cleanup.sh [new file with mode: 0755]
models/openmano/python/CMakeLists.txt [new file with mode: 0644]
models/openmano/python/rift/openmano/__init__.py [new file with mode: 0644]
models/openmano/python/rift/openmano/openmano_client.py [new file with mode: 0755]
models/openmano/python/rift/openmano/rift2openmano.py [new file with mode: 0755]
models/openmano/src/CMakeLists.txt [new file with mode: 0644]
models/openmano/src/generate_tidgen_packages.sh.in [new file with mode: 0755]
models/openmano/src/openmano2rift.py [new file with mode: 0755]
models/openmano/test/tidgen_ns_2sriov.yaml [new file with mode: 0644]
models/openmano/test/tidgen_ns_2sriov_no_ctrlnet.yaml [new file with mode: 0644]
models/openmano/test/tidgen_ns_4sriov.yaml [new file with mode: 0644]
models/openmano/test/tidgen_ns_4sriov_no_ctrlnet.yaml [new file with mode: 0644]
models/openmano/test/tidgen_vnf_2sriov.yaml [new file with mode: 0644]
models/openmano/test/tidgen_vnf_2sriov_no_ctrlnet.yaml [new file with mode: 0644]
models/openmano/test/tidgen_vnf_4sriov.yaml [new file with mode: 0644]
models/openmano/test/tidgen_vnf_4sriov_no_ctrlnet.yaml [new file with mode: 0644]
models/plugins/CMakeLists.txt [new file with mode: 0644]
models/plugins/yang/CMakeLists.txt [new file with mode: 0644]
models/plugins/yang/Makefile [new file with mode: 0644]
models/plugins/yang/ietf-l2-topology.yang [new file with mode: 0644]
models/plugins/yang/ietf-network-topology.yang [new file with mode: 0644]
models/plugins/yang/ietf-network.tailf.yang [new file with mode: 0644]
models/plugins/yang/ietf-network.yang [new file with mode: 0644]
models/plugins/yang/mano-types.yang [new file with mode: 0644]
models/plugins/yang/nsd.yang [new file with mode: 0644]
models/plugins/yang/nsr.tailf.yang [new file with mode: 0644]
models/plugins/yang/nsr.yang [new file with mode: 0644]
models/plugins/yang/odl-network-topology.yang [new file with mode: 0644]
models/plugins/yang/pnfd.yang [new file with mode: 0644]
models/plugins/yang/rw-nsd.yang [new file with mode: 0644]
models/plugins/yang/rw-nsr.tailf.yang [new file with mode: 0644]
models/plugins/yang/rw-nsr.yang [new file with mode: 0644]
models/plugins/yang/rw-topology.yang [new file with mode: 0644]
models/plugins/yang/rw-vld.yang [new file with mode: 0644]
models/plugins/yang/rw-vlr.yang [new file with mode: 0644]
models/plugins/yang/rw-vnfd.yang [new file with mode: 0644]
models/plugins/yang/rw-vnfr.tailf.yang [new file with mode: 0644]
models/plugins/yang/rw-vnfr.yang [new file with mode: 0644]
models/plugins/yang/vld.yang [new file with mode: 0644]
models/plugins/yang/vlr.tailf.yang [new file with mode: 0644]
models/plugins/yang/vlr.yang [new file with mode: 0644]
models/plugins/yang/vnfd.yang [new file with mode: 0644]
models/plugins/yang/vnffgd.yang [new file with mode: 0644]
models/plugins/yang/vnfr.tailf.yang [new file with mode: 0644]
models/plugins/yang/vnfr.yang [new file with mode: 0644]
rwcal/CMakeLists.txt [new file with mode: 0644]
rwcal/Makefile [new file with mode: 0644]
rwcal/etc/userdata-template [new file with mode: 0644]
rwcal/include/riftware/rwcal-api.h [new file with mode: 0644]
rwcal/plugins/CMakeLists.txt [new file with mode: 0644]
rwcal/plugins/rwcalproxytasklet/CMakeLists.txt [new file with mode: 0644]
rwcal/plugins/rwcalproxytasklet/rift/tasklets/rwcalproxytasklet/__init__.py [new file with mode: 0644]
rwcal/plugins/rwcalproxytasklet/rift/tasklets/rwcalproxytasklet/rwcalproxytasklet.py [new file with mode: 0644]
rwcal/plugins/rwcalproxytasklet/rwcalproxytasklet.py [new file with mode: 0644]
rwcal/plugins/vala/CMakeLists.txt [new file with mode: 0644]
rwcal/plugins/vala/Makefile [new file with mode: 0644]
rwcal/plugins/vala/rwcal.vala [new file with mode: 0644]
rwcal/plugins/vala/rwcal_aws/CMakeLists.txt [new file with mode: 0644]
rwcal/plugins/vala/rwcal_aws/Makefile [new file with mode: 0644]
rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/__init__.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/aws_drv.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/aws_table.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/delete_vm.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/exceptions.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/prepare_vm.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_aws/rwcal_aws.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_cloudsim/CMakeLists.txt [new file with mode: 0644]
rwcal/plugins/vala/rwcal_cloudsim/Makefile [new file with mode: 0644]
rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/__init__.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/core.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/exceptions.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/image.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/lvm.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/lxc.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/net.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/shell.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_cloudsim/rwcal_cloudsim.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_cloudsim/test/cloudsim_module_test.py [new file with mode: 0755]
rwcal/plugins/vala/rwcal_cloudsimproxy/CMakeLists.txt [new file with mode: 0644]
rwcal/plugins/vala/rwcal_cloudsimproxy/Makefile [new file with mode: 0644]
rwcal/plugins/vala/rwcal_cloudsimproxy/rwcal_cloudsimproxy.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_mock/CMakeLists.txt [new file with mode: 0644]
rwcal/plugins/vala/rwcal_mock/Makefile [new file with mode: 0644]
rwcal/plugins/vala/rwcal_mock/rwcal_mock.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_openmano/CMakeLists.txt [new file with mode: 0644]
rwcal/plugins/vala/rwcal_openmano/Makefile [new file with mode: 0644]
rwcal/plugins/vala/rwcal_openmano/rwcal_openmano.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_openmano_vimconnector/CMakeLists.txt [new file with mode: 0644]
rwcal/plugins/vala/rwcal_openmano_vimconnector/Makefile [new file with mode: 0644]
rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/README [new file with mode: 0644]
rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/__init__.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/openmano_schemas.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/vimconn.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/vimconn_openvim.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_openmano_vimconnector/rwcal_openmano_vimconnector.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_openstack/CMakeLists.txt [new file with mode: 0644]
rwcal/plugins/vala/rwcal_openstack/Makefile [new file with mode: 0644]
rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/__init__.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_utils.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/prepare_vm.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_openstack/rwcal_openstack.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_vsphere/CMakeLists.txt [new file with mode: 0644]
rwcal/plugins/vala/rwcal_vsphere/Makefile [new file with mode: 0644]
rwcal/plugins/vala/rwcal_vsphere/rift/vsphere/vsphere.py [new file with mode: 0644]
rwcal/plugins/vala/rwcal_vsphere/rwcal_vsphere.py [new file with mode: 0644]
rwcal/plugins/yang/CMakeLists.txt [new file with mode: 0644]
rwcal/plugins/yang/Makefile [new file with mode: 0644]
rwcal/plugins/yang/rwcal.yang [new file with mode: 0644]
rwcal/rift/cal/client.py [new file with mode: 0644]
rwcal/rift/cal/cloudsim [new file with mode: 0644]
rwcal/rift/cal/rwcal_status.py [new file with mode: 0644]
rwcal/rift/cal/server/__init__.py [new file with mode: 0644]
rwcal/rift/cal/server/app.py [new file with mode: 0644]
rwcal/rift/cal/server/operations.py [new file with mode: 0644]
rwcal/rift/cal/server/server.py [new file with mode: 0644]
rwcal/rift/cal/utils.py [new file with mode: 0644]
rwcal/src/CMakeLists.txt [new file with mode: 0644]
rwcal/src/Makefile [new file with mode: 0644]
rwcal/src/rwcal_py.c [new file with mode: 0644]
rwcal/src/rwvim.py [new file with mode: 0755]
rwcal/test/CMakeLists.txt [new file with mode: 0644]
rwcal/test/RIFT.ware-ready.py [new file with mode: 0755]
rwcal/test/aws_resources.py [new file with mode: 0644]
rwcal/test/cal_module_test/CMakeLists.txt [new file with mode: 0644]
rwcal/test/cal_module_test/cal_module_test [new file with mode: 0755]
rwcal/test/cal_module_test/pytest/cal_module_test.py [new file with mode: 0644]
rwcal/test/cal_module_test/pytest/conftest.py [new file with mode: 0644]
rwcal/test/cal_module_test/racfg/cal_module_test.racfg [new file with mode: 0644]
rwcal/test/cloudtool_cal.py [new file with mode: 0755]
rwcal/test/ec2.py [new file with mode: 0644]
rwcal/test/openstack_resources.py [new file with mode: 0755]
rwcal/test/rwcal_callback_gtest.cpp [new file with mode: 0644]
rwcal/test/rwcal_dump.cpp [new file with mode: 0644]
rwcal/test/test_container_cal.py [new file with mode: 0644]
rwcal/test/test_openstack_install.py [new file with mode: 0644]
rwcal/test/test_rwcal_openstack.py [new file with mode: 0644]
rwcal/test/test_rwlxc_rwlaunchpad.py [new file with mode: 0644]
rwcm/CMakeLists.txt [new file with mode: 0644]
rwcm/plugins/CMakeLists.txt [new file with mode: 0644]
rwcm/plugins/rwconman/CMakeLists.txt [new file with mode: 0644]
rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/RiftCA.py [new file with mode: 0644]
rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/RiftCM_rpc.py [new file with mode: 0644]
rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/__init__.py [new file with mode: 0644]
rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/jujuconf.py [new file with mode: 0644]
rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/riftcm_config_plugin.py [new file with mode: 0644]
rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_conagent.py [new file with mode: 0644]
rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_config.py [new file with mode: 0644]
rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_events.py [new file with mode: 0644]
rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_test_config_template.cfg [new file with mode: 0644]
rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_test_xlate_dict.yml [new file with mode: 0644]
rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconmantasklet.py [new file with mode: 0755]
rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/xlate_cfg.py [new file with mode: 0644]
rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/xlate_tags.yml [new file with mode: 0644]
rwcm/plugins/rwconman/rwconmantasklet.py [new file with mode: 0755]
rwcm/plugins/yang/CMakeLists.txt [new file with mode: 0644]
rwcm/plugins/yang/rw-conman.tailf.yang [new file with mode: 0644]
rwcm/plugins/yang/rw-conman.yang [new file with mode: 0644]
rwcm/test/CMakeLists.txt [new file with mode: 0644]
rwcm/test/README.start_cm [new file with mode: 0644]
rwcm/test/cwims_juju_nsd/configuration_input_params.yml [new file with mode: 0644]
rwcm/test/cwims_juju_nsd/cwaio_vnfd_1_juju_template.cfg [new file with mode: 0644]
rwcm/test/ping_pong_nsd/configuration_input_params.yml [new file with mode: 0644]
rwcm/test/ping_pong_nsd/ping_vnfd_1_scriptconf_template.cfg [new file with mode: 0755]
rwcm/test/ping_pong_nsd/pong_vnfd_11_scriptconf_template.cfg [new file with mode: 0755]
rwcm/test/rwso_test.py [new file with mode: 0755]
rwcm/test/start_cm_system.py [new file with mode: 0755]
rwcm/test/tg_vrouter_ts_nsd/configuration_input_params.yml [new file with mode: 0644]
rwcm/test/tg_vrouter_ts_nsd/trafgen_vnfd_1_netconf_template.cfg [new file with mode: 0644]
rwcm/test/tg_vrouter_ts_nsd/trafsink_vnfd_3_netconf_template.cfg [new file with mode: 0644]
rwlaunchpad/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/mock/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/mock/README [new file with mode: 0644]
rwlaunchpad/mock/data/nfvi-metrics.json [new file with mode: 0644]
rwlaunchpad/mock/data/ns-instance-config.json [new file with mode: 0644]
rwlaunchpad/mock/data/nsd_catalog.json [new file with mode: 0644]
rwlaunchpad/mock/data/nsr-templates.json [new file with mode: 0644]
rwlaunchpad/mock/data/ping-pong-ns-instance-config.json [new file with mode: 0644]
rwlaunchpad/mock/data/ping-pong-nsd.json [new file with mode: 0644]
rwlaunchpad/mock/data/ping-pong-vnfd.json [new file with mode: 0644]
rwlaunchpad/mock/data/simmp-rules.json [new file with mode: 0644]
rwlaunchpad/mock/data/vld_catalog.json [new file with mode: 0644]
rwlaunchpad/mock/data/vnfd_catalog.json [new file with mode: 0644]
rwlaunchpad/mock/data/vnfr-templates.json [new file with mode: 0644]
rwlaunchpad/mock/data_model.js [new file with mode: 0644]
rwlaunchpad/mock/get_data.sh [new file with mode: 0755]
rwlaunchpad/mock/get_ns_instance_opdata.sh [new file with mode: 0755]
rwlaunchpad/mock/lp_mock_client.js [new file with mode: 0644]
rwlaunchpad/mock/package.json [new file with mode: 0644]
rwlaunchpad/mock/plugins/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/mock/plugins/yang/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/mock/plugins/yang/lpmocklet.tailf.yang [new file with mode: 0644]
rwlaunchpad/mock/plugins/yang/lpmocklet.yang [new file with mode: 0644]
rwlaunchpad/mock/set_data.sh [new file with mode: 0755]
rwlaunchpad/mock/set_ping_pong.sh [new file with mode: 0755]
rwlaunchpad/mock/simmp.js [new file with mode: 0644]
rwlaunchpad/mock/test/test_simmp.js [new file with mode: 0644]
rwlaunchpad/plugins/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/cli/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/cli/cli_launchpad_schema_listing.txt [new file with mode: 0644]
rwlaunchpad/plugins/rwautoscaler/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/rwautoscaler/Makefile [new file with mode: 0644]
rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/__init__.py [new file with mode: 0644]
rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/engine.py [new file with mode: 0644]
rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/rwautoscaler.py [new file with mode: 0644]
rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/scaling_operation.py [new file with mode: 0644]
rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/subscribers.py [new file with mode: 0644]
rwlaunchpad/plugins/rwautoscaler/rwautoscaler.py [new file with mode: 0644]
rwlaunchpad/plugins/rwautoscaler/test/utest_autoscaler_dts.py [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/Makefile [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/bin/glance_start_wrapper [new file with mode: 0755]
rwlaunchpad/plugins/rwimagemgr/bin/upload_image.py [new file with mode: 0755]
rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-api-dist-paste.ini [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-api.conf [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-cache.conf [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-registry.conf [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-scrubber.conf [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/etc/fc20/policy.json [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/etc/fc20/schema-image.json [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-api-paste.ini [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-api.conf [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-cache.conf [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-manage.conf [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-registry-paste.ini [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-registry.conf [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/etc/ub16/policy.json [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/etc/ub16/schema-image.json [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/rift/imagemgr/__init__.py [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/rift/imagemgr/client.py [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/__init__.py [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/glance_client.py [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/glance_proxy_server.py [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/__init__.py [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/__init__.py [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/data/test.crt [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/data/test.key [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/proxy.py [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/tasklet.py [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/upload.py [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/rwimagemgrtasklet.py [new file with mode: 0755]
rwlaunchpad/plugins/rwimagemgr/test/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/rwimagemgr/test/utest_dts_handlers.py [new file with mode: 0755]
rwlaunchpad/plugins/rwimagemgr/test/utest_image_upload.py [new file with mode: 0755]
rwlaunchpad/plugins/rwlaunchpadtasklet/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/Makefile [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/__init__.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/archive.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/charm.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/checksums.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/cloud_init.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/config.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/convert.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/icon.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/image.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/package.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/script.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/store.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/__init__.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/convert_pkg.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/datacenters.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/export.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/extract.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/image.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/message.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/onboard.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/state.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tasklet.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tosca.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/uploader.py [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/rwlaunchpad.py [new file with mode: 0755]
rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/Makefile [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/rwlaunchpad [new file with mode: 0755]
rwlaunchpad/plugins/rwlaunchpadtasklet/test/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/rwlaunchpadtasklet/test/run_tests.sh [new file with mode: 0755]
rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_export.py [new file with mode: 0755]
rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_onboard.py [new file with mode: 0755]
rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_package.py [new file with mode: 0755]
rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_serializer.py [new file with mode: 0755]
rwlaunchpad/plugins/rwmonitor/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/rwmonitor/Makefile [new file with mode: 0644]
rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/__init__.py [new file with mode: 0644]
rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/core.py [new file with mode: 0644]
rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/tasklet.py [new file with mode: 0644]
rwlaunchpad/plugins/rwmonitor/rwmonitor.py [new file with mode: 0755]
rwlaunchpad/plugins/rwmonparam/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/__init__.py [new file with mode: 0644]
rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/aggregator.py [new file with mode: 0644]
rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/nsr_core.py [new file with mode: 0644]
rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/rwmonparam.py [new file with mode: 0644]
rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/vnfr_core.py [new file with mode: 0644]
rwlaunchpad/plugins/rwmonparam/rwmonparam.py [new file with mode: 0644]
rwlaunchpad/plugins/rwmonparam/test/utest_aggregator.py [new file with mode: 0644]
rwlaunchpad/plugins/rwmonparam/test/utest_mon_params.py [new file with mode: 0755]
rwlaunchpad/plugins/rwmonparam/test/utest_mon_params_dts.py [new file with mode: 0644]
rwlaunchpad/plugins/rwnsm/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/rwnsm/Makefile [new file with mode: 0644]
rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/__init__.py [new file with mode: 0644]
rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/cloud.py [new file with mode: 0644]
rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/config_value_pool.py [new file with mode: 0644]
rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/openmano_nsm.py [new file with mode: 0644]
rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/publisher.py [new file with mode: 0644]
rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conman.py [new file with mode: 0644]
rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmplugin.py [new file with mode: 0755]
rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py [new file with mode: 0755]
rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwvnffgmgr.py [new file with mode: 0755]
rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/scale_group.py [new file with mode: 0644]
rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/so_endpoint_cfg.xml [new file with mode: 0644]
rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/xpath.py [new file with mode: 0755]
rwlaunchpad/plugins/rwnsm/rwnsmtasklet.py [new file with mode: 0755]
rwlaunchpad/plugins/rwresmgr/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/rwresmgr/Makefile [new file with mode: 0644]
rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/__init__.py [new file with mode: 0644]
rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_config.py [new file with mode: 0644]
rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_core.py [new file with mode: 0644]
rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_events.py [new file with mode: 0755]
rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py [new file with mode: 0755]
rwlaunchpad/plugins/rwresmgr/rwresmgrtasklet.py [new file with mode: 0755]
rwlaunchpad/plugins/rwresmgr/test/rmmgr_test.py [new file with mode: 0755]
rwlaunchpad/plugins/rwvnfm/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/rwvnfm/Makefile [new file with mode: 0644]
rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/__init__.py [new file with mode: 0644]
rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py [new file with mode: 0755]
rwlaunchpad/plugins/rwvnfm/rwvnfmtasklet.py [new file with mode: 0755]
rwlaunchpad/plugins/rwvns/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/Makefile [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/__init__.py [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/rwvnstasklet.py [new file with mode: 0755]
rwlaunchpad/plugins/rwvns/rift/topmgr/__init__.py [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/rift/topmgr/core.py [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/rift/topmgr/mock.py [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopdatastore.py [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopmgr.py [new file with mode: 0755]
rwlaunchpad/plugins/rwvns/rift/topmgr/sdnsim.py [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/rift/vlmgr/__init__.py [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/rift/vlmgr/rwvlmgr.py [new file with mode: 0755]
rwlaunchpad/plugins/rwvns/rwvnstasklet.py [new file with mode: 0755]
rwlaunchpad/plugins/rwvns/test/create_stackedProvNettopology.py [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/test/create_stackedSfctopology.py [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/test/create_stackedVMNettopology.py [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/test/create_stackedl2topology.py [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/test/test_sdn_mock.py [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/test/test_sdn_sim.py [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/test/test_top_datastore.py [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/test/topmgr_module_test.py [new file with mode: 0755]
rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/vala/rwsdn-python/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/vala/rwsdn-python/rwsdn-plugin.py [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/vala/rwsdn.vala [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/rwsdn_mock.py [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/rwsdn_odl.py [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/rwsdn_sim.py [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/yang/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/yang/Makefile [new file with mode: 0644]
rwlaunchpad/plugins/rwvns/yang/rwsdn.yang [new file with mode: 0644]
rwlaunchpad/plugins/vala/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/vala/Makefile [new file with mode: 0644]
rwlaunchpad/plugins/vala/rwos_ma_nfvo/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo.vala [new file with mode: 0644]
rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/Makefile [new file with mode: 0644]
rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/rwos_ma_nfvo_rest.py [new file with mode: 0644]
rwlaunchpad/plugins/vala/rwve_vnfm_em/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em.vala [new file with mode: 0644]
rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/Makefile [new file with mode: 0644]
rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/rwve_vnfm_em_rest.py [new file with mode: 0644]
rwlaunchpad/plugins/vala/rwve_vnfm_vnf/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf.vala [new file with mode: 0644]
rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/Makefile [new file with mode: 0644]
rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/rwve_vnfm_vnf_rest.py [new file with mode: 0644]
rwlaunchpad/plugins/yang/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/plugins/yang/Makefile [new file with mode: 0644]
rwlaunchpad/plugins/yang/rw-image-mgmt.tailf.yang [new file with mode: 0644]
rwlaunchpad/plugins/yang/rw-image-mgmt.yang [new file with mode: 0644]
rwlaunchpad/plugins/yang/rw-launchpad.tailf.yang [new file with mode: 0644]
rwlaunchpad/plugins/yang/rw-launchpad.yang [new file with mode: 0644]
rwlaunchpad/plugins/yang/rw-monitor.yang [new file with mode: 0644]
rwlaunchpad/plugins/yang/rw-nsm.yang [new file with mode: 0644]
rwlaunchpad/plugins/yang/rw-resource-mgr.tailf.yang [new file with mode: 0644]
rwlaunchpad/plugins/yang/rw-resource-mgr.yang [new file with mode: 0644]
rwlaunchpad/plugins/yang/rw-vnfm.yang [new file with mode: 0644]
rwlaunchpad/plugins/yang/rw-vns.yang [new file with mode: 0644]
rwlaunchpad/ra/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/ra/launchpad_longevity_systest [new file with mode: 0755]
rwlaunchpad/ra/launchpad_systest [new file with mode: 0755]
rwlaunchpad/ra/multi_vm_vnf_slb_systest.sh [new file with mode: 0755]
rwlaunchpad/ra/multi_vm_vnf_trafgen_systest.sh [new file with mode: 0755]
rwlaunchpad/ra/pingpong_longevity_systest [new file with mode: 0755]
rwlaunchpad/ra/pingpong_lp_ha_systest [new file with mode: 0755]
rwlaunchpad/ra/pingpong_records_systest [new file with mode: 0755]
rwlaunchpad/ra/pingpong_recovery_systest [new file with mode: 0755]
rwlaunchpad/ra/pingpong_scaling_systest [new file with mode: 0755]
rwlaunchpad/ra/pingpong_vnf_reload_systest [new file with mode: 0755]
rwlaunchpad/ra/pingpong_vnf_systest [new file with mode: 0755]
rwlaunchpad/ra/pytest/conftest.py [new file with mode: 0644]
rwlaunchpad/ra/pytest/multivm_vnf/conftest.py [new file with mode: 0644]
rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_slb.py [new file with mode: 0755]
rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py [new file with mode: 0755]
rwlaunchpad/ra/pytest/multivm_vnf/test_trafgen_data.py [new file with mode: 0644]
rwlaunchpad/ra/pytest/ns/conftest.py [new file with mode: 0644]
rwlaunchpad/ra/pytest/ns/haproxy/test_scaling.py [new file with mode: 0644]
rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong.py [new file with mode: 0644]
rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong_longevity.py [new file with mode: 0644]
rwlaunchpad/ra/pytest/ns/pingpong/test_records.py [new file with mode: 0644]
rwlaunchpad/ra/pytest/ns/pingpong/test_scaling.py [new file with mode: 0644]
rwlaunchpad/ra/pytest/ns/test_onboard.py [new file with mode: 0644]
rwlaunchpad/ra/pytest/test_failover.py [new file with mode: 0755]
rwlaunchpad/ra/pytest/test_launchpad.py [new file with mode: 0644]
rwlaunchpad/ra/pytest/test_launchpad_longevity.py [new file with mode: 0644]
rwlaunchpad/ra/pytest/test_start_standby.py [new file with mode: 0755]
rwlaunchpad/ra/racfg/multi_tenant_systest_openstack.racfg [new file with mode: 0644]
rwlaunchpad/ra/racfg/multivm_vnf_slb_systest.racfg [new file with mode: 0644]
rwlaunchpad/ra/racfg/multivm_vnf_trafgen_systest.racfg [new file with mode: 0755]
rwlaunchpad/ra/racfg/pingpong_lp_ha_systest_openstack.racfg [new file with mode: 0644]
rwlaunchpad/ra/racfg/pingpong_records_systest_cloudsim.racfg [new file with mode: 0644]
rwlaunchpad/ra/racfg/pingpong_records_systest_openstack.racfg [new file with mode: 0644]
rwlaunchpad/ra/racfg/pingpong_records_systest_openstack_xml.racfg [new file with mode: 0644]
rwlaunchpad/ra/racfg/pingpong_scaling_systest_openstack.racfg [new file with mode: 0644]
rwlaunchpad/ra/racfg/pingpong_vnf_reload_systest_openstack.racfg [new file with mode: 0644]
rwlaunchpad/ra/racfg/pingpong_vnf_reload_systest_openstack_xml.racfg [new file with mode: 0644]
rwlaunchpad/ra/racfg/pingpong_vnf_systest_cloudsim.racfg [new file with mode: 0644]
rwlaunchpad/ra/racfg/pingpong_vnf_systest_openstack.racfg [new file with mode: 0644]
rwlaunchpad/ra/racfg/recovery_systest.racfg [new file with mode: 0644]
rwlaunchpad/ra/racfg/scaling_systest.racfg [new file with mode: 0644]
rwlaunchpad/ra/scaling_systest [new file with mode: 0755]
rwlaunchpad/test/CMakeLists.txt [new file with mode: 0644]
rwlaunchpad/test/launchpad [new file with mode: 0644]
rwlaunchpad/test/launchpad.py [new file with mode: 0755]
rwlaunchpad/test/launchpad_recovery [new file with mode: 0755]
rwlaunchpad/test/mano_error_ut.py [new file with mode: 0755]
rwlaunchpad/test/mano_ut.py [new file with mode: 0755]
rwlaunchpad/test/mgmt_recovery.py [new file with mode: 0755]
rwlaunchpad/test/pytest/lp_kt_utm_test.py [new file with mode: 0644]
rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py [new file with mode: 0644]
rwlaunchpad/test/pytest/lp_test.py [new file with mode: 0644]
rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py [new file with mode: 0644]
rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py [new file with mode: 0644]
rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py [new file with mode: 0644]
rwlaunchpad/test/racfg/lprecovery_test.racfg [new file with mode: 0644]
rwlaunchpad/test/tosca_ut.py [new file with mode: 0755]
rwlaunchpad/test/utest_nsr_handler.py [new file with mode: 0755]
rwlaunchpad/test/utest_ro_account.py [new file with mode: 0644]
rwlaunchpad/test/utest_rwmonitor.py [new file with mode: 0755]
rwlaunchpad/test/utest_rwnsm.py [new file with mode: 0755]
rwlaunchpad/test/utest_scaling_rpc.py [new file with mode: 0644]
rwmon/CMakeLists.txt [new file with mode: 0644]
rwmon/Makefile [new file with mode: 0644]
rwmon/plugins/CMakeLists.txt [new file with mode: 0644]
rwmon/plugins/vala/CMakeLists.txt [new file with mode: 0644]
rwmon/plugins/vala/Makefile [new file with mode: 0644]
rwmon/plugins/vala/rwmon.vala [new file with mode: 0644]
rwmon/plugins/vala/rwmon_ceilometer/CMakeLists.txt [new file with mode: 0644]
rwmon/plugins/vala/rwmon_ceilometer/Makefile [new file with mode: 0644]
rwmon/plugins/vala/rwmon_ceilometer/rwmon_ceilometer.py [new file with mode: 0644]
rwmon/plugins/vala/rwmon_mock/CMakeLists.txt [new file with mode: 0644]
rwmon/plugins/vala/rwmon_mock/Makefile [new file with mode: 0644]
rwmon/plugins/vala/rwmon_mock/rwmon_mock.py [new file with mode: 0644]
rwmon/plugins/yang/CMakeLists.txt [new file with mode: 0644]
rwmon/plugins/yang/Makefile [new file with mode: 0644]
rwmon/plugins/yang/rwmon.yang [new file with mode: 0644]
rwmon/test/CMakeLists.txt [new file with mode: 0644]
rwmon/test/Makefile [new file with mode: 0644]
rwmon/test/utest_rwmon.py [new file with mode: 0644]
rwso/plugins/cli/cli_so_schema_listing.txt [new file with mode: 0644]
rwso/plugins/yang/rw-sorch-log.yang [new file with mode: 0644]

diff --git a/.cpack-workaround b/.cpack-workaround
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/.gitignore b/.gitignore
new file mode 100644 (file)
index 0000000..6c320fe
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,45 @@
+# Object file directories
+.install
+.obj
+.build
+
+# Emacs
+*~
+\#*\#
+/.emacs.desktop
+/.emacs.desktop.lock
+.elc
+auto-save-list
+tramp
+.\#*
+
+# vi
+*.swp
+*.swo
+*.swn
+
+libunwind/libunwind-1.1/doc/unw_get_proc_info.man
+
+#cscope
+cscope.files
+cscope.out
+
+autom4te.cache
+
+*.pyc
+core.*
+!core.py
+*.sock
+*.pyc
+*.swo
+*.swp
+.idea
+
+#Rope
+.ropeproject
+
+#node_modules
+node_modules/
+*.log
+
+.cache
diff --git a/BUILD.sh b/BUILD.sh
new file mode 100755 (executable)
index 0000000..12f06a1
--- /dev/null
+++ b/BUILD.sh
@@ -0,0 +1,94 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Jeremy Mordkoff
+# Creation Date: 08/29/2016
+# 
+#
+
+# BUILD.sh
+#
+# This is a top-level build script for RIFT.io
+#
+# args [PLATFORM_REPOSITORY] [PLATFORM_VERSION] (both optional; defaults below)
+#
+# dependencies -- requires sudo rights
+
+
+# ARGS
+
+PLATFORM_REPOSITORY=${1:-OSM}
+PLATFORM_VERSION=${2:-4.3.1.0.48360-1}
+
+# must be run from the top of a workspace
+cd $(dirname $0)
+
+
+
+
+# inside RIFT.io this is an NFS mount
+# so just to be safe
+test -h /usr/rift && sudo rm -f /usr/rift
+
+# get the container tools from the correct repository
+sudo rm -f /etc/yum.repos.d/private.repo
+sudo curl -o /etc/yum.repos.d/${PLATFORM_REPOSITORY}.repo \
+    http://buildtracker.riftio.com/repo_file/fc20/${PLATFORM_REPOSITORY}/ 
+sudo yum install --assumeyes rw.tools-container-tools rw.tools-scripts
+
+
+# enable the OSM repository hosted by RIFT.io
+# this contains the RIFT platform code and tools
+# and install the packages required to build and run
+# this module
+sudo /usr/rift/container_tools/mkcontainer --modes build --modes ext --repo ${PLATFORM_REPOSITORY}
+
+temp=$(mktemp -d /tmp/rw.XXX)
+pushd $temp
+
+# yum does not accept the --nodeps and --replacefiles options, so we
+# download first and then install
+yumdownloader rw.toolchain-rwbase-${PLATFORM_VERSION} \
+                       rw.toolchain-rwtoolchain-${PLATFORM_VERSION} \
+                       rw.core.mgmt-mgmt-${PLATFORM_VERSION} \
+                       rw.core.util-util-${PLATFORM_VERSION} \
+                       rw.core.rwvx-rwvx-${PLATFORM_VERSION} \
+                       rw.core.rwvx-rwha-1.0-${PLATFORM_VERSION} \
+                       rw.core.rwvx-rwdts-${PLATFORM_VERSION} \
+                       rw.automation.core-RWAUTO-${PLATFORM_VERSION} \
+                       rw.core.mc-models-1.0-${PLATFORM_VERSION}
+sudo rpm -i --replacefiles --nodeps *rpm
+popd
+rm -rf $temp
+
+# this file gets in the way of the one generated by the build
+sudo rm -f /usr/rift/usr/lib/libmano_yang_gen.so
+
+
+sudo chmod 777 /usr/rift /usr/rift/usr/share
+
+# install some base files used to create VNFs
+test -d /usr/rift/images || mkdir /usr/rift/images
+for file in Fedora-x86_64-20-20131211.1-sda-ping.qcow2 Fedora-x86_64-20-20131211.1-sda-pong.qcow2 Fedora-x86_64-20-20131211.1-sda.qcow2; do
+    test -f /usr/rift/images/$file || curl -o /usr/rift/images/$file http://repo.riftio.com/releases/open.riftio.com/4.3.1/$file 
+done
+
+####### If you are rebuilding, you only need to run
+# these two steps
+make -j16 
+sudo make install
+
+# note: to start the RIFT.io UI, run
+echo 'sudo /usr/rift/rift-shell -r -i /usr/rift -a /usr/rift/.artifacts -- ./demos/launchpad.py --use-xml-mode --no-ui'
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644 (file)
index 0000000..ae622a3
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,86 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 03/26/2014
+# 
+
+##
+# DEPENDENCY ALERT
+# The submodule dependencies must be specified in the
+# .gitmodules.dep file at the top level (supermodule) directory
+# If this submodule depends on other submodules, remember to update
+# the .gitmodules.dep file
+##
+
+cmake_minimum_required(VERSION 2.8)
+
+message(CMAKE_MODULE_PATH=${CMAKE_MODULE_PATH})
+
+##
+# DO NOT add any code before this and DO NOT
+# include this file anywhere else
+##
+include(rift_submodule)
+include(rift_python)
+
+##
+# Submodule-specific includes go here. These are specified at this
+# level because the variables are accessed from multiple
+# subdirectories. If a variable is specific to one subdirectory,
+# it must be declared in that subdirectory.
+##
+
+##
+# Include the subdirs
+##
+set(subdirs
+  common
+  examples
+  models
+  rwcal
+  rwmon
+  rwcm
+  rwlaunchpad
+  )
+
+if (NOT RIFT_AGENT_BUILD STREQUAL "XML_ONLY")
+  list(APPEND subdirs confd_client)
+endif()
+
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
+
+##
+# This macro adds targets for documentation, unittests, code coverage and packaging
+##
+rift_add_submodule_targets(SUBMODULE_PACKAGE_NAME "rw.core.mano")
+
+# Workaround whatever mess rw.package is doing as it can't seem
+# to figure out that it should make a directory -before- making
+# symlinks..
+set(dir usr/lib64/python${RIFT_PYTHON3}/site-packages/gi/overrides)
+install(FILES
+  ${CMAKE_CURRENT_SOURCE_DIR}/.cpack-workaround
+  DESTINATION ${dir})
+
+if (RIFT_SUPPORT_PYTHON2)
+  set(dir usr/lib64/python${RIFT_PYTHON2}/site-packages/gi/overrides)
+
+  install(FILES
+    ${CMAKE_CURRENT_SOURCE_DIR}/.cpack-workaround
+    DESTINATION ${dir})
+endif()
+
+
diff --git a/Makefile b/Makefile
new file mode 100644 (file)
index 0000000..cf16aff
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,61 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 07/27/2016
+# 
+#
+
+.PHONY: all cmake rw install unittest clean
+
+makefile.top := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
+TOP_SRC_PATH := $(makefile.top)
+TOP_ROOT_PATH := /usr/rift
+CMAKE_MODULE_PATH := $(TOP_ROOT_PATH)/cmake/modules
+
+RIFT_BUILD = $(TOP_SRC_PATH)/.build
+RIFT_ARTIFACTS = $(TOP_ROOT_PATH)/artifacts
+RIFT_INSTALL = $(TOP_ROOT_PATH)
+RIFT_SHELL_EXE = $(TOP_ROOT_PATH)/rift-shell -b $(RIFT_BUILD) -i $(RIFT_INSTALL) -a $(RIFT_ARTIFACTS) --
+
+CONFD = XML_ONLY
+
+BUILD_TYPE = Debug
+NOT_DEVELOPER_TYPE = TRUE
+COVERAGE_BUILD = FALSE
+RIFT_AGENT_BUILD = $(CONFD)
+PROJECT_TOP_DIR = $(TOP_ROOT_PATH)
+
+all: rw
+
+cmake::
+       mkdir -p $(RIFT_BUILD)
+       mkdir -p $(RIFT_ARTIFACTS)
+       mkdir -p $(RIFT_INSTALL)
+       cd $(RIFT_BUILD) && $(RIFT_SHELL_EXE) cmake $(TOP_SRC_PATH) -DCMAKE_INSTALL_PREFIX=$(TOP_ROOT_PATH) -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DNOT_DEVELOPER_BUILD=$(NOT_DEVELOPER_TYPE) -DCOVERAGE_BUILD=$(COVERAGE_BUILD) -DRIFT_AGENT_BUILD=$(RIFT_AGENT_BUILD) -DPROJECT_TOP_DIR=$(PROJECT_TOP_DIR) -DCMAKE_MODULE_PATH=${CMAKE_MODULE_PATH} -DRIFT_SUBMODULE_NAME=$(TOP_SRC_PATH)
+
+rw: cmake
+       $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD)
+
+install:
+       $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) install
+
+unittest:
+       $(RIFT_SHELL_EXE) $(MAKE) -C $(RIFT_BUILD) rw.unittest
+
+clean:
+       @echo "Cleaning up.."
+       -rm -rf .build
+
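As the targets above suggest, the expected local flow is presumably "make" (which runs the cmake configure step and then builds), followed by "make install" and, optionally, "make unittest"; variables such as BUILD_TYPE can be overridden on the command line in the usual Make fashion (e.g. "make BUILD_TYPE=Release").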
diff --git a/README b/README
new file mode 100644 (file)
index 0000000..ed6a799
--- /dev/null
+++ b/README
@@ -0,0 +1,9 @@
+This submodule contains the MANO subsystem from RIFT.ware. The following
+section describes the directory structure of the MANO subsystem:
+
+common: contains code shared by mission-control and launchpad
+examples: contains a ping/pong NS example
+models: contains YANG-based information models
+rwlaunchpad: contains software for the RIFT.ware launchpad
+rwmc: contains software for RIFT.ware mission control
+rwcm: contains software for the RIFT.ware configuration manager
diff --git a/build-dependencies b/build-dependencies
new file mode 100644 (file)
index 0000000..20668b0
--- /dev/null
+++ b/build-dependencies
@@ -0,0 +1,10 @@
+# List dependencies on packages.
+# Requires: list of packages that must be installed when building this submodule
+# Syntax:
+# ( <listname> = <item>* ; )*
+# - newlines are just whitespace
+# - comments run from '#' to end of line
+
+Requires =
+    fc20:protobuf-rw-compiler-2.6.1     # provides protoc
+;
diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt
new file mode 100644 (file)
index 0000000..9d7bd4f
--- /dev/null
+++ b/common/CMakeLists.txt
@@ -0,0 +1,42 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Joshua Downer
+# Author(s): Austin Cormier
+# Creation Date: 5/12/2015
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(PKG_NAME common)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+set(subdirs
+  plugins
+  python
+  )
+
+##
+# Include the subdirs
+##
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
+
+install(
+  FILES
+    rw_gen_package.py
+  DESTINATION usr/rift/mano/common
+  COMPONENT ${PKG_LONG_NAME})
diff --git a/common/plugins/CMakeLists.txt b/common/plugins/CMakeLists.txt
new file mode 100644 (file)
index 0000000..ef14c79
--- /dev/null
+++ b/common/plugins/CMakeLists.txt
@@ -0,0 +1,31 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Joshua Downer
+# Author(s): Austin Cormier
+# Creation Date: 5/12/2015
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(subdirs
+  rwcntmgrtasklet
+  yang
+  )
+
+##
+# Include the subdirs
+##
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/common/plugins/rwcntmgrtasklet/CMakeLists.txt b/common/plugins/rwcntmgrtasklet/CMakeLists.txt
new file mode 100644 (file)
index 0000000..103ed16
--- /dev/null
+++ b/common/plugins/rwcntmgrtasklet/CMakeLists.txt
@@ -0,0 +1,38 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 05/15/2015
+# 
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwcntmgrtasklet)
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/${TASKLET_NAME}/__init__.py
+    rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
diff --git a/common/plugins/rwcntmgrtasklet/Makefile b/common/plugins/rwcntmgrtasklet/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
+++ b/common/plugins/rwcntmgrtasklet/Makefile
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to locate the nearest Makefile.top in this directory or any ancestor
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
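In effect, the shell loop in find_upward walks from the current directory toward /, printing any Makefile.top it finds at each level on the way up; $(word 1, ...) then keeps the first (nearest) match, so each submodule Makefile picks up the closest enclosing Makefile.top.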
diff --git a/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/__init__.py b/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/__init__.py
new file mode 100644 (file)
index 0000000..88e006b
--- /dev/null
+++ b/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/__init__.py
@@ -0,0 +1 @@
+from .rwcntmgrtasklet import ContainerManager
diff --git a/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/rwcntmgrtasklet.py b/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/rwcntmgrtasklet.py
new file mode 100755 (executable)
index 0000000..02714a5
--- /dev/null
+++ b/common/plugins/rwcntmgrtasklet/rift/tasklets/rwcntmgrtasklet/rwcntmgrtasklet.py
@@ -0,0 +1,331 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import logging
+import os
+import shlex
+import subprocess
+import time
+import uuid
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwCal', '1.0')
+
+
+from gi.repository import (
+    RwDts as rwdts,
+    RwcalYang,
+)
+
+import rift.rwcal.cloudsim.lvm as lvm
+import rift.rwcal.cloudsim.lxc as lxc
+import rift.tasklets
+import rw_peas
+
+
+class SaltConnectionTimeoutError(Exception):
+    pass
+
+
+class ContainerManager(rift.tasklets.Tasklet):
+    def __init__(self, *args, **kwargs):
+        super(ContainerManager, self).__init__(*args, **kwargs)
+        self.lvm = None
+        self.resources = None
+        self.dts_api = None
+
+    def start(self):
+        super(ContainerManager, self).start()
+        self.log.info("Starting ContainerManager")
+        self.log.setLevel(logging.DEBUG)
+        ResourceProvisioning.log_hdl = self.log_hdl
+
+        self.log.debug("Registering with dts")
+        self._dts = rift.tasklets.DTS(
+                self.tasklet_info,
+                RwcalYang.get_schema(),
+                self.loop,
+                self.on_dts_state_change
+                )
+
+        self.log.debug("Created DTS Api GI Object: %s", self._dts)
+
+    def on_instance_started(self):
+        self.log.debug("Got instance started callback")
+
+    def stop(self):
+        super(ContainerManager, self).stop()
+        self.resources.destroy()
+        self.lvm.destroy()
+
+    @asyncio.coroutine
+    def init(self):
+        # Set up LVM ('rift' volume group) and container resource provisioning helpers
+        self.lvm = LvmProvisioning()
+        self.resources = ResourceProvisioning(self.loop, self.log)
+
+        # Tear down any container resources left over from a previous run
+        yield from self.loop.run_in_executor(
+                None,
+                self.resources.destroy,
+                )
+
+        if "REUSE_LXC" not in os.environ:
+            # Destroy any existing LVM backing store
+            yield from self.loop.run_in_executor(
+                    None,
+                    self.lvm.destroy,
+                    )
+
+            # Create a fresh LVM backing store
+            yield from self.loop.run_in_executor(
+                    None,
+                    self.lvm.create,
+                    )
+
+        # Create an initial set of VMs
+        yield from self.loop.run_in_executor(
+                None,
+                self.resources.create,
+                )
+
+        yield from self.loop.run_in_executor(
+                None,
+                self.resources.wait_ready,
+                )
+
+    @asyncio.coroutine
+    def run(self):
+        pass
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Take action according to current dts state to transition
+        application into the corresponding application state
+
+        Arguments
+            state - current dts state
+        """
+
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self._dts.handle.set_state(next_state)
+
+
+class LvmProvisioning(object):
+    """
+    This class represents LVM provisioning.
+    """
+
+    def create(self):
+        """Creates an LVM backing store"""
+        lvm.create('rift')
+
+    def destroy(self):
+        """Destroys the existing LVM backing store"""
+        lvm.destroy('rift')
+
+
+class ResourceProvisioning(object):
+    """
+    This is a placeholder class that is used to represent the provisioning of
+    container resources.
+    """
+
+    cal_interface = None
+    log_hdl = None
+
+    def __init__(self, loop, log):
+        # Initialize the CAL interface if it has not already been initialized
+        if ResourceProvisioning.cal_interface is None:
+            plugin = rw_peas.PeasPlugin('rwcal_cloudsimproxy', 'RwCal-1.0')
+            engine, info, extension = plugin()
+
+            ResourceProvisioning.cal_interface = plugin.get_interface("Cloud")
+            ResourceProvisioning.cal_interface.init(ResourceProvisioning.log_hdl)
+
+        self.account = RwcalYang.CloudAccount()
+        self.account.account_type = "cloudsim_proxy"
+        self.account.cloudsim_proxy.host = "192.168.122.1"
+
+        self.log = log
+        self.loop = loop
+        self.nvms = 1
+
+        self._vms = []
+
+    @property
+    def cal(self):
+        return ResourceProvisioning.cal_interface
+
+    def create(self):
+        """Create all of the necessary resources"""
+
+        rift_root = os.environ['RIFT_ROOT']
+        image = self.create_image("%s/images/rift-root-latest.qcow2" % (rift_root))
+
+        # Create a VM
+        for index in range(self.nvms):
+            self._vms.append(self.create_vm(image, index))
+
+        # Start the VMs
+        for vm in self._vms:
+            self.cal.start_vm(self.account, vm.vm_id)
+
+    def destroy(self):
+        """Destroy all of the provided resources"""
+
+        for container in lxc.containers():
+            lxc.stop(container)
+
+        for container in lxc.containers():
+            if not ("REUSE_LXC" in os.environ and container == "rwm0"):
+                lxc.destroy(container)
+
+    def create_image(self, location):
+        """Creates and returns a CAL image"""
+
+        image = RwcalYang.ImageInfoItem()
+        image.name = "rift-lxc-image"
+        image.location = location
+        image.disk_format = "qcow2"
+        rc, image.id = self.cal.create_image(self.account, image)
+        return image
+
+    def create_network(self, network_name, subnet):
+        """Creates and returns a CAL network"""
+
+        network = RwcalYang.NetworkInfoItem(
+                network_name=network_name,
+                subnet=subnet,
+                )
+        rc, network.network_id = self.cal.create_network(self.account, network)
+        return network
+
+    def create_vm(self, image, index):
+        """Returns a VM
+
+        Arguments:
+            image - the image used to create the VM
+            index - an index used to label the VM
+
+        Returns:
+            A VM object
+
+        """
+        vm = RwcalYang.VMInfoItem()
+        vm.vm_name = 'rift-s{}'.format(index + 1)
+        vm.image_id = image.id
+        vm.user_tags.node_id = str(uuid.uuid4())
+
+        # Use a context manager so the template file handle is closed promptly
+        with open(os.path.join(
+                os.environ['RIFT_INSTALL'],
+                'etc/userdata-template',
+                )) as template_file:
+            user_data_template_str = template_file.read()
+
+        # The management network interface IP address; this is where
+        # the salt master is reachable
+        mgmt_interface_ip = "192.168.122.1"
+
+        # Create salt-stack userdata
+        vm.cloud_init.userdata = user_data_template_str.format(
+                master_ip=mgmt_interface_ip,
+                lxcname=vm.user_tags.node_id,
+                )
+
+        rc, vm.vm_id = self.cal.create_vm(self.account, vm)
+
+        return vm
+
+    def wait_vm_salt_connection(self, vm, timeout_secs=600):
+        """ Wait for vm salt minion to reach up state with master """
+
+        vm_node_id = vm.user_tags.node_id
+        start_time = time.time()
+        self.log.debug("Waiting up to %s seconds for node id %s",
+                       timeout_secs, vm_node_id)
+        while (time.time() - start_time) < timeout_secs:
+            try:
+                stdout = subprocess.check_output(
+                        shlex.split('salt %s test.ping' % vm_node_id),
+                        universal_newlines=True,
+                        )
+            except subprocess.CalledProcessError:
+                # The master may not know the minion yet; back off before retrying
+                time.sleep(5)
+                continue
+
+            up_minions = stdout.splitlines()
+            for line in up_minions:
+                if "True" in line:
+                    return
+
+            # The minion responded but is not up yet; avoid a busy-wait loop
+            time.sleep(5)
+
+        raise SaltConnectionTimeoutError(
+                "Salt id %s did not enter UP state in %s seconds" % (
+                    vm_node_id, timeout_secs
+                    )
+                )
+
+    def wait_ready(self):
+        """ Wait for all resources to become ready """
+
+        self.log.info("Waiting for all VM's to make a salt minion connection")
+        for i, vm in enumerate(self._vms):
+            self.wait_vm_salt_connection(vm)
+            self.log.debug(
+                "Node id %s came up (%s/%s)",
+                vm.user_tags.node_id, i + 1, len(self._vms)
+                )
+
+    def create_port(self, network, vm, index):
+        """Returns a port
+
+        Arguments:
+            network - a network object
+            vm      - a VM object
+            index   - an index to label the port
+
+        Returns:
+            Returns a port object
+
+        """
+        port = RwcalYang.PortInfoItem()
+        port.port_name = "eth1"
+        port.network_id = network.network_id
+        port.vm_id = vm.vm_id
+
+        rc, port.port_id = self.cal.create_port(self.account, port)
+        return port
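The tasklet above advances through its lifecycle with two dispatch tables in on_dts_state_change: one mapping the current DTS state to the coroutine to run, the other mapping it to the next state to request. A minimal, self-contained sketch of that pattern follows (toy state names and modern async syntax stand in for the rwdts types; illustration only, not the RIFT.ware API):

    import asyncio

    # Toy stand-ins for rwdts.State values -- not the real enum.
    INIT, REGN_COMPLETE, CONFIG, RUN = "INIT", "REGN_COMPLETE", "CONFIG", "RUN"

    class ToyTasklet:
        def __init__(self):
            self.switch = {INIT: REGN_COMPLETE, CONFIG: RUN}  # state -> next state
            self.handlers = {INIT: self.init, RUN: self.run}  # state -> action

        async def init(self):
            print("init: provision resources")

        async def run(self):
            print("run: steady state")

        async def on_state_change(self, state, set_state):
            handler = self.handlers.get(state)
            if handler is not None:
                await handler()              # run the action for this state
            next_state = self.switch.get(state)
            if next_state is not None:
                set_state(next_state)        # ask the bus to advance

    async def main():
        t = ToyTasklet()
        await t.on_state_change(INIT, lambda s: print("-> request", s))
        await t.on_state_change(CONFIG, lambda s: print("-> request", s))

    asyncio.run(main())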
diff --git a/common/plugins/rwcntmgrtasklet/rwcntmgrtasklet.py b/common/plugins/rwcntmgrtasklet/rwcntmgrtasklet.py
new file mode 100755 (executable)
index 0000000..e47d580
--- /dev/null
+++ b/common/plugins/rwcntmgrtasklet/rwcntmgrtasklet.py
@@ -0,0 +1,28 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwcntmgrtasklet
+
+class Tasklet(rift.tasklets.rwcntmgrtasklet.ContainerManager):
+    pass
+
+# vim: sw=4
diff --git a/common/plugins/yang/CMakeLists.txt b/common/plugins/yang/CMakeLists.txt
new file mode 100644 (file)
index 0000000..be583ed
--- /dev/null
+++ b/common/plugins/yang/CMakeLists.txt
@@ -0,0 +1,42 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 2015/11/20
+# 
+
+##
+# Yang targets
+##
+rift_add_yang_target(
+  TARGET rwcloud_yang
+  YANG_FILES rw-cloud.yang rw-sdn.yang
+  COMPONENT ${PKG_LONG_NAME}
+  LIBRARIES
+    rwsdn_yang_gen
+    rwcal_yang_gen
+  DEPENDS
+    rwcal_yang
+)
+
+rift_add_yang_target(
+  TARGET rwconfig_agent_yang
+  YANG_FILES rw-config-agent.yang
+  COMPONENT ${PKG_LONG_NAME}
+  LIBRARIES
+    rwcal_yang_gen
+  DEPENDS
+    rwcal_yang
+)
diff --git a/common/plugins/yang/rw-cloud.tailf.yang b/common/plugins/yang/rw-cloud.tailf.yang
new file mode 100644 (file)
index 0000000..3838ab7
--- /dev/null
+++ b/common/plugins/yang/rw-cloud.tailf.yang
@@ -0,0 +1,41 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-cloud-annotation
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-cloud-annotation";
+  prefix "rw-cloud-ann";
+
+  import rw-cloud {
+    prefix rw-cloud;
+  }
+
+  import tailf-common {
+    prefix tailf;
+  }
+
+  tailf:annotate "/rw-cloud:cloud/rw-cloud:account/rw-cloud:connection-status" {
+    tailf:callpoint rw_callpoint;
+  }
+
+  tailf:annotate "/rw-cloud:update-cloud-status" {
+    tailf:actionpoint rw_actionpoint;
+  }
+}
diff --git a/common/plugins/yang/rw-cloud.yang b/common/plugins/yang/rw-cloud.yang
new file mode 100644 (file)
index 0000000..f580e93
--- /dev/null
+++ b/common/plugins/yang/rw-cloud.yang
@@ -0,0 +1,106 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-cloud
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-cloud";
+  prefix "rw-cloud";
+
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-cli-ext {
+    prefix "rwcli";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rwcal {
+    prefix "rwcal";
+  }
+
+  import rw-sdn {
+    prefix "rw-sdn";
+  }
+
+  revision 2015-09-14 {
+    description
+      "Initial revision.";
+  }
+
+  container cloud {
+    rwpb:msg-new CloudConfig;
+    list account {
+      rwpb:msg-new CloudAccount;
+      description "Configure Cloud Accounts";
+
+      max-elements 16;
+      key "name";
+
+      leaf name {
+        mandatory true;
+        type string {
+            length "1..255";
+        }
+      }
+
+      leaf sdn-account {
+        description "Configured SDN account associated with this cloud account";
+        type leafref {
+          path "/rw-sdn:sdn-account/rw-sdn:name";
+        }
+      }
+
+      uses rwcal:provider-auth;
+      uses rwcal:connection-status;
+    }
+  }
+
+  rpc update-cloud-status {
+    description "Begin cloud account connection status";
+    input {
+      leaf cloud-account {
+        mandatory true;
+        description
+          "The cloud account name to update connection status for";
+        type string;
+      }
+    }
+  }
+
+  notification cloud-notif {
+    description "Notification for cloud account credentials";
+    leaf name {
+      description "Cloud account name";
+      type string;
+    }
+
+    leaf message {
+      description "Notification message";
+      type string;
+    }
+  }
+
+}
+
diff --git a/common/plugins/yang/rw-config-agent.tailf.yang b/common/plugins/yang/rw-config-agent.tailf.yang
new file mode 100644 (file)
index 0000000..85f6eca
--- /dev/null
+++ b/common/plugins/yang/rw-config-agent.tailf.yang
@@ -0,0 +1,41 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-config-agent-annotation
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-config-agent-annotation";
+  prefix "rw-config-agent-ann";
+
+  import rw-config-agent {
+    prefix rw-config-agent;
+  }
+
+  import tailf-common {
+    prefix tailf;
+  }
+
+  tailf:annotate "/rw-config-agent:config-agent/rw-config-agent:account/rw-config-agent:connection-status" {
+    tailf:callpoint rw_callpoint;
+  }
+
+  tailf:annotate "/rw-config-agent:update-cfg-agent-status" {
+    tailf:actionpoint rw_actionpoint;
+  }
+}
diff --git a/common/plugins/yang/rw-config-agent.yang b/common/plugins/yang/rw-config-agent.yang
new file mode 100644 (file)
index 0000000..1740af3
--- /dev/null
+++ b/common/plugins/yang/rw-config-agent.yang
@@ -0,0 +1,116 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-config-agent
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-config-agent";
+  prefix "rw-config-agent";
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import ietf-inet-types {
+    prefix "inet";
+  }
+
+  import rwcal {
+    prefix "rwcal";
+  }
+
+  revision 2016-02-04 {
+    description
+      "Initial revision.";
+  }
+
+  typedef config-agent-account-type {
+    description "config agent account type";
+    type enumeration {
+      enum juju;
+      enum riftca;
+    }
+  }
+
+  container config-agent {
+    rwpb:msg-new ConfigAgent;
+
+    list account {
+      rwpb:msg-new ConfigAgentAccount;
+      key "name";
+
+      description "List of configuration agent accounts";
+
+      leaf name {
+        description "Name of this config agent account";
+        type string;
+      }
+
+      leaf account-type {
+        description
+            "Default account type is Rift Configuration Agent (RiftCA)";
+        type config-agent-account-type;
+        default "riftca";
+      }
+
+      choice config-agent-account-type {
+        case juju {
+          description
+            "Configure the VNF through Juju.";
+          container juju {
+            leaf ip-address {
+                description "Juju host IP address.";
+                type inet:ip-address;
+            }
+            leaf port {
+                description 
+                    "Juju host port number. Default 17070.";
+                type inet:port-number;
+                default 17070;
+            }
+            leaf user {
+                description 
+                    "User name to connect to Juju host. Default user-admin.";
+                type string;
+                default "user-admin" ;
+            }
+            leaf secret {
+                description 
+                    "Admin secret or password for Juju host.";
+                type string;
+            }
+          }
+        }
+      }
+      uses rwcal:connection-status;
+    }
+  }
+
+  rpc update-cfg-agent-status {
+    description "Begin config agent account connection status";
+    input {
+      leaf cfg-agent-account {
+        mandatory true;
+        description
+          "The config agent account name to update connection status for";
+        type string;
+      }
+    }
+  }
+}
diff --git a/common/plugins/yang/rw-sdn.tailf.yang b/common/plugins/yang/rw-sdn.tailf.yang
new file mode 100644 (file)
index 0000000..89ee4ec
--- /dev/null
+++ b/common/plugins/yang/rw-sdn.tailf.yang
@@ -0,0 +1,40 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-sdn-annotation
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-sdn-annotation";
+  prefix "rw-sdn-ann";
+
+  import rw-sdn {
+    prefix rw-sdn;
+  }
+  import tailf-common {
+    prefix tailf;
+  }
+
+  tailf:annotate "/rw-sdn:sdn-account/rw-sdn:connection-status" {
+    tailf:callpoint rw_callpoint;
+  }
+
+  tailf:annotate "/rw-sdn:update-sdn-status" {
+    tailf:actionpoint rw_actionpoint;
+  }
+}
diff --git a/common/plugins/yang/rw-sdn.yang b/common/plugins/yang/rw-sdn.yang
new file mode 100644 (file)
index 0000000..5b82d47
--- /dev/null
+++ b/common/plugins/yang/rw-sdn.yang
@@ -0,0 +1,71 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-sdn
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-sdn";
+  prefix "rw-sdn";
+
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-cli-ext {
+    prefix "rwcli";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rwsdn {
+    prefix "rwsdn";
+  }
+
+  revision 2015-09-14 {
+    description
+      "Initial revision.";
+  }
+
+  list sdn-account {
+    rwpb:msg-new SDNAccountConfig;
+
+    key "name";
+    leaf name {
+      type string;
+    }
+
+    uses rwsdn:sdn-provider-auth;
+    uses rwsdn:connection-status;
+  }
+
+  rpc update-sdn-status {
+    description "Begin sdn accunt connection status";
+    input {
+      leaf sdn-account {
+        mandatory true;
+        description
+          "The sdn account name to update connection status for";
+        type string;
+      }
+    }
+  }
+}
diff --git a/common/python/CMakeLists.txt b/common/python/CMakeLists.txt
new file mode 100644 (file)
index 0000000..658d525
--- /dev/null
+++ b/common/python/CMakeLists.txt
@@ -0,0 +1,158 @@
+# Creation Date: 2016/1/12
+# RIFT_IO_STANDARD_CMAKE_COPYRIGHT_HEADER(END)
+
+cmake_minimum_required(VERSION 2.8)
+
+
+rift_python_install_tree(
+  FILES
+    rift/mano/__init__.py
+    rift/mano/ncclient.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY
+  )
+
+rift_python_install_tree(
+  FILES
+    rift/mano/cloud/__init__.py
+    rift/mano/cloud/accounts.py
+    rift/mano/cloud/config.py
+    rift/mano/cloud/operdata.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY
+  )
+
+rift_python_install_tree(
+  FILES
+    rift/mano/config_agent/operdata.py
+    rift/mano/config_agent/__init__.py
+    rift/mano/config_agent/config.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY
+  )
+
+
+rift_python_install_tree(
+  FILES
+    rift/mano/dts/__init__.py
+    rift/mano/dts/core.py
+    rift/mano/dts/subscriber/__init__.py
+    rift/mano/dts/subscriber/core.py
+    rift/mano/dts/subscriber/store.py
+    rift/mano/dts/subscriber/ns_subscriber.py
+    rift/mano/dts/subscriber/vnf_subscriber.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY
+  )
+
+rift_python_install_tree(
+  FILES
+    rift/mano/config_data/__init__.py
+    rift/mano/config_data/config.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY
+  )
+
+rift_python_install_tree(
+  FILES
+    rift/mano/tosca_translator/__init__.py
+    rift/mano/tosca_translator/translator_logging.conf
+    rift/mano/tosca_translator/compare_desc.py
+    rift/mano/tosca_translator/shell.py
+    rift/mano/tosca_translator/rwmano/tosca_translator.py
+    rift/mano/tosca_translator/rwmano/translate_inputs.py
+    rift/mano/tosca_translator/rwmano/__init__.py
+    rift/mano/tosca_translator/rwmano/translate_outputs.py
+    rift/mano/tosca_translator/rwmano/translate_node_templates.py
+    rift/mano/tosca_translator/rwmano/syntax/mano_parameter.py
+    rift/mano/tosca_translator/rwmano/syntax/mano_resource.py
+    rift/mano/tosca_translator/rwmano/syntax/__init__.py
+    rift/mano/tosca_translator/rwmano/syntax/mano_template.py
+    rift/mano/tosca_translator/rwmano/syntax/mano_output.py
+    rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py
+    rift/mano/tosca_translator/rwmano/tosca/__init__.py
+    rift/mano/tosca_translator/rwmano/tosca/tosca_config_primitives.py
+    rift/mano/tosca_translator/rwmano/tosca/tosca_network_port.py
+    rift/mano/tosca_translator/rwmano/tosca/tosca_network_network.py
+    rift/mano/tosca_translator/rwmano/tosca/tosca_compute.py
+    rift/mano/tosca_translator/rwmano/tosca/tosca_scaling_group.py
+    rift/mano/tosca_translator/rwmano/tosca/tosca_initial_config.py
+    rift/mano/tosca_translator/common/__init__.py
+    rift/mano/tosca_translator/common/utils.py
+    rift/mano/tosca_translator/common/exception.py
+    rift/mano/tosca_translator/custom/__init__.py
+    rift/mano/tosca_translator/custom/rwmano/__init__.py
+    rift/mano/tosca_translator/conf/translator.conf
+    rift/mano/tosca_translator/conf/__init__.py
+    rift/mano/tosca_translator/conf/config.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY
+  )
+
+rift_python_install_tree(
+  FILES
+    rift/mano/utils/__init__.py
+    rift/mano/utils/compare_desc.py
+    rift/mano/utils/juju_api.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY
+  )
+
+rift_python_install_tree(
+  FILES
+    rift/mano/yang_translator/__init__.py
+    rift/mano/yang_translator/translator_logging.conf
+    rift/mano/yang_translator/shell.py
+    rift/mano/yang_translator/compare_desc.py
+    rift/mano/yang_translator/conf/config.py
+    rift/mano/yang_translator/conf/translator.conf
+    rift/mano/yang_translator/conf/__init__.py
+    rift/mano/yang_translator/rwmano/yang_translator.py
+    rift/mano/yang_translator/rwmano/translate_descriptors.py
+    rift/mano/yang_translator/rwmano/__init__.py
+    rift/mano/yang_translator/rwmano/yang/yang_vld.py
+    rift/mano/yang_translator/rwmano/yang/yang_vdu.py
+    rift/mano/yang_translator/rwmano/yang/yang_vnfd.py
+    rift/mano/yang_translator/rwmano/yang/yang_nsd.py
+    rift/mano/yang_translator/rwmano/yang/__init__.py
+    rift/mano/yang_translator/rwmano/syntax/tosca_template.py
+    rift/mano/yang_translator/rwmano/syntax/tosca_resource.py
+    rift/mano/yang_translator/rwmano/syntax/__init__.py
+    rift/mano/yang_translator/custom/__init__.py
+    rift/mano/yang_translator/custom/rwmano/__init__.py
+    rift/mano/yang_translator/common/utils.py
+    rift/mano/yang_translator/common/exception.py
+    rift/mano/yang_translator/common/__init__.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY
+  )
+
+set(TRANSLATOR_SCRIPTS
+  ${CMAKE_CURRENT_SOURCE_DIR}/rift/mano/tosca_translator/tosca-translator
+  ${CMAKE_CURRENT_SOURCE_DIR}/rift/mano/yang_translator/yang-translator)
+
+install(
+  FILES ${TRANSLATOR_SCRIPTS}
+  DESTINATION usr/bin
+  COMPONENT ${PKG_LONG_NAME}
+  )
+
+set(subdirs
+  test
+  )
+
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/common/python/rift/mano/__init__.py b/common/python/rift/mano/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/common/python/rift/mano/cloud/__init__.py b/common/python/rift/mano/cloud/__init__.py
new file mode 100644 (file)
index 0000000..4317d51
--- /dev/null
+++ b/common/python/rift/mano/cloud/__init__.py
@@ -0,0 +1,30 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from .accounts import (
+    CloudAccount,
+    CloudAccountCalError,
+    )
+
+from .config import (
+    CloudAccountConfigSubscriber,
+    CloudAccountConfigCallbacks
+    )
+
+from .operdata import (
+    CloudAccountDtsOperdataHandler,
+    )
diff --git a/common/python/rift/mano/cloud/accounts.py b/common/python/rift/mano/cloud/accounts.py
new file mode 100644 (file)
index 0000000..57ca55f
--- /dev/null
+++ b/common/python/rift/mano/cloud/accounts.py
@@ -0,0 +1,181 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import sys
+import asyncio
+from gi import require_version
+require_version('RwcalYang', '1.0')
+require_version('RwTypes', '1.0')
+require_version('RwCloudYang', '1.0')
+
+from gi.repository import (
+        RwTypes,
+        RwcalYang,
+        RwCloudYang,
+        )
+import rw_peas
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+class PluginLoadingError(Exception):
+    pass
+
+
+class CloudAccountCalError(Exception):
+    pass
+
+
+class CloudAccount(object):
+    def __init__(self, log, rwlog_hdl, account_msg):
+        self._log = log
+        self._account_msg = account_msg.deep_copy()
+
+        self._cal_plugin = None
+        self._engine = None
+
+        self._cal = self.plugin.get_interface("Cloud")
+        self._cal.init(rwlog_hdl)
+
+        self._status = RwCloudYang.CloudAccount_ConnectionStatus(
+                status="unknown",
+                details="Connection status lookup not started"
+                )
+
+        self._validate_task = None
+
+    @property
+    def plugin(self):
+        if self._cal_plugin is None:
+            try:
+                self._cal_plugin = rw_peas.PeasPlugin(
+                        getattr(self._account_msg, self.account_type).plugin_name,
+                        'RwCal-1.0',
+                        )
+
+            except AttributeError as e:
+                raise PluginLoadingError(str(e))
+
+            self._engine, _, _ = self._cal_plugin()
+
+        return self._cal_plugin
+
+    def _wrap_status_fn(self, fn, *args, **kwargs):
+        ret = fn(*args, **kwargs)
+        rw_status = ret[0]
+        if rw_status != RwTypes.RwStatus.SUCCESS:
+            msg = "%s returned %s" % (fn.__name__, str(rw_status))
+            self._log.error(msg)
+            raise CloudAccountCalError(msg)
+
+        # If there was only one other return value besides rw_status, then just
+        # return that element.  Otherwise return the rest of the return values
+        # as a list.
+        return ret[1] if len(ret) == 2 else ret[1:]
+
+    @property
+    def cal(self):
+        return self._cal
+
+    @property
+    def name(self):
+        return self._account_msg.name
+
+    @property
+    def account_msg(self):
+        return self._account_msg
+
+    @property
+    def cal_account_msg(self):
+        return RwcalYang.CloudAccount.from_dict(
+                self.account_msg.as_dict(),
+                ignore_missing_keys=True,
+                )
+
+    def cloud_account_msg(self, account_dict):
+        self._account_msg = RwCloudYang.CloudAccount.from_dict(account_dict)
+
+    @property
+    def account_type(self):
+        return self._account_msg.account_type
+
+    @property
+    def connection_status(self):
+        return self._status
+
+    def update_from_cfg(self, cfg):
+        self._log.debug("Updating parent CloudAccount to %s", cfg)
+
+        # Hack to catch updates triggered from apply_callback when an sdn-account
+        # is removed from a cloud-account. To be fixed properly once updates are
+        # handled in general.
+        if (self.account_msg.name == cfg.name
+                and self.account_msg.account_type == cfg.account_type):
+            return
+
+        if cfg.has_field("sdn_account"):
+            self.account_msg.sdn_account = cfg.sdn_account
+        else:
+            raise NotImplementedError("Update cloud account not yet supported")
+
+    def create_image(self, image_info_msg):
+        image_id = self._wrap_status_fn(
+                self.cal.create_image, self.cal_account_msg, image_info_msg
+                )
+
+        return image_id
+
+    def get_image_list(self):
+        self._log.debug("Getting image list from account: %s", self.name)
+        resources = self._wrap_status_fn(
+                self.cal.get_image_list, self.cal_account_msg
+                )
+
+        return resources.imageinfo_list
+
+    @asyncio.coroutine
+    def validate_cloud_account_credentials(self, loop):
+        self._log.debug("Validating Cloud Account credentials %s", self._account_msg)
+        self._status = RwCloudYang.CloudAccount_ConnectionStatus(
+                status="validating",
+                details="Cloud account connection validation in progress"
+                )
+        rwstatus, status = yield from loop.run_in_executor(
+                None,
+                self._cal.validate_cloud_creds,
+                self.cal_account_msg,
+                )
+        if rwstatus == RwTypes.RwStatus.SUCCESS:
+            self._status = RwCloudYang.CloudAccount_ConnectionStatus.from_dict(status.as_dict())
+        else:
+            self._status = RwCloudYang.CloudAccount_ConnectionStatus(
+                    status="failure",
+                    details="Error when calling CAL validate cloud creds"
+                    )
+
+        self._log.info("Got cloud account validation response: %s", self._status)
+
+    def start_validate_credentials(self, loop):
+        if self._validate_task is not None:
+            self._validate_task.cancel()
+            self._validate_task = None
+
+        self._validate_task = asyncio.ensure_future(
+                self.validate_cloud_account_credentials(loop),
+                loop=loop
+                )
+
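Every CAL call made by CloudAccount follows a (status, result...) return convention, which _wrap_status_fn collapses: raise on a non-success status, otherwise strip the status and return a bare value when exactly one result remains. A toy rendering of that rule with fake status codes and fake CAL calls (nothing below touches the real RwTypes or CAL APIs):

    SUCCESS, FAILURE = 0, 1  # hypothetical stand-ins for RwTypes.RwStatus

    def wrap_status_fn(fn, *args, **kwargs):
        ret = fn(*args, **kwargs)
        if ret[0] != SUCCESS:
            raise RuntimeError("%s returned %s" % (fn.__name__, ret[0]))
        # One remaining value -> return it bare; several -> return the rest.
        return ret[1] if len(ret) == 2 else ret[1:]

    def create_image(account, image):     # fake CAL call, single result
        return (SUCCESS, "image-id-1")

    def get_vm(account, vm_id):           # fake CAL call, two results
        return (SUCCESS, "vm-id-1", {"state": "active"})

    assert wrap_status_fn(create_image, None, None) == "image-id-1"
    assert wrap_status_fn(get_vm, None, "x") == ("vm-id-1", {"state": "active"})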
diff --git a/common/python/rift/mano/cloud/config.py b/common/python/rift/mano/cloud/config.py
new file mode 100644 (file)
index 0000000..1b1847c
--- /dev/null
+++ b/common/python/rift/mano/cloud/config.py
@@ -0,0 +1,249 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import rw_peas
+
+import gi
+gi.require_version('RwDts', '1.0')
+import rift.tasklets
+
+from gi.repository import (
+    RwcalYang as rwcal,
+    RwDts as rwdts,
+    ProtobufC,
+    )
+
+from . import accounts
+
+class CloudAccountNotFound(Exception):
+    pass
+
+
+class CloudAccountError(Exception):
+    pass
+
+
+def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
+    # Unfortunately, it is currently difficult to figure out exactly what has
+    # changed in this xact without Pbdelta support (RIFT-4916).
+    # As a workaround, we can fetch the pre and post xact elements and
+    # perform a comparison to figure out adds/deletes/updates
+    xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
+    curr_cfgs = list(dts_member_reg.elements)
+
+    xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
+    curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
+
+    # Find Adds
+    added_keys = set(xact_key_map) - set(curr_key_map)
+    added_cfgs = [xact_key_map[key] for key in added_keys]
+
+    # Find Deletes
+    deleted_keys = set(curr_key_map) - set(xact_key_map)
+    deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
+
+    # Find Updates
+    updated_keys = set(curr_key_map) & set(xact_key_map)
+    updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
+
+    return added_cfgs, deleted_cfgs, updated_cfgs
+
+
+class CloudAccountConfigCallbacks(object):
+    def __init__(self,
+                 on_add_apply=None, on_add_prepare=None,
+                 on_delete_apply=None, on_delete_prepare=None):
+
+        @asyncio.coroutine
+        def prepare_noop(*args, **kwargs):
+            pass
+
+        def apply_noop(*args, **kwargs):
+            pass
+
+        self.on_add_apply = on_add_apply
+        self.on_add_prepare = on_add_prepare
+        self.on_delete_apply = on_delete_apply
+        self.on_delete_prepare = on_delete_prepare
+
+        for f in ('on_add_apply', 'on_delete_apply'):
+            ref = getattr(self, f)
+            if ref is None:
+                setattr(self, f, apply_noop)
+                continue
+
+            if asyncio.iscoroutinefunction(ref):
+                raise ValueError('%s cannot be a coroutine' % (f,))
+
+        for f in ('on_add_prepare', 'on_delete_prepare'):
+            ref = getattr(self, f)
+            if ref is None:
+                setattr(self, f, prepare_noop)
+                continue
+
+            if not asyncio.iscoroutinefunction(ref):
+                raise ValueError("%s must be a coroutine" % f)
+
+
+class CloudAccountConfigSubscriber(object):
+    XPATH = "C,/rw-cloud:cloud/rw-cloud:account"
+
+    def __init__(self, dts, log, rwlog_hdl, cloud_callbacks):
+        self._dts = dts
+        self._log = log
+        self._rwlog_hdl = rwlog_hdl
+        self._reg = None
+
+        self.accounts = {}
+
+        self._cloud_callbacks = cloud_callbacks
+
+    def add_account(self, account_msg):
+        self._log.info("adding cloud account: {}".format(account_msg))
+
+        account = accounts.CloudAccount(self._log, self._rwlog_hdl, account_msg)
+        self.accounts[account.name] = account
+
+        self._cloud_callbacks.on_add_apply(account)
+
+    def delete_account(self, account_name):
+        self._log.info("deleting cloud account: {}".format(account_name))
+        del self.accounts[account_name]
+
+        self._cloud_callbacks.on_delete_apply(account_name)
+
+    def update_account(self, account_msg):
+        """ Update an existing cloud account
+
+        In order to simplify update, turn an update into a delete followed by
+        an add.  The drawback to this approach is that we will not support
+        updates of an "in-use" cloud account, but this seems like a
+        reasonable trade-off.
+
+
+        Arguments:
+            account_msg - The cloud account config message
+        """
+        self._log.info("updating cloud account: {}".format(account_msg))
+
+        self.delete_account(account_msg.name)
+        self.add_account(account_msg)
+
+    def register(self):
+        @asyncio.coroutine
+        def apply_config(dts, acg, xact, action, _):
+            self._log.debug("Got cloud account apply config (xact: %s) (action: %s)", xact, action)
+
+            if xact.xact is None:
+                if action == rwdts.AppconfAction.INSTALL:
+                    curr_cfg = self._reg.elements
+                    for cfg in curr_cfg:
+                        self._log.debug("Cloud account being re-added after restart.")
+                        if not cfg.has_field('account_type'):
+                            raise CloudAccountError("New cloud account must contain account_type field.")
+                        self.add_account(cfg)
+                else:
+                    # When RIFT first comes up, an INSTALL is invoked with the current
+                    # config. Since confd doesn't actually persist data, this carries
+                    # no data, so skip it for now.
+                    self._log.debug("No xact handle.  Skipping apply config")
+
+                return
+
+            add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
+                    dts_member_reg=self._reg,
+                    xact=xact,
+                    key_name="name",
+                    )
+
+            # Handle Deletes
+            for cfg in delete_cfgs:
+                self.delete_account(cfg.name)
+
+            # Handle Adds
+            for cfg in add_cfgs:
+                self.add_account(cfg)
+
+            # Handle Updates
+            for cfg in update_cfgs:
+                self.update_account(cfg)
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+            """ Prepare callback from DTS for Cloud Account """
+
+            action = xact_info.query_action
+            self._log.debug("Cloud account on_prepare config received (action: %s): %s",
+                            xact_info.query_action, msg)
+
+            if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+                if msg.name in self.accounts:
+                    self._log.debug("Cloud account already exists. Invoking update request")
+
+                    # Since updates are handled by a delete followed by an add, invoke the
+                    # delete prepare callbacks to give clients an opportunity to reject.
+                    yield from self._cloud_callbacks.on_delete_prepare(msg.name)
+
+                else:
+                    self._log.debug("Cloud account does not already exist. Invoking on_prepare add request")
+                    if not msg.has_field('account_type'):
+                        raise CloudAccountError("New cloud account must contain account_type field.")
+
+                    account = accounts.CloudAccount(self._log, self._rwlog_hdl, msg)
+                    yield from self._cloud_callbacks.on_add_prepare(account)
+
+            elif action == rwdts.QueryAction.DELETE:
+                # Check if the entire cloud account got deleted
+                fref = ProtobufC.FieldReference.alloc()
+                fref.goto_whole_message(msg.to_pbcm())
+                if fref.is_field_deleted():
+                    yield from self._cloud_callbacks.on_delete_prepare(msg.name)
+
+                else:
+                    fref.goto_proto_name(msg.to_pbcm(), "sdn_account")
+                    if fref.is_field_deleted():
+                        # SDN account disassociated from cloud account
+                        account = self.accounts[msg.name]
+                        dict_account = account.account_msg.as_dict()
+                        del dict_account["sdn_account"]
+                        account.cloud_account_msg(dict_account)
+                    else:
+                        self._log.error("Deleting individual fields for cloud account not supported")
+                        xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+                        return
+
+            else:
+                self._log.error("Action (%s) NOT SUPPORTED", action)
+                xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+                return
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        self._log.debug("Registering for Cloud Account config using xpath: %s",
+                        CloudAccountConfigSubscriber.XPATH,
+                        )
+
+        acg_handler = rift.tasklets.AppConfGroup.Handler(
+                        on_apply=apply_config,
+                        )
+
+        with self._dts.appconf_group_create(acg_handler) as acg:
+            self._reg = acg.register(
+                    xpath=CloudAccountConfigSubscriber.XPATH,
+                    flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+                    on_prepare=on_prepare,
+                    )
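The classification inside get_add_delete_update_cfgs is plain set arithmetic over the pre- and post-transaction key maps. A toy run with namedtuples standing in for config messages (illustrative only, no DTS involved):

    import collections

    Cfg = collections.namedtuple("Cfg", "name account_type")

    xact_cfgs = [Cfg("a", "openstack"), Cfg("b", "aws")]   # post-xact view
    curr_cfgs = [Cfg("b", "openstack"), Cfg("c", "aws")]   # current view

    xact_map = {c.name: c for c in xact_cfgs}
    curr_map = {c.name: c for c in curr_cfgs}

    added   = [xact_map[k] for k in set(xact_map) - set(curr_map)]  # a: added
    deleted = [curr_map[k] for k in set(curr_map) - set(xact_map)]  # c: deleted
    updated = [xact_map[k] for k in set(curr_map) & set(xact_map)
               if xact_map[k] != curr_map[k]]                       # b: updated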
diff --git a/common/python/rift/mano/cloud/operdata.py b/common/python/rift/mano/cloud/operdata.py
new file mode 100644 (file)
index 0000000..4878691
--- /dev/null
+++ b/common/python/rift/mano/cloud/operdata.py
@@ -0,0 +1,140 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+
+import gi
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwDts', '1.0')
+
+import rift.tasklets
+
+from gi.repository import (
+        RwCloudYang,
+        RwDts as rwdts,
+        )
+
+class CloudAccountNotFound(Exception):
+    pass
+
+
+class CloudAccountDtsOperdataHandler(object):
+    def __init__(self, dts, log, loop):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+
+        self.cloud_accounts = {}
+
+    def add_cloud_account(self, account):
+        self.cloud_accounts[account.name] = account
+        account.start_validate_credentials(self._loop)
+
+    def delete_cloud_account(self, account_name):
+        del self.cloud_accounts[account_name]
+
+    def get_saved_cloud_accounts(self, cloud_account_name):
+        ''' Get Cloud Account corresponding to passed name, or all saved accounts if name is None'''
+        saved_cloud_accounts = []
+
+        if cloud_account_name is None or cloud_account_name == "":
+            cloud_accounts = list(self.cloud_accounts.values())
+            saved_cloud_accounts.extend(cloud_accounts)
+        elif cloud_account_name in self.cloud_accounts:
+            account = self.cloud_accounts[cloud_account_name]
+            saved_cloud_accounts.append(account)
+        else:
+            errstr = "Cloud account {} does not exist".format(cloud_account_name)
+            raise KeyError(errstr)
+
+        return saved_cloud_accounts
+
+    @asyncio.coroutine
+    def create_notification(self, account):
+        xpath = "N,/rw-cloud:cloud-notif"
+        ac_status = RwCloudYang.YangNotif_RwCloud_CloudNotif()
+        ac_status.name = account.name
+        ac_status.message = account.connection_status.details
+
+        yield from self._dts.query_create(xpath, rwdts.XactFlag.ADVISE, ac_status)
+        self._log.info("Notification called by creating dts query: %s", ac_status)
+
+
+    def _register_show_status(self):
+        def get_xpath(cloud_name=None):
+            return "D,/rw-cloud:cloud/account{}/connection-status".format(
+                    "[name='%s']" % cloud_name if cloud_name is not None else ''
+                    )
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            path_entry = RwCloudYang.CloudAccount.schema().keyspec_to_entry(ks_path)
+            cloud_account_name = path_entry.key00.name
+            self._log.debug("Got show cloud connection status request: %s", ks_path.create_string())
+
+            try:
+                saved_accounts = self.get_saved_cloud_accounts(cloud_account_name)
+                for account in saved_accounts:
+                    connection_status = account.connection_status
+                    self._log.debug("Responding to cloud connection status request: %s", connection_status)
+                    xact_info.respond_xpath(
+                            rwdts.XactRspCode.MORE,
+                            xpath=get_xpath(account.name),
+                            msg=account.connection_status,
+                            )
+            except KeyError as e:
+                self._log.warning(str(e))
+                xact_info.respond_xpath(rwdts.XactRspCode.NA)
+                return
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        yield from self._dts.register(
+                xpath=get_xpath(),
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=on_prepare),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+    def _register_validate_rpc(self):
+        def get_xpath():
+            return "/rw-cloud:update-cloud-status"
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            if not msg.has_field("cloud_account"):
+                raise CloudAccountNotFound("Cloud account name not provided")
+
+            cloud_account_name = msg.cloud_account
+            try:
+                account = self.cloud_accounts[cloud_account_name]
+            except KeyError:
+                raise CloudAccountNotFound("Cloud account name %s not found" % cloud_account_name)
+
+            account.start_validate_credentials(self._loop)
+
+            yield from self.create_notification(account)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        yield from self._dts.register(
+                xpath=get_xpath(),
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=on_prepare
+                    ),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self._register_show_status()
+        yield from self._register_validate_rpc()
diff --git a/common/python/rift/mano/config_agent/__init__.py b/common/python/rift/mano/config_agent/__init__.py
new file mode 100644 (file)
index 0000000..5807e8d
--- /dev/null
@@ -0,0 +1,28 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from .config import (
+    ConfigAgentCallbacks,
+    ConfigAgentSubscriber
+    )
+
+from .operdata import (
+    ConfigAgentJobManager,
+    CfgAgentJobDtsHandler,
+    CfgAgentDtsOperdataHandler
+    )
+
diff --git a/common/python/rift/mano/config_agent/config.py b/common/python/rift/mano/config_agent/config.py
new file mode 100644 (file)
index 0000000..7500bac
--- /dev/null
@@ -0,0 +1,228 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import rw_peas
+
+import gi
+gi.require_version('RwDts', '1.0')
+import rift.tasklets
+
+from gi.repository import (
+    RwcalYang as rwcal,
+    RwDts as rwdts,
+    RwConfigAgentYang as rwcfg_agent,
+    ProtobufC,
+    )
+
+class ConfigAccountNotFound(Exception):
+    pass
+
+class ConfigAccountError(Exception):
+    pass
+
+
+def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
+    # Unfortunately, it is currently difficult to figure out exactly what
+    # changed in this xact without Pbdelta support (RIFT-4916).
+    # As a workaround, we fetch the pre- and post-xact elements and
+    # compare them to figure out adds/deletes/updates.
+    xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
+    curr_cfgs = list(dts_member_reg.elements)
+
+    xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
+    curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
+
+    # Find Adds
+    added_keys = set(xact_key_map) - set(curr_key_map)
+    added_cfgs = [xact_key_map[key] for key in added_keys]
+
+    # Find Deletes
+    deleted_keys = set(curr_key_map) - set(xact_key_map)
+    deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
+
+    # Find Updates
+    updated_keys = set(curr_key_map) & set(xact_key_map)
+    updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
+
+    return added_cfgs, deleted_cfgs, updated_cfgs
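+
+# A minimal sketch of the comparison above, with hypothetical config objects
+# keyed by "name":
+#
+#   xact_key_map = {"a": new_a, "b": b}    # post-xact view
+#   curr_key_map = {"a": old_a, "c": c}    # current view
+#
+#   added_cfgs   == [b]       # keys only in the xact view
+#   deleted_cfgs == [c]       # keys only in the current view
+#   updated_cfgs == [new_a]   # keys in both views, where new_a != old_a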
+
+
+class ConfigAgentCallbacks(object):
+    def __init__(self,
+                 on_add_apply=None, on_add_prepare=None,
+                 on_delete_apply=None, on_delete_prepare=None):
+
+        @asyncio.coroutine
+        def prepare_noop(*args, **kwargs):
+            pass
+
+        def apply_noop(*args, **kwargs):
+            pass
+
+        self.on_add_apply = on_add_apply
+        self.on_add_prepare = on_add_prepare
+        self.on_delete_apply = on_delete_apply
+        self.on_delete_prepare = on_delete_prepare
+
+        for f in ('on_add_apply', 'on_delete_apply'):
+            ref = getattr(self, f)
+            if ref is None:
+                setattr(self, f, apply_noop)
+                continue
+
+            if asyncio.iscoroutinefunction(ref):
+                raise ValueError('%s cannot be a coroutine' % (f,))
+
+        for f in ('on_add_prepare', 'on_delete_prepare'):
+            ref = getattr(self, f)
+            if ref is None:
+                setattr(self, f, prepare_noop)
+                continue
+
+            if not asyncio.iscoroutinefunction(ref):
+                raise ValueError("%s must be a coroutine" % f)
+
+
+class ConfigAgentSubscriber(object):
+    XPATH = "C,/rw-config-agent:config-agent/account"
+
+    def __init__(self, dts, log, config_callbacks):
+        self._dts = dts
+        self._log = log
+        self._reg = None
+
+        self.accounts = {}
+
+        self._config_callbacks = config_callbacks
+
+    def add_account(self, account_msg):
+        self._log.info("adding config account: {}".format(account_msg))
+
+        self.accounts[account_msg.name] = account_msg
+
+        self._config_callbacks.on_add_apply(account_msg)
+
+    def delete_account(self, account_msg):
+        self._log.info("deleting config account: {}".format(account_msg.name))
+        del self.accounts[account_msg.name]
+
+        self._config_callbacks.on_delete_apply(account_msg)
+
+    def update_account(self, account_msg):
+        """ Update an existing config-agent account
+
+        In order to simplify update, turn an update into a delete followed by
+        an add.  The drawback to this approach is that we will not support
+        updates of an "in-use" config-agent account, but this seems like a
+        reasonable trade-off.
+
+        Arguments:
+            account_msg - The config-agent account config message
+        """
+
+        self._log.info("updating config-agent account: {}".format(account_msg))
+        self.delete_account(account_msg)
+        self.add_account(account_msg)
+
+    def register(self):
+        def apply_config(dts, acg, xact, action, _):
+            self._log.debug("Got config account apply config (xact: %s) (action: %s)", xact, action)
+
+            if xact.xact is None:
+                # When RIFT first comes up, an INSTALL is called with the current config.
+                # Since confd doesn't actually persist data, this never has any data, so
+                # skip this for now.
+                self._log.debug("No xact handle.  Skipping apply config")
+                return
+
+            add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
+                    dts_member_reg=self._reg,
+                    xact=xact,
+                    key_name="name",
+                    )
+
+            # Handle Deletes
+            for cfg in delete_cfgs:
+                self.delete_account(cfg)
+
+            # Handle Adds
+            for cfg in add_cfgs:
+                self.add_account(cfg)
+
+            # Handle Updates
+            for cfg in update_cfgs:
+                self.update_account(cfg)
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+            """ Prepare callback from DTS for Config Account """
+
+            action = xact_info.handle.query_action
+            self._log.debug("Config account on_prepare config received (action: %s): %s",
+                            xact_info.handle.query_action, msg)
+
+            if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+                # If the account already exists, then this is an update.
+                if msg.name in self.accounts:
+                    self._log.debug("Config account already exists. Invoking on_prepare update request")
+                    if msg.has_field("account_type"):
+                        raise ConfigAccountError("Cannot change config's account-type")
+
+                    # Since updates are handled by a delete followed by an add, invoke the
+                    # delete prepare callbacks to give clients an opportunity to reject.
+                    yield from self._config_callbacks.on_delete_prepare(msg.name)
+
+                else:
+                    self._log.debug("Config account does not already exist. Invoking on_prepare add request")
+                    if not msg.has_field('account_type'):
+                        raise ConfigAccountError("New Config account must contain account_type field.")
+
+                    account = msg
+                    yield from self._config_callbacks.on_add_prepare(account)
+
+            elif action == rwdts.QueryAction.DELETE:
+                # Check if the entire config agent account got deleted
+                fref = ProtobufC.FieldReference.alloc()
+                fref.goto_whole_message(msg.to_pbcm())
+                if fref.is_field_deleted():
+                    yield from self._config_callbacks.on_delete_prepare(msg.name)
+                else:
+                    self._log.error("Deleting individual fields for config account not supported")
+                    xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+                    return
+
+            else:
+                self._log.error("Action (%s) NOT SUPPORTED", action)
+                xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        self._log.debug("Registering for Config Account config using xpath: %s",
+                        ConfigAgentSubscriber.XPATH,
+                        )
+
+        acg_handler = rift.tasklets.AppConfGroup.Handler(
+                        on_apply=apply_config,
+                        )
+
+        with self._dts.appconf_group_create(acg_handler) as acg:
+            self._reg = acg.register(
+                    xpath=ConfigAgentSubscriber.XPATH,
+                    flags=rwdts.Flag.SUBSCRIBER,
+                    on_prepare=on_prepare,
+                    )
diff --git a/common/python/rift/mano/config_agent/operdata.py b/common/python/rift/mano/config_agent/operdata.py
new file mode 100644 (file)
index 0000000..b941667
--- /dev/null
@@ -0,0 +1,728 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import concurrent.futures
+import time
+
+from gi.repository import (
+    NsrYang,
+    RwTypes,
+    RwcalYang,
+    RwNsrYang,
+    RwConfigAgentYang,
+    RwDts as rwdts)
+
+import rift.tasklets
+
+import rift.mano.utils.juju_api as juju
+
+
+class ConfigAgentAccountNotFound(Exception):
+    pass
+
+class JujuClient(object):
+    def __init__(self, log, ip, port, user, passwd):
+        self._log = log
+        self._ip = ip
+        self._port = port
+        self._user = user
+        self._passwd = passwd
+
+        self._api = juju.JujuApi(log=log,
+                                 server=ip, port=port,
+                                 user=user, secret=passwd)
+
+
+    def validate_account_creds(self):
+        status = RwcalYang.CloudConnectionStatus()
+        try:
+            env = self._api._get_env()
+        except juju.JujuEnvError as e:
+            msg = "JujuClient: Invalid account credentials: %s", str(e)
+            self._log.error(msg)
+            raise Exception(msg)
+        except ConnectionRefusedError as e:
+            msg = "JujuClient: Wrong IP or Port: %s", str(e)
+            self._log.error(msg)
+            raise Exception(msg)
+        except Exception as e:
+            msg = "JujuClient: Connection Failed: %s", str(e)
+            self._log.error(msg)
+            raise Exception(msg)
+        else:
+            status.status = "success"
+            status.details = "Connection was successful"
+            self._log.info("JujuClient: Connection Successful")
+
+        return status
+
+
+class ConfigAgentAccount(object):
+    def __init__(self, log, account_msg):
+        self._log = log
+        self._account_msg = account_msg.deep_copy()
+
+        if account_msg.account_type == "juju":
+            self._cfg_agent_client_plugin = JujuClient(
+                    log,
+                    account_msg.juju.ip_address,
+                    account_msg.juju.port,
+                    account_msg.juju.user,
+                    account_msg.juju.secret)
+        else:
+            self._cfg_agent_client_plugin = None
+
+        self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus(
+                status="unknown",
+                details="Connection status lookup not started"
+                )
+
+        self._validate_task = None
+
+    @property
+    def name(self):
+        return self._account_msg.name
+
+    @property
+    def account_msg(self):
+        return self._account_msg
+
+    @property
+    def account_type(self):
+        return self._account_msg.account_type
+
+    @property
+    def connection_status(self):
+        return self._status
+
+    def update_from_cfg(self, cfg):
+        self._log.debug("Updating parent ConfigAgentAccount to %s", cfg)
+        raise NotImplementedError("Update config agent account not yet supported")
+
+    @asyncio.coroutine
+    def validate_cfg_agent_account_credentials(self, loop):
+        self._log.debug("Validating Config Agent Account %s, credential status %s", self._account_msg, self._status)
+
+        self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus(
+                status="validating",
+                details="Config Agent account connection validation in progress"
+                )
+
+        if self._cfg_agent_client_plugin is None:
+            self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus(
+                    status="unknown",
+                    details="Config Agent account does not support validation of account creds"
+                    )
+        else:
+            try:
+                status = yield from loop.run_in_executor(
+                    None,
+                    self._cfg_agent_client_plugin.validate_account_creds
+                    )
+                self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus.from_dict(status.as_dict())
+            except Exception as e:
+                self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus(
+                    status="failure",
+                    details="Error - " + str(e)
+                    )
+
+        self._log.info("Got config agent account validation response: %s", self._status)
+
+    def start_validate_credentials(self, loop):
+        if self._validate_task is not None:
+            self._validate_task.cancel()
+            self._validate_task = None
+
+        self._validate_task = asyncio.ensure_future(
+                self.validate_cfg_agent_account_credentials(loop),
+                loop=loop
+                )
+
+class CfgAgentDtsOperdataHandler(object):
+    def __init__(self, dts, log, loop):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+
+        self.cfg_agent_accounts = {}
+
+    def add_cfg_agent_account(self, account_msg):
+        account = ConfigAgentAccount(self._log, account_msg)
+        self.cfg_agent_accounts[account.name] = account
+        self._log.info("ConfigAgent Operdata Handler added. Starting account validation")
+
+        account.start_validate_credentials(self._loop)
+
+    def delete_cfg_agent_account(self, account_name):
+        del self.cfg_agent_accounts[account_name]
+        self._log.info("ConfigAgent Operdata Handler deleted.")
+
+    def get_saved_cfg_agent_accounts(self, cfg_agent_account_name):
+        ''' Get the config agent account matching the given name, or all saved accounts if the name is None or empty '''
+        saved_cfg_agent_accounts = []
+
+        if cfg_agent_account_name is None or cfg_agent_account_name == "":
+            cfg_agent_accounts = list(self.cfg_agent_accounts.values())
+            saved_cfg_agent_accounts.extend(cfg_agent_accounts)
+        elif cfg_agent_account_name in self.cfg_agent_accounts:
+            account = self.cfg_agent_accounts[cfg_agent_account_name]
+            saved_cfg_agent_accounts.append(account)
+        else:
+            errstr = "Config Agent account {} does not exist".format(cfg_agent_account_name)
+            raise KeyError(errstr)
+
+        return saved_cfg_agent_accounts
+
+
+    def _register_show_status(self):
+        def get_xpath(cfg_agent_name=None):
+            return "D,/rw-config-agent:config-agent/account{}/connection-status".format(
+                    "[name='%s']" % cfg_agent_name if cfg_agent_name is not None else ''
+                    )
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            path_entry = RwConfigAgentYang.ConfigAgentAccount.schema().keyspec_to_entry(ks_path)
+            cfg_agent_account_name = path_entry.key00.name
+            self._log.debug("Got show cfg_agent connection status request: %s", ks_path.create_string())
+
+            try:
+                saved_accounts = self.get_saved_cfg_agent_accounts(cfg_agent_account_name)
+                for account in saved_accounts:
+                    connection_status = account.connection_status
+                    self._log.debug("Responding to config agent connection status request: %s", connection_status)
+                    xact_info.respond_xpath(
+                            rwdts.XactRspCode.MORE,
+                            xpath=get_xpath(account.name),
+                            msg=account.connection_status,
+                            )
+            except KeyError as e:
+                self._log.warning(str(e))
+                xact_info.respond_xpath(rwdts.XactRspCode.NA)
+                return
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        yield from self._dts.register(
+                xpath=get_xpath(),
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=on_prepare),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+    def _register_validate_rpc(self):
+        def get_xpath():
+            return "/rw-config-agent:update-cfg-agent-status"
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            if not msg.has_field("cfg_agent_account"):
+                raise ConfigAgentAccountNotFound("Config Agent account name not provided")
+
+            cfg_agent_account_name = msg.cfg_agent_account
+            try:
+                account = self.cfg_agent_accounts[cfg_agent_account_name]
+            except KeyError:
+                raise ConfigAgentAccountNotFound("Config Agent account name %s not found" % cfg_agent_account_name)
+
+            account.start_validate_credentials(self._loop)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        yield from self._dts.register(
+                xpath=get_xpath(),
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=on_prepare
+                    ),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self._register_show_status()
+        yield from self._register_validate_rpc()
+
+class ConfigAgentJob(object):
+    """A wrapper over the config agent job object, providing some
+    convenience functions.
+
+    YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob contains
+    ||
+     ==> VNFRS
+          ||
+           ==> Primitives
+
+    """
+    # This normalizes the state terms from Juju to our yang models
+    # Juju : Yang model
+    STATUS_MAP = {"completed": "success",
+                  "pending"  : "pending",
+                  "running"  : "pending",
+                  "failed"   : "failure"}
+
+    def __init__(self, nsr_id, job, tasks=None):
+        """
+        Args:
+            nsr_id (uuid): ID of NSR record
+            job (YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob): Gi object
+            tasks: List of asyncio.tasks. If provided the job monitor will
+                use it to monitor the tasks instead of the execution IDs
+        """
+        self._job = job
+        self.nsr_id = nsr_id
+        self.tasks = tasks
+
+    @property
+    def id(self):
+        """Job id"""
+        return self._job.job_id
+
+    @property
+    def name(self):
+        """Job name"""
+        return self._job.job_name
+
+    @property
+    def job_status(self):
+        """Status of the job (success|pending|failure)"""
+        return self._job.job_status
+
+    @job_status.setter
+    def job_status(self, value):
+        """Setter for job status"""
+        self._job.job_status = value
+
+    @property
+    def job(self):
+        """Gi object"""
+        return self._job
+
+    @property
+    def xpath(self):
+        """Xpath of the job"""
+        return ("D,/nsr:ns-instance-opdata" +
+                "/nsr:nsr[nsr:ns-instance-config-ref='{}']" +
+                "/nsr:config-agent-job[nsr:job-id='{}']"
+                ).format(self.nsr_id, self.id)
+
+    @staticmethod
+    def convert_rpc_input_to_job(nsr_id, rpc_output, tasks):
+        """A helper function to convert the YangOutput_Nsr_ExecNsConfigPrimitive
+        to YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob (NsrYang)
+
+        Args:
+            nsr_id (uuid): NSR ID
+            rpc_output (YangOutput_Nsr_ExecNsConfigPrimitive): RPC output
+            tasks (list): A list of asyncio.Tasks
+
+        Returns:
+            ConfigAgentJob
+        """
+        # Shortcuts to keep the huge generated type names manageable.
+        CfgAgentJob = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob
+        CfgAgentVnfr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr
+        CfgAgentPrimitive = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive
+        CfgAgentPrimitiveParam =  NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive_Parameter
+
+        job = CfgAgentJob.from_dict({
+                "job_id": rpc_output.job_id,
+                "job_name" : rpc_output.name,
+                "job_status": "pending",
+                "triggered_by": rpc_output.triggered_by,
+                "create_time": rpc_output.create_time,
+                "job_status_details": rpc_output.job_status_details if rpc_output.job_status_details is not None else None,
+                "parameter": [param.as_dict() for param in rpc_output.parameter],
+                "parameter_group": [pg.as_dict() for pg in rpc_output.parameter_group]
+            })
+
+        for vnfr in rpc_output.vnf_out_list:
+            vnfr_job = CfgAgentVnfr.from_dict({
+                    "id": vnfr.vnfr_id_ref,
+                    "vnf_job_status": "pending",
+                    })
+
+            for primitive in vnfr.vnf_out_primitive:
+                vnf_primitive = CfgAgentPrimitive.from_dict({
+                        "name": primitive.name,
+                        "execution_status": ConfigAgentJob.STATUS_MAP[primitive.execution_status],
+                        "execution_id": primitive.execution_id
+                    })
+
+                # Copy over the input param
+                for param in primitive.parameter:
+                    vnf_primitive.parameter.append(
+                            CfgAgentPrimitiveParam.from_dict({
+                                    "name": param.name,
+                                    "value": param.value
+                            }))
+
+                vnfr_job.primitive.append(vnf_primitive)
+
+            job.vnfr.append(vnfr_job)
+
+        return ConfigAgentJob(nsr_id, job, tasks)
+
+
+class ConfigAgentJobMonitor(object):
+    """Job monitor: Polls the Juju controller and get the status.
+    Rules:
+        If all Primitive are success, then vnf & nsr status will be "success"
+        If any one Primitive reaches a failed state then both vnf and nsr will fail.
+    """
+    POLLING_PERIOD = 2
+
+    def __init__(self, dts, log, job, executor, loop, config_plugin):
+        """
+        Args:
+            dts : DTS handle
+            log : log handle
+            job (ConfigAgentJob): ConfigAgentJob instance
+            executor (concurrent.futures): Executor for juju status api calls
+            loop (eventloop): Current event loop instance
+            config_plugin : Config plugin to be used.
+        """
+        self.job = job
+        self.log = log
+        self.loop = loop
+        self.executor = executor
+        self.polling_period = ConfigAgentJobMonitor.POLLING_PERIOD
+        self.config_plugin = config_plugin
+        self.dts = dts
+
+    @asyncio.coroutine
+    def _monitor_processes(self, registration_handle):
+        result = 0
+        for process in self.job.tasks:
+            rc = yield from process
+            self.log.debug("Process {} returned rc: {}".format(process, rc))
+            result |= rc
+
+        if result == 0:
+            self.job.job_status = "success"
+        else:
+            self.job.job_status = "failure"
+
+        registration_handle.update_element(self.job.xpath, self.job.job)
+
+    def get_error_details(self):
+        '''Get the error details from failed primitives'''
+        errs = ''
+        for vnfr in self.job.job.vnfr:
+            if vnfr.vnf_job_status != "failure":
+                continue
+
+            for primitive in vnfr.primitive:
+                if primitive.execution_status == "failure":
+                    errs += '<error>'
+                    errs += primitive.execution_error_details
+                    errs += "</error>"
+
+        return errs
+
+    @asyncio.coroutine
+    def publish_action_status(self):
+        """
+        Starts publishing the status for jobs/primitives
+        """
+        registration_handle = yield from self.dts.register(
+                xpath=self.job.xpath,
+                handler=rift.tasklets.DTS.RegistrationHandler(),
+                flags=(rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ),
+                )
+
+        self.log.debug('preparing to publish job status for {}'.format(self.job.xpath))
+
+        try:
+            registration_handle.create_element(self.job.xpath, self.job.job)
+
+            # If the config is done via a user defined script
+            if self.job.tasks is not None:
+                yield from self._monitor_processes(registration_handle)
+                return
+
+            prev = time.time()
+            # Run until pending moves to either failure/success
+            while self.job.job_status == "pending":
+                curr = time.time()
+
+                if curr - prev < self.polling_period:
+                    pause = self.polling_period - (curr - prev)
+                    yield from asyncio.sleep(pause, loop=self.loop)
+
+                prev = time.time()
+
+                tasks = []
+                for vnfr in self.job.job.vnfr:
+                    task = self.loop.create_task(self.get_vnfr_status(vnfr))
+                    tasks.append(task)
+
+                # Exit, if no tasks are found
+                if not tasks:
+                    break
+
+                yield from asyncio.wait(tasks, loop=self.loop)
+
+                job_status = [task.result() for task in tasks]
+
+                if "failure" in job_status:
+                    self.job.job_status = "failure"
+                    errs = self.get_error_details()
+                    if len(errs):
+                        self.job.job.job_status_details = errs
+                elif "pending" in job_status:
+                    self.job.job_status = "pending"
+                else:
+                    self.job.job_status = "success"
+
+                # self.log.debug("Publishing job status: {} at {} for nsr id: {}".format(
+                #     self.job.job_status,
+                #     self.job.xpath,
+                #     self.job.nsr_id))
+
+                registration_handle.update_element(self.job.xpath, self.job.job)
+
+
+        except Exception as e:
+            self.log.exception(e)
+            raise
+
+
+    @asyncio.coroutine
+    def get_vnfr_status(self, vnfr):
+        """Schedules tasks for all containing primitives and updates it's own
+        status.
+
+        Args:
+            vnfr : Vnfr job record containing primitives.
+
+        Returns:
+            (str): "success|failure|pending"
+        """
+        tasks = []
+        job_status = []
+
+        for primitive in vnfr.primitive:
+            if primitive.execution_id == "":
+                # TODO: For some config data, the id will be empty, check if
+                # mapping is needed.
+                job_status.append(primitive.execution_status)
+                continue
+
+            task = self.loop.create_task(self.get_primitive_status(primitive))
+            tasks.append(task)
+
+        if tasks:
+            yield from asyncio.wait(tasks, loop=self.loop)
+
+        job_status.extend([task.result() for task in tasks])
+        if "failure" in job_status:
+            vnfr.vnf_job_status = "failure"
+            return "failure"
+
+        elif "pending" in job_status:
+            vnfr.vnf_job_status = "pending"
+            return "pending"
+
+        else:
+            vnfr.vnf_job_status = "success"
+            return "success"
+
+    @asyncio.coroutine
+    def get_primitive_status(self, primitive):
+        """
+        Queries the juju api and gets the status of the execution id.
+
+        Args:
+            primitive : Primitive containing the execution ID.
+        """
+
+        try:
+            resp = yield from self.loop.run_in_executor(
+                    self.executor,
+                    self.config_plugin.get_action_status,
+                    primitive.execution_id
+                    )
+
+            status = resp['status']
+            if status == 'failed':
+                self.log.warning("Execution of action {} failed: {}".
+                                 format(primitive.execution_id, resp))
+                primitive.execution_error_details = resp['message']
+
+        except Exception as e:
+            self.log.exception(e)
+            status = "failed"
+
+        # Handle the case where status is None
+        if status:
+            primitive.execution_status = ConfigAgentJob.STATUS_MAP[status]
+        else:
+            primitive.execution_status = "failure"
+
+        return primitive.execution_status
+
+
+class CfgAgentJobDtsHandler(object):
+    """Dts Handler for CfgAgent"""
+    XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr/nsr:config-agent-job"
+
+    def __init__(self, dts, log, loop, nsm, cfgm):
+        """
+        Args:
+            dts  : Dts Handle.
+            log  : Log handle.
+            loop : Event loop.
+            nsm  : NsmManager.
+            cfgm : ConfigManager.
+        """
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._cfgm = cfgm
+        self._nsm = nsm
+
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ Return registration handle """
+        return self._regh
+
+    @property
+    def nsm(self):
+        """ Return the NSManager manager instance """
+        return self._nsm
+
+    @property
+    def cfgm(self):
+        """ Return the ConfigManager manager instance """
+        return self._cfgm
+
+    @staticmethod
+    def cfg_job_xpath(nsr_id, job_id):
+        return ("D,/nsr:ns-instance-opdata" +
+                "/nsr:nsr[nsr:ns-instance-config-ref = '{}']" +
+                "/nsr:config-agent-job[nsr:job-id='{}']").format(nsr_id, job_id)
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for NS monitoring read from dts """
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            """ prepare callback from dts """
+            xpath = ks_path.to_xpath(RwNsrYang.get_schema())
+            if action == rwdts.QueryAction.READ:
+                schema = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.schema()
+                path_entry = schema.keyspec_to_entry(ks_path)
+                try:
+                    nsr_id = path_entry.key00.ns_instance_config_ref
+
+                    #print("###>>> self.nsm.nsrs:", self.nsm.nsrs)
+                    nsr_ids = []
+                    if nsr_id is None or nsr_id == "":
+                        nsrs = list(self.nsm.nsrs.values())
+                        nsr_ids = [nsr.id for nsr in nsrs if nsr is not None]
+                    else:
+                        nsr_ids = [nsr_id]
+
+                    for nsr_id in nsr_ids:
+                        job = self.cfgm.get_job(nsr_id)
+
+                        # If no jobs are queued for the NSR
+                        if job is None:
+                            continue
+
+                        xact_info.respond_xpath(
+                            rwdts.XactRspCode.MORE,
+                            CfgAgentJobDtsHandler.cfg_job_xpath(nsr_id, job.job_id),
+                            job)
+
+                except Exception as e:
+                    self._log.exception("Caught exception:%s", str(e))
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+            else:
+                xact_info.respond_xpath(rwdts.XactRspCode.NA)
+
+        hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
+        with self._dts.group_create() as group:
+            self._regh = group.register(xpath=CfgAgentJobDtsHandler.XPATH,
+                                        handler=hdl,
+                                        flags=rwdts.Flag.PUBLISHER,
+                                        )
+
+
+class ConfigAgentJobManager(object):
+    """A central class that manager all the Config Agent related data,
+    Including updating the status
+
+    TODO: Needs to support multiple config agents.
+    """
+    def __init__(self, dts, log, loop, nsm):
+        """
+        Args:
+            dts  : Dts handle
+            log  : Log handler
+            loop : Event loop
+            nsm  : NsmTasklet instance
+        """
+        self.jobs = {}
+        self.dts = dts
+        self.log = log
+        self.loop = loop
+        self.nsm = nsm
+        self.handler = CfgAgentJobDtsHandler(dts, log, loop, nsm, self)
+        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
+
+    def add_job(self, rpc_output, tasks=None):
+        """Once an RPC is trigger add a now job
+
+        Args:
+            rpc_output (YangOutput_Nsr_ExecNsConfigPrimitive): RPC output
+            tasks (list): A list of asyncio.Tasks
+
+        """
+        nsr_id = rpc_output.nsr_id_ref
+
+        self.jobs[nsr_id] = ConfigAgentJob.convert_rpc_input_to_job(nsr_id, rpc_output, tasks)
+
+        self.log.debug("Creating a job monitor for Job id: {}".format(
+                rpc_output.job_id))
+
+        # For every Job we will schedule a new monitoring process.
+        job_monitor = ConfigAgentJobMonitor(
+            self.dts,
+            self.log,
+            self.jobs[nsr_id],
+            self.executor,
+            self.loop,
+            self.nsm.config_agent_plugins[0]  # Hack
+            )
+        task = self.loop.create_task(job_monitor.publish_action_status())
+
+    def get_job(self, nsr_id):
+        """Get the job associated with the NSR Id, if present."""
+        try:
+            return self.jobs[nsr_id].job
+        except KeyError:
+            return None
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self.handler.register()
diff --git a/common/python/rift/mano/config_data/__init__.py b/common/python/rift/mano/config_data/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/common/python/rift/mano/config_data/config.py b/common/python/rift/mano/config_data/config.py
new file mode 100644 (file)
index 0000000..63a2e48
--- /dev/null
@@ -0,0 +1,430 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
+
+
+import abc
+import json
+import os
+import yaml
+
+from gi.repository import NsdYang
+from gi.repository import VnfdYang
+
+
+class InitialConfigReadError(Exception):
+    pass
+
+
+class InitialConfigMethodError(Exception):
+    pass
+
+
+class InitialConfigPrimitiveReader(object):
+    """ Reader for the VNF Initial Config Input Data
+
+    This class interprets the Initial Config Primitive
+    Input data and constructs initial config primitive
+    protobuf messages.
+
+    The reason for creating a new format is to keep the structure
+    as dead-simple as possible for readability.
+
+    The structure (not serialization format) is defined as the
+    following.
+
+    [
+        {
+          "name": <primitive_name>,
+          "parameter": {
+            "hostname": "pe1"
+            "pass": "6windos"
+            ...
+          }
+        }
+        ...
+    ]
+
+    """
+    def __init__(self, primitive_input):
+        self._primitives = []
+
+        self._parse_input_data(primitive_input)
+
+    def _parse_input_data(self, input_dict):
+        for seq, cfg in enumerate(input_dict):
+            if "name" not in cfg:
+                raise InitialConfigReadError("Initial config primitive must have a name")
+
+            name = cfg["name"]
+
+            new_primitive = self.get_initial_config_primitive(seq=seq, name=name)
+            self._primitives.append(new_primitive)
+            if "parameter" in cfg:
+                for key, val in cfg["parameter"].items():
+                    new_primitive.parameter.add(name=key, value=val)
+
+    @abc.abstractmethod
+    def get_initial_config_primitive(self, seq, name):
+        '''Override in sub class to provide the correct yang model'''
+        raise InitialConfigMethodError(
+            "InitialConfigPrimitiveReader Calling abstract class method")
+
+    @property
+    def primitives(self):
+        """ Returns a copy of the read inital config primitives"""
+        return [prim.deep_copy() for prim in self._primitives]
+
+    @classmethod
+    def from_yaml_file_hdl(cls, file_hdl):
+        """ Create a instance of InitialConfigPrimitiveFileData
+        by reading a YAML file handle.
+
+        Arguments:
+            file_hdl - A file handle which contains serialized YAML which
+                       follows the documented structure.
+
+        Returns:
+            A new instance of cls, constructed from the parsed input
+
+        Raises:
+            InitialConfigReadError: Input Data was malformed or could not be read
+        """
+        try:
+            input_dict = yaml.safe_load(file_hdl)
+        except yaml.YAMLError as e:
+            raise InitialConfigReadError(e)
+
+        return cls(input_dict)
+
+
+class VnfInitialConfigPrimitiveReader(InitialConfigPrimitiveReader):
+    '''Class to read the VNF initial config primitives'''
+
+    def __init__(self, primitive_input):
+        super(VnfInitialConfigPrimitiveReader, self).__init__(primitive_input)
+
+    def get_initial_config_primitive(self, seq, name):
+        return VnfdYang.InitialConfigPrimitive(seq=seq, name=name)
+
+
+class NsInitialConfigPrimitiveReader(InitialConfigPrimitiveReader):
+    '''Class to read the NS initial config primitives'''
+
+    def __init__(self, primitive_input):
+        super(NsInitialConfigPrimitiveReader, self).__init__(primitive_input)
+
+    def get_initial_config_primitive(self, seq, name):
+        return NsdYang.NsdInitialConfigPrimitive(seq=seq, name=name)
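+
+# A minimal usage sketch for the readers above, assuming a YAML file that
+# follows the structure documented on InitialConfigPrimitiveReader (the file
+# name is hypothetical):
+#
+#   with open("vnf_initial_config.yaml") as fh:
+#       reader = VnfInitialConfigPrimitiveReader.from_yaml_file_hdl(fh)
+#   for prim in reader.primitives:
+#       print(prim.seq, prim.name)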
+
+
+class ConfigPrimitiveConvertor(object):
+    PARAMETER = "parameter"
+    PARAMETER_GROUP = "parameter_group"
+    CONFIG_PRIMITIVE = "service_primitive"
+    INITIAL_CONFIG_PRIMITIVE = "initial_config_primitive"
+
+    def _extract_param(self, param, field="default_value"):
+        key = param.name
+        value = getattr(param, field, None)
+
+        if value is not None:
+            setattr(param, field, None)
+
+        return key, value
+
+    def _extract_parameters(self, parameters, input_data, field="default_value"):
+        input_data[self.PARAMETER] = {}
+        for param in parameters:
+            key, value = self._extract_param(param, field)
+
+            if value is None:
+                continue
+
+            input_data[self.PARAMETER][key] = value
+
+        if not input_data[self.PARAMETER]:
+            del input_data[self.PARAMETER]
+
+    def _extract_parameter_group(self, param_groups, input_data):
+        input_data[self.PARAMETER_GROUP] = {}
+        for param_group in param_groups:
+            input_data[self.PARAMETER_GROUP][param_group.name] = {}
+            for param in param_group.parameter:
+                key, value = self._extract_param(param)
+
+                if value is None:
+                    continue
+
+                input_data[self.PARAMETER_GROUP][param_group.name][key] = value
+
+        if not input_data[self.PARAMETER_GROUP]:
+            del input_data[self.PARAMETER_GROUP]
+
+    def extract_config(self,
+                       config_primitives=None,
+                       initial_configs=None,
+                       format="yaml"):
+        input_data = {}
+
+        if config_primitives:
+            input_data[self.CONFIG_PRIMITIVE] = {}
+            for config_primitive in config_primitives:
+                input_data[self.CONFIG_PRIMITIVE][config_primitive.name] = {}
+                self._extract_parameters(
+                    config_primitive.parameter,
+                    input_data[self.CONFIG_PRIMITIVE][config_primitive.name])
+
+                try:
+                    self._extract_parameter_group(
+                        config_primitive.parameter_group,
+                        input_data[self.CONFIG_PRIMITIVE][config_primitive.name])
+                except AttributeError:
+                    pass
+
+                if not input_data[self.CONFIG_PRIMITIVE][config_primitive.name]:
+                    del input_data[self.CONFIG_PRIMITIVE][config_primitive.name]
+
+            if not input_data[self.CONFIG_PRIMITIVE]:
+                del input_data[self.CONFIG_PRIMITIVE]
+
+
+        if initial_configs:
+            input_data[self.INITIAL_CONFIG_PRIMITIVE] = []
+            for in_config_primitive in initial_configs:
+                primitive = {}
+                self._extract_parameters(
+                    in_config_primitive.parameter,
+                    primitive,
+                    field="value")
+
+                if primitive:
+                    input_data[self.INITIAL_CONFIG_PRIMITIVE].append(
+                        {
+                            "name": in_config_primitive.name,
+                            self.PARAMETER: primitive[self.PARAMETER],
+                        }
+                    )
+
+            if not input_data[self.INITIAL_CONFIG_PRIMITIVE]:
+                del input_data[self.INITIAL_CONFIG_PRIMITIVE]
+
+        if input_data:
+            if format == "json":
+                return json.dumps(input_data)
+            elif format == "yaml":
+                return yaml.dump(input_data, default_flow_style=False)
+        else:
+            return ''
+
+    def extract_nsd_config(self, nsd, format="yaml"):
+        config_prim = None
+        try:
+            config_prim = nsd.service_primitive
+        except AttributeError:
+            pass
+
+        initial_conf = None
+        try:
+            initial_conf = nsd.initial_config_primitive
+        except AttributeError:
+            pass
+
+        return self.extract_config(
+            config_primitives=config_prim,
+            initial_configs=initial_conf,
+            format=format)
+
+    def extract_vnfd_config(self, vnfd, format="yaml"):
+        config_prim = None
+        try:
+            config_prim = vnfd.vnf_configuration.service_primitive
+        except AttributeError:
+            pass
+
+        initial_conf = None
+        try:
+            initial_conf = vnfd.vnf_configuration.initial_config_primitive
+        except AttributeError:
+            pass
+
+        return self.extract_config(
+            config_primitives=config_prim,
+            initial_configs=initial_conf,
+            format=format)
+
+    def merge_params(self, parameters, input_config, field="default_value"):
+        for param in parameters:
+            try:
+                setattr(param, field, input_config[param.name])
+            except KeyError:
+                pass
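+
+    # Example of merge_params with hypothetical values: given a parameter p
+    # with p.name == "Vlan ID" and input_config == {"Vlan ID": "3000"},
+    # the loop sets p.default_value = "3000"; parameters whose names are not
+    # present in input_config are left untouched.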
+
+    def add_nsd_initial_config(self, nsd_init_cfg_prim_msg, input_data):
+        """ Add initial config primitives from NS Initial Config Input Data
+
+        Arguments:
+            nsd_init_cfg_prim_msg - manotypes:nsd/initial_config_primitive pb msg
+            ns_input_data - NsInitialConfigPrimitiveReader documented input data
+
+        Raises:
+           InitialConfigReadError: VNF input data was malformed
+        """
+        if self.INITIAL_CONFIG_PRIMITIVE in input_data:
+            ns_input_data = input_data[self.INITIAL_CONFIG_PRIMITIVE]
+
+            reader = NsInitialConfigPrimitiveReader(ns_input_data)
+            for prim in reader.primitives:
+                nsd_init_cfg_prim_msg.append(prim)
+
+    def merge_nsd_initial_config(self, nsd, input_data):
+        try:
+            for config_primitive in nsd.initial_config_primitive:
+                for cfg in input_data[self.INITIAL_CONFIG_PRIMITIVE]:
+                    if cfg['name'] == config_primitive.name:
+                        self.merge_params(
+                            config_primitive.parameter,
+                            cfg[self.PARAMETER],
+                            field="value")
+                        break
+
+        except AttributeError:
+            # NSD has no initial-config-primitive list; nothing to merge.
+            # (This class holds no log handle, so the miss is not logged.)
+            pass
+
+
+    def merge_nsd_config(self, nsd, input_data):
+        for config_primitive in nsd.service_primitive:
+            try:
+                cfg = input_data[self.CONFIG_PRIMITIVE][config_primitive.name]
+            except KeyError:
+                continue
+
+            self.merge_params(
+                    config_primitive.parameter,
+                    cfg[self.PARAMETER])
+
+            for param_group in config_primitive.parameter_group:
+                self.merge_params(
+                        param_group.parameter,
+                        cfg[self.PARAMETER_GROUP][param_group.name])
+
+    def add_vnfd_initial_config(self, vnfd_init_cfg_prim_msg, input_data):
+        """ Add initial config primitives from VNF Initial Config Input Data
+
+        Arguments:
+            vnfd_init_cfg_prim_msg - manotypes:vnf-configuration/initial_config_primitive pb msg
+            vnf_input_data - VnfInitialConfigPrimitiveReader documented input data
+
+        Raises:
+           InitialConfigReadError: VNF input data was malformed
+        """
+        if self.INITIAL_CONFIG_PRIMITIVE in input_data:
+            vnf_input_data = input_data[self.INITIAL_CONFIG_PRIMITIVE]
+
+            reader = VnfInitialConfigPrimitiveReader(vnf_input_data)
+            for prim in reader.primitives:
+                vnfd_init_cfg_prim_msg.append(prim)
+
+    def merge_vnfd_config(self, vnfd, input_data):
+        for config_primitive in vnfd.vnf_configuration.service_primitive:
+            try:
+                cfg = input_data[self.CONFIG_PRIMITIVE][config_primitive.name]
+            except KeyError:
+                continue
+
+            self.merge_params(
+                config_primitive.parameter,
+                cfg[self.PARAMETER])
+
+
+class ConfigStore(object):
+    """Convenience class that fetches all the instance related data from the
+    $RIFT_ARTIFACTS/launchpad/libs directory.
+    """
+
+    def __init__(self, log):
+        """
+        Args:
+            log : Log handle.
+        """
+        self._log = log
+        self.converter = ConfigPrimitiveConvertor()
+
+    def merge_vnfd_config(self, nsd_id, vnfd, member_vnf_index):
+        """Merges the vnfd config from the config directory.
+
+        Args:
+            nsd_id (str): Id of the NSD object
+            vnfd_msg : VNFD pb message containing the VNFD id and
+                       the member index ref.
+        """
+        nsd_archive = os.path.join(
+            os.getenv('RIFT_ARTIFACTS'),
+            "launchpad/libs",
+            nsd_id,
+            "config")
+
+        self._log.info("Looking for config from the archive {}".format(nsd_archive))
+
+        if not os.path.exists(nsd_archive):
+            return
+
+        config_file = os.path.join(nsd_archive,
+                                   "{}__{}.yaml".format(vnfd.id, member_vnf_index))
+
+        if not os.path.exists(config_file):
+            self._log.info("Could not find VNF initial config in archive: %s", config_file)
+            return
+
+        input_data = self.read_from_file(config_file)
+        self._log.info("Loaded VNF config file {}: {}".format(config_file, input_data))
+
+        self.converter.merge_vnfd_config(vnfd, input_data)
+
+        self.converter.add_vnfd_initial_config(
+            vnfd.vnf_configuration.initial_config_primitive,
+            input_data,
+        )
+
+    def read_from_file(self, filename):
+        with open(filename) as fh:
+            input_data = yaml.safe_load(fh)
+        return input_data
+
+    def merge_nsd_config(self, nsd):
+        nsd_archive = os.path.join(
+            os.getenv('RIFT_ARTIFACTS'),
+            "launchpad/libs",
+            nsd.id,
+            "config")
+
+        self._log.info("Looking for config from the archive {}".format(nsd_archive))
+
+        if not os.path.exists(nsd_archive):
+            return
+
+        config_file = os.path.join(nsd_archive,
+                                   "{}.yaml".format(nsd.id))
+        if not os.path.exists(config_file):
+            self._log.info("Could not find NS config in archive: %s", config_file)
+            return
+
+        input_data = self.read_from_file(config_file)
+        self._log.info("Loaded NS config file {}: {}".format(config_file, input_data))
+
+        self.converter.merge_nsd_config(nsd, input_data)
+
+        self.converter.merge_nsd_initial_config(nsd, input_data)
diff --git a/common/python/rift/mano/config_data/test/__init__.py b/common/python/rift/mano/config_data/test/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/common/python/rift/mano/config_data/test/test_converter.py b/common/python/rift/mano/config_data/test/test_converter.py
new file mode 100644 (file)
index 0000000..1bfd7d7
--- /dev/null
@@ -0,0 +1,424 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import pytest
+import uuid
+from gi.repository import NsdYang, VnfdYang
+from ..config import ConfigPrimitiveConvertor
+import yaml
+
+@pytest.fixture(scope="function")
+def nsd():
+    catalog = NsdYang.YangData_Nsd_NsdCatalog()
+    nsd = catalog.nsd.add()
+    nsd.id = str(uuid.uuid1())
+    return nsd
+
+@pytest.fixture(scope="function")
+def vnfd():
+    catalog = VnfdYang.YangData_Vnfd_VnfdCatalog()
+    vnfd = catalog.vnfd.add()
+    vnfd.id = str(uuid.uuid1())
+    return vnfd
+
+@pytest.fixture(scope="session")
+def convertor():
+    return ConfigPrimitiveConvertor()
+
+def test_nsd_config(nsd, convertor):
+        nsd.service_primitive.add().from_dict(
+            {
+                "parameter_group": [
+                    {
+                        "parameter": [
+                            {
+                                "data_type": "INTEGER",
+                                "default_value": "3000",
+                                "name": "Vlan ID",
+                                "mandatory": True
+                            }
+                        ],
+                        "name": "PE1",
+                        "mandatory": False
+                    },
+                    {
+                        "parameter": [
+                            {
+                                "data_type": "INTEGER",
+                                "default_value": "3000",
+                                "name": "Vlan ID",
+                                "mandatory": True
+                            }
+                        ],
+                        "name": "PE2",
+                        "mandatory": False
+                    }
+                ],
+                "parameter": [
+                    {
+                        "data_type": "INTEGER",
+                        "default_value": "10",
+                        "name": "Tunnel Key",
+                        "mandatory": True,
+                    }
+                ],
+                "name": "Add SP Test Corporation",
+                "user_defined_script": "add_corporation.py"
+            })
+
+        expected_yaml = """Add SP Test Corporation:
+  parameter:
+    Tunnel Key: '10'
+  parameter_group:
+    PE1:
+      Vlan ID: '3000'
+    PE2:
+      Vlan ID: '3000'
+"""
+
+        assert expected_yaml == \
+               convertor.extract_nsd_config(nsd)
+
+
+def test_nsd_multiple_config(nsd, convertor):
+        nsd.service_primitive.add().from_dict(
+            {
+                "parameter_group": [{
+                        "parameter": [
+                            {
+                                "data_type": "INTEGER",
+                                "default_value": "3000",
+                                "name": "Vlan ID",
+                                "mandatory": True
+                            }
+                        ],
+                        "name": "PE1",
+                        "mandatory": False
+                    }],
+                "parameter": [
+                    {
+                        "data_type": "INTEGER",
+                        "default_value": "10",
+                        "name": "Tunnel Key",
+                        "mandatory": True,
+                    }
+                ],
+                "name": "Add SP Test Corporation",
+                "user_defined_script": "add_corporation.py"
+            })
+
+        nsd.service_primitive.add().from_dict(
+            {
+                "parameter_group": [{
+                        "parameter": [
+                            {
+                                "data_type": "INTEGER",
+                                "default_value": "3000",
+                                "name": "Vlan ID",
+                                "mandatory": True
+                            }
+                        ],
+                        "name": "PE2",
+                        "mandatory": False
+                    }],
+                "parameter": [
+                    {
+                        "data_type": "INTEGER",
+                        "default_value": "10",
+                        "name": "Tunnel Key",
+                        "mandatory": True,
+                    }
+                ],
+                "name": "Add SP Test Corporation 2",
+                "user_defined_script": "add_corporation.py"
+            })
+
+        expected_yaml = """Add SP Test Corporation:
+  parameter:
+    Tunnel Key: '10'
+  parameter_group:
+    PE1:
+      Vlan ID: '3000'
+Add SP Test Corporation 2:
+  parameter:
+    Tunnel Key: '10'
+  parameter_group:
+    PE2:
+      Vlan ID: '3000'
+"""
+
+        assert yaml.load(expected_yaml) == \
+               yaml.load(convertor.extract_nsd_config(nsd))
+
+
+def test_vnfd_config(vnfd, convertor):
+    vnf_config = vnfd.vnf_configuration
+
+    # Set the initial config
+    vnf_config.initial_config_primitive.add().from_dict({
+            "seq": 1,
+            "name": "config",
+            "parameter": [
+                {"name": "vpe-router", "value": "<rw_mgmt_ip>"},
+                {"name": "user", "value": "root"},
+                {"name": "pass", "value": "6windos"}
+            ]
+        })
+
+    vnf_config.initial_config_primitive.add().from_dict({
+            "name": "configure-interface",
+            "seq": 2,
+            "parameter": [
+                {"value": "10.10.10.2/30", "name": "cidr"}
+            ],
+        })
+
+    expected_yaml = """initial_config_primitive:
+  config:
+    parameter:
+      pass: 6windos
+      user: root
+      vpe-router: <rw_mgmt_ip>
+  configure-interface:
+    parameter:
+      cidr: 10.10.10.2/30
+"""
+
+    assert expected_yaml == convertor.extract_vnfd_config(vnfd)
+
+def test_vnfd_config_prim(vnfd, convertor):
+    vnf_config = vnfd.vnf_configuration
+
+    # Set the initial config
+    vnf_config.initial_config_primitive.add().from_dict({
+            "seq": 1,
+            "name": "config",
+            "parameter": [
+                {"name": "vpe-router", "value": "<rw_mgmt_ip>"},
+                {"name": "user", "value": "root"},
+                {"name": "pass", "value": "6windos"}
+            ]
+        })
+
+    vnf_config.initial_config_primitive.add().from_dict({
+            "name": "configure-interface",
+            "seq": 2,
+            "parameter": [
+                {"value": "10.10.10.2/30", "name": "cidr"}
+            ],
+        })
+
+    vnf_config.service_primitive.add().from_dict({
+        "name": "PE1",
+        "parameter": [
+                {"name": "Foo", "default_value": "Bar"}
+        ]
+        })
+
+    expected_yaml = """service_primitive:
+  PE1:
+    parameter:
+      Foo: Bar
+initial_config_primitive:
+  config:
+    parameter:
+      pass: 6windos
+      user: root
+      vpe-router: <rw_mgmt_ip>
+  configure-interface:
+    parameter:
+      cidr: 10.10.10.2/30
+"""
+
+    assert expected_yaml == convertor.extract_vnfd_config(vnfd)
+
+
+
+def test_vnfd_merge(vnfd, convertor):
+    vnf_config = vnfd.vnf_configuration
+
+    # Set the initial config
+    vnf_config.initial_config_primitive.add().from_dict({
+            "seq": 1,
+            "name": "config",
+            "parameter": [{"name": "vpe-router"},
+                          {"name": "user"},
+                          {"name": "pass"}
+            ]
+        })
+
+    vnf_config.initial_config_primitive.add().from_dict({
+            "name": "configure-interface",
+            "seq": 2,
+            "parameter": [{"name": "cidr"}],
+        })
+
+    vnf_config.service_primitive.add().from_dict({
+        "name": "PE1",
+        "parameter": [{"name": "Foo",}]
+        })
+
+    ip_yaml = """service_primitive:
+  PE1:
+    parameter:
+      Foo: Bar
+initial_config_primitive:
+  config:
+    parameter:
+      pass: 6windos
+      user: root
+      vpe-router: <rw_mgmt_ip>
+  configure-interface:
+    parameter:
+      cidr: 10.10.10.2/30
+"""
+
+    catalog = VnfdYang.YangData_Vnfd_VnfdCatalog()
+    expected_vnfd = catalog.vnfd.add()
+    vnf_config = expected_vnfd.vnf_configuration
+    expected_vnfd.id = vnfd.id
+
+    # Set the initial config
+    vnf_config.initial_config_primitive.add().from_dict({
+            "seq": 1,
+            "name": "config",
+            "parameter": [
+                {"name": "vpe-router", "value": "<rw_mgmt_ip>"},
+                {"name": "user", "value": "root"},
+                {"name": "pass", "value": "6windos"}
+            ]
+        })
+
+    vnf_config.initial_config_primitive.add().from_dict({
+            "name": "configure-interface",
+            "seq": 2,
+            "parameter": [
+                {"value": "10.10.10.2/30", "name": "cidr"}
+            ],
+        })
+
+    vnf_config.service_primitive.add().from_dict({
+        "name": "PE1",
+        "parameter": [
+                {"name": "Foo", "default_value": "Bar"}
+        ]
+        })
+
+    convertor.merge_vnfd_config(vnfd, yaml.load(ip_yaml))
+
+    assert vnfd.as_dict() == expected_vnfd.as_dict()
+
+
+def test_nsd_merge(nsd, convertor):
+        nsd.service_primitive.add().from_dict(
+            {
+                "parameter_group": [
+                    {
+                        "parameter": [
+                            {
+                                "data_type": "INTEGER",
+                                "default_value": "3000",
+                                "name": "Vlan ID",
+                                "mandatory": True
+                            }
+                        ],
+                        "name": "PE1",
+                        "mandatory": False
+                    },
+                    {
+                        "parameter": [
+                            {
+                                "data_type": "INTEGER",
+                                "default_value": "3000",
+                                "name": "Vlan ID",
+                                "mandatory": True
+                            }
+                        ],
+                        "name": "PE2",
+                        "mandatory": False
+                    }
+                ],
+                "parameter": [
+                    {
+                        "data_type": "INTEGER",
+                        "default_value": "10",
+                        "name": "Tunnel Key",
+                        "mandatory": True,
+                    }
+                ],
+                "name": "Add SP Test Corporation",
+                "user_defined_script": "add_corporation.py"
+            })
+
+        ip_yaml = """Add SP Test Corporation:
+  parameter:
+    Tunnel Key: '10'
+  parameter_group:
+    PE1:
+      Vlan ID: '3000'
+    PE2:
+      Vlan ID: '3000'
+"""
+
+        catalog = NsdYang.YangData_Nsd_NsdCatalog()
+        expected_nsd = catalog.nsd.add()
+        expected_nsd.id = nsd.id
+        expected_nsd.service_primitive.add().from_dict(
+            {
+                "parameter_group": [
+                    {
+                        "parameter": [
+                            {
+                                "data_type": "INTEGER",
+                                "default_value": "3000",
+                                "name": "Vlan ID",
+                                "mandatory": True
+                            }
+                        ],
+                        "name": "PE1",
+                        "mandatory": False
+                    },
+                    {
+                        "parameter": [
+                            {
+                                "data_type": "INTEGER",
+                                "default_value": "3000",
+                                "name": "Vlan ID",
+                                "mandatory": True
+                            }
+                        ],
+                        "name": "PE2",
+                        "mandatory": False
+                    }
+                ],
+                "parameter": [
+                    {
+                        "data_type": "INTEGER",
+                        "default_value": "10",
+                        "name": "Tunnel Key",
+                        "mandatory": True,
+                    }
+                ],
+                "name": "Add SP Test Corporation",
+                "user_defined_script": "add_corporation.py"
+            })
+
+        convertor.merge_nsd_config(nsd, yaml.load(ip_yaml))
+
+        assert nsd.as_dict() == expected_nsd.as_dict()
+
+
diff --git a/common/python/rift/mano/dts/__init__.py b/common/python/rift/mano/dts/__init__.py
new file mode 100644 (file)
index 0000000..e523034
--- /dev/null
@@ -0,0 +1,24 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from .core import DtsHandler
+
+# Subscribers
+from .subscriber.core import AbstractOpdataSubscriber, AbstractConfigSubscriber
+from .subscriber.vnf_subscriber import VnfdCatalogSubscriber, VnfrCatalogSubscriber
+from .subscriber.ns_subscriber import NsrCatalogSubscriber, NsdCatalogSubscriber
+from .subscriber.store import SubscriberStore
\ No newline at end of file
diff --git a/common/python/rift/mano/dts/core.py b/common/python/rift/mano/dts/core.py
new file mode 100644 (file)
index 0000000..4894e16
--- /dev/null
@@ -0,0 +1,40 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file core.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 09-Jul-2016
+
+"""
+
+class DtsHandler(object):
+    """A common class to hold the barebone objects to build a publisher or
+    subscriber
+    """
+    def __init__(self, log, dts, loop):
+        """Constructor
+
+        Args:
+            log : Log handle
+            dts : DTS handle
+            loop : Asyncio event loop.
+        """
+        # Reg handle
+        self.reg = None
+        self.log = log
+        self.dts = dts
+        self.loop = loop
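+
+# A minimal usage sketch (hypothetical subclass; the concrete publishers
+# and subscribers in this package derive from DtsHandler this way):
+#
+#     class MyHandler(DtsHandler):
+#         @asyncio.coroutine
+#         def register(self):
+#             self.reg = yield from self.dts.register(...)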
diff --git a/common/python/rift/mano/dts/subscriber/__init__.py b/common/python/rift/mano/dts/subscriber/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/common/python/rift/mano/dts/subscriber/core.py b/common/python/rift/mano/dts/subscriber/core.py
new file mode 100644 (file)
index 0000000..dd2513e
--- /dev/null
@@ -0,0 +1,215 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file core.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 09-Jul-2016
+
+"""
+
+import abc
+import collections
+import asyncio
+
+from gi.repository import (RwDts as rwdts, ProtobufC)
+import rift.tasklets
+
+from ..core import DtsHandler
+
+
+class SubscriberDtsHandler(DtsHandler):
+    """A common class for all subscribers.
+    """
+    @classmethod
+    def from_tasklet(cls, tasklet, callback=None):
+        """Convenience method to build the object from tasklet
+
+        Args:
+            tasklet (rift.tasklets.Tasklet): Tasklet
+            callback (None, optional): Callable, which will be invoked on
+                    subscriber changes.
+
+        Signature of callback:
+            Args:
+                msg: The Gi Object msg from DTS
+                action(rwdts.QueryAction): Action type
+        """
+        return cls(tasklet.log, tasklet.dts, tasklet.loop, callback=callback)
+
+    def __init__(self, log, dts, loop, callback=None):
+        super().__init__(log, dts, loop)
+        self.callback = callback
+
+    def get_reg_flags(self):
+        """Default set of registration flags; can be overridden by subclasses.
+
+        Returns:
+            Set of rwdts.Flag types.
+        """
+        return rwdts.Flag.SUBSCRIBER|rwdts.Flag.DELTA_READY|rwdts.Flag.CACHE
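+
+# Example callback shape accepted by SubscriberDtsHandler subclasses (a
+# sketch; msg is the Gi object published on the registered xpath):
+#
+#     def on_change(msg, action):
+#         if action == rwdts.QueryAction.CREATE:
+#             handle_create(msg)    # hypothetical handler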
+
+
+
+class AbstractOpdataSubscriber(SubscriberDtsHandler):
+    """Abstract class that simplifies the process of creating subscribers
+    for opdata.
+
+    An opdata subscriber can be created in one step by subclassing and
+    implementing the MANDATORY get_xpath() method.
+    """
+    @abc.abstractmethod
+    def get_xpath(self):
+        """
+        Returns:
+           str: xpath
+        """
+        pass
+
+    @asyncio.coroutine
+    def register(self):
+        """Triggers the registration
+        """
+        xacts = {}
+
+        def on_commit(xact_info):
+            xact_id = xact_info.handle.get_xact().id
+            if xact_id in xacts:
+                msg, action = xacts.pop(xact_id)
+
+                if self.callback:
+                    self.callback(msg, action)
+
+            return rwdts.MemberRspCode.ACTION_OK
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            try:
+                # Defer all actions till the commit state.
+                xacts[xact_info.xact.id] = (msg, action)
+
+            except Exception as e:
+                self.log.exception(e)
+
+            finally:
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        reg_event = asyncio.Event(loop=self.loop)
+
+        @asyncio.coroutine
+        def on_ready(_, status):
+            reg_event.set()
+
+        handler = rift.tasklets.DTS.RegistrationHandler(
+                on_ready=on_ready,
+                on_prepare=on_prepare,
+                on_commit=on_commit
+                )
+
+        self.reg = yield from self.dts.register(
+                xpath=self.get_xpath(),
+                flags=self.get_reg_flags(),
+                handler=handler)
+
+        # yield from reg_event.wait()
+
+        assert self.reg is not None
+
+    def deregister(self):
+        self.reg.deregister()
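+
+# A minimal concrete opdata subscriber (a sketch; NsrCatalogSubscriber and
+# VnfrCatalogSubscriber in this package follow exactly this shape):
+#
+#     class MyOpdataSubscriber(AbstractOpdataSubscriber):
+#         def get_xpath(self):
+#             return "D,/some:opdata/some:obj"    # hypothetical xpath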
+
+
+class AbstractConfigSubscriber(SubscriberDtsHandler):
+    """Abstract class that simplifies the process of creating subscribers
+    for config data.
+
+    A config subscriber can be created in one step by subclassing and
+    implementing the MANDATORY get_xpath() and key_name() methods.
+    """
+    KEY = "msgs"
+
+    @abc.abstractmethod
+    def get_xpath(self):
+        pass
+
+    @abc.abstractmethod
+    def key_name(self):
+        pass
+
+    def get_add_delete_update_cfgs(self, dts_member_reg, xact, key_name):
+        # Unfortunately, it is currently difficult to figure out exactly what
+        # has changed in this xact without Pbdelta support (RIFT-4916).
+        # As a workaround, fetch the pre- and post-xact elements and compare
+        # them to figure out the adds/deletes/updates.
+        xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
+        curr_cfgs = list(dts_member_reg.elements)
+
+        xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
+        curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
+
+        # Find Adds
+        added_keys = set(xact_key_map) - set(curr_key_map)
+        added_cfgs = [xact_key_map[key] for key in added_keys]
+
+        # Find Deletes
+        deleted_keys = set(curr_key_map) - set(xact_key_map)
+        deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
+
+        # Find Updates
+        updated_keys = set(curr_key_map) & set(xact_key_map)
+        updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
+
+        return added_cfgs, deleted_cfgs, updated_cfgs
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for VNFD configuration"""
+
+        def on_apply(dts, acg, xact, action, scratch):
+            """Apply the  configuration"""
+            is_recovery = xact.xact is None and action == rwdts.AppconfAction.INSTALL
+
+
+            add_cfgs, delete_cfgs, update_cfgs = self.get_add_delete_update_cfgs(
+                    dts_member_reg=self.reg,
+                    xact=xact,
+                    key_name=self.key_name())
+
+            if self.callback:
+                for cfg in delete_cfgs:
+                    self.callback(cfg, rwdts.QueryAction.DELETE)
+
+                for cfg in add_cfgs:
+                    self.callback(cfg, rwdts.QueryAction.CREATE)
+
+                for cfg in update_cfgs:
+                    self.callback(cfg, rwdts.QueryAction.UPDATE)
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+            """ on prepare callback """
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
+        with self.dts.appconf_group_create(handler=acg_hdl) as acg:
+            self.reg = acg.register(
+                xpath=self.get_xpath(),
+                flags=self.get_reg_flags(),
+                on_prepare=on_prepare)
+
+    def deregister(self):
+        self.reg.deregister()
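+
+# A minimal concrete config subscriber (a sketch; NsdCatalogSubscriber and
+# VnfdCatalogSubscriber in this package follow exactly this shape):
+#
+#     class MyConfigSubscriber(AbstractConfigSubscriber):
+#         def key_name(self):
+#             return "id"
+#
+#         def get_xpath(self):
+#             return "C,/some:config/some:obj"    # hypothetical xpath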
diff --git a/common/python/rift/mano/dts/subscriber/ns_subscriber.py b/common/python/rift/mano/dts/subscriber/ns_subscriber.py
new file mode 100644 (file)
index 0000000..1078e85
--- /dev/null
@@ -0,0 +1,52 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file ns_subscriber.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 09-Jul-2016
+
+"""
+
+import gi
+gi.require_version('RwDts', '1.0')
+from gi.repository import RwDts as rwdts
+
+from . import core
+
+
+class NsrCatalogSubscriber(core.AbstractOpdataSubscriber):
+    """Nsr Listener """
+
+    def key_name(self):
+        return "ns_instance_config_ref"
+
+    def get_reg_flags(self):
+        # Hack to work around a DTS issue with NSR and RwNsr
+        return rwdts.Flag.SUBSCRIBER|rwdts.Flag.DELTA_READY
+
+    def get_xpath(self):
+        return "D,/nsr:ns-instance-opdata/nsr:nsr"
+
+
+class NsdCatalogSubscriber(core.AbstractConfigSubscriber):
+    """ The network service descriptor DTS handler """
+
+    def key_name(self):
+        return "id"
+
+    def get_xpath(self):
+        return "C,/nsd:nsd-catalog/nsd:nsd"
diff --git a/common/python/rift/mano/dts/subscriber/store.py b/common/python/rift/mano/dts/subscriber/store.py
new file mode 100644 (file)
index 0000000..88cb79a
--- /dev/null
@@ -0,0 +1,119 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file store.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 09-Jul-2016
+
+"""
+
+import asyncio
+import enum
+
+from gi.repository import RwDts as rwdts
+from . import core, ns_subscriber, vnf_subscriber
+
+
+class SubscriberStore(core.SubscriberDtsHandler):
+    """A convenience class that hold all the VNF and NS related config and Opdata
+    """
+    KEY = enum.Enum('KEY', 'NSR NSD VNFD VNFR')
+
+    def __init__(self, log, dts, loop, callback=None):
+        super().__init__(log, dts, loop)
+
+        params = (self.log, self.dts, self.loop)
+
+        self._nsr_sub = ns_subscriber.NsrCatalogSubscriber(*params, callback=self.on_nsr_change)
+        self._nsrs = {}
+        self._nsd_sub = ns_subscriber.NsdCatalogSubscriber(*params)
+
+        self._vnfr_sub = vnf_subscriber.VnfrCatalogSubscriber(*params, callback=self.on_vnfr_change)
+        self._vnfrs = {}
+        self._vnfd_sub = vnf_subscriber.VnfdCatalogSubscriber(*params)
+
+    @property
+    def vnfd(self):
+        return list(self._vnfd_sub.reg.get_xact_elements())
+
+    @property
+    def nsd(self):
+        return list(self._nsd_sub.reg.get_xact_elements())
+
+    @property
+    def vnfr(self):
+        return list(self._vnfrs.values())
+
+    @property
+    def nsr(self):
+        return list(self._nsrs.values())
+
+    def _unwrap(self, values, id_name):
+        try:
+            return values[0]
+        except IndexError:
+            self.log.exception("Unable to find the object with the given "
+                               "ID {}".format(id_name))
+
+    def get_nsr(self, nsr_id):
+        values = [nsr for nsr in self.nsr if nsr.ns_instance_config_ref == nsr_id]
+        return self._unwrap(values, nsr_id)
+
+    def get_nsd(self, nsd_id):
+        values = [nsd for nsd in self.nsd if nsd.id == nsd_id]
+        return self._unwrap(values, nsd_id)
+
+    def get_vnfr(self, vnfr_id):
+        values = [vnfr for vnfr in self.vnfr if vnfr.id == vnfr_id]
+        return self._unwrap(values, vnfr_id)
+
+    def get_vnfd(self, vnfd_id):
+        values = [vnfd for vnfd in self.vnfd if vnfd.id == vnfd_id]
+        return self._unwrap(values, vnfd_id)
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self._vnfd_sub.register()
+        yield from self._nsd_sub.register()
+        yield from self._vnfr_sub.register()
+        yield from self._nsr_sub.register()
+
+    @asyncio.coroutine
+    def refresh_store(self, subscriber, store):
+        itr = yield from self.dts.query_read(subscriber.get_xpath())
+
+        store.clear()
+        for res in itr:
+            result = yield from res
+            result = result.result
+            store[getattr(result, subscriber.key_name())] = result
+
+    def on_nsr_change(self, msg, action):
+        if action == rwdts.QueryAction.DELETE:
+            if msg.ns_instance_config_ref in self._nsrs:
+                del self._nsrs[msg.ns_instance_config_ref]
+            return
+
+        self.loop.create_task(self.refresh_store(self._nsr_sub, self._nsrs))
+
+    def on_vnfr_change(self, msg, action):
+        if action == rwdts.QueryAction.DELETE:
+            if msg.id in self._vnfrs:
+                del self._vnfrs[msg.id]
+            return
+
+        self.loop.create_task(self.refresh_store(self._vnfr_sub, self._vnfrs))
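+
+# Usage sketch (assumes an owning tasklet exposing log/dts/loop; the store
+# keeps its caches current via the NSR/VNFR callbacks above):
+#
+#     store = SubscriberStore.from_tasklet(self)
+#     yield from store.register()
+#     nsd = store.get_nsd(nsd_id)            # nsd_id is hypothetical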
diff --git a/common/python/rift/mano/dts/subscriber/test/utest_subscriber_dts.py b/common/python/rift/mano/dts/subscriber/test/utest_subscriber_dts.py
new file mode 100644 (file)
index 0000000..a69a00f
--- /dev/null
@@ -0,0 +1,241 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import sys
+import types
+import unittest
+import uuid
+
+
+import rift.tasklets
+import rift.test.dts
+import rift.mano.dts as store
+
+import gi
+gi.require_version('RwDtsYang', '1.0')
+from gi.repository import (
+        RwLaunchpadYang as launchpadyang,
+        RwDts as rwdts,
+        RwVnfdYang,
+        RwVnfrYang,
+        RwNsrYang,
+        RwNsdYang,
+        VnfrYang
+        )
+
+
+class DescriptorPublisher(object):
+    def __init__(self, log, dts, loop):
+        self.log = log
+        self.loop = loop
+        self.dts = dts
+
+        self._registrations = []
+
+    @asyncio.coroutine
+    def publish(self, w_path, path, desc):
+        ready_event = asyncio.Event(loop=self.loop)
+
+        @asyncio.coroutine
+        def on_ready(regh, status):
+            self.log.debug("Create element: %s, obj-type:%s obj:%s",
+                           path, type(desc), desc)
+            with self.dts.transaction() as xact:
+                regh.create_element(path, desc, xact.xact)
+            self.log.debug("Created element: %s, obj:%s", path, desc)
+            ready_event.set()
+
+        handler = rift.tasklets.DTS.RegistrationHandler(
+                on_ready=on_ready
+                )
+
+        self.log.debug("Registering path: %s, obj:%s", w_path, desc)
+        reg = yield from self.dts.register(
+                w_path,
+                handler,
+                flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ
+                )
+        self._registrations.append(reg)
+        self.log.debug("Registered path : %s", w_path)
+        yield from ready_event.wait()
+
+        return reg
+
+    def unpublish_all(self):
+        self.log.debug("Deregistering all published descriptors")
+        for reg in self._registrations:
+            reg.deregister()
+
+class SubscriberStoreDtsTestCase(rift.test.dts.AbstractDTSTest):
+    @classmethod
+    def configure_schema(cls):
+        return launchpadyang.get_schema()
+
+    @classmethod
+    def configure_timeout(cls):
+        return 240
+
+    def configure_test(self, loop, test_id):
+        self.log.debug("STARTING - %s", test_id)
+        self.tinfo = self.new_tinfo(str(test_id))
+        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+
+        self.tinfo_sub = self.new_tinfo(str(test_id) + "_sub")
+        self.dts_sub = rift.tasklets.DTS(self.tinfo_sub, self.schema, self.loop)
+
+        self.store = store.SubscriberStore(self.log, self.dts, self.loop)
+        self.publisher = DescriptorPublisher(self.log, self.dts, self.loop)
+
+    def tearDown(self):
+        super().tearDown()
+
+    @rift.test.dts.async_test
+    def test_vnfd_handler(self):
+        yield from self.store.register()
+
+        mock_vnfd = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+        mock_vnfd.id = str(uuid.uuid1())
+
+        w_xpath = "C,/vnfd:vnfd-catalog/vnfd:vnfd"
+        xpath = "{}[vnfd:id='{}']".format(w_xpath, mock_vnfd.id)
+        yield from self.publisher.publish(w_xpath, xpath, mock_vnfd)
+
+        yield from asyncio.sleep(5, loop=self.loop)
+        assert len(self.store.vnfd) == 1
+        assert self.store.get_vnfd(self.store.vnfd[0].id) is not None
+
+        yield from self.dts.query_update(xpath, rwdts.XactFlag.ADVISE, mock_vnfd)
+        assert len(self.store.vnfd) == 1
+
+        yield from self.dts.query_delete(xpath, flags=rwdts.XactFlag.ADVISE)
+        assert len(self.store.vnfd) == 0
+
+    @rift.test.dts.async_test
+    def test_vnfr_handler(self):
+        yield from self.store.register()
+
+        mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+        mock_vnfr.id = str(uuid.uuid1())
+
+        w_xpath = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+        xpath = "{}[vnfr:id='{}']".format(w_xpath, mock_vnfr.id)
+        yield from self.publisher.publish(w_xpath, xpath, mock_vnfr)
+
+        yield from asyncio.sleep(5, loop=self.loop)
+        assert len(self.store.vnfr) == 1
+        assert self.store.get_vnfr(self.store.vnfr[0].id) is not None
+
+        yield from self.dts.query_update(xpath, rwdts.XactFlag.ADVISE, mock_vnfr)
+        yield from asyncio.sleep(5, loop=self.loop)
+        assert len(self.store.vnfr) == 1
+
+        yield from self.dts.query_delete(xpath, flags=rwdts.XactFlag.ADVISE)
+        yield from asyncio.sleep(5, loop=self.loop)
+        assert len(self.store.vnfr) == 0
+
+    @rift.test.dts.async_test
+    def test_nsr_handler(self):
+        yield from self.store.register()
+
+        mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr()
+        mock_nsr.ns_instance_config_ref = str(uuid.uuid1())
+        mock_nsr.name_ref = "Foo"
+
+        w_xpath = "D,/nsr:ns-instance-opdata/nsr:nsr"
+        xpath = "{}[nsr:ns-instance-config-ref='{}']".format(w_xpath, mock_nsr.ns_instance_config_ref)
+        yield from self.publisher.publish(w_xpath, xpath, mock_nsr)
+
+        yield from asyncio.sleep(5, loop=self.loop)
+        assert len(self.store.nsr) == 1
+        assert self.store.get_nsr(self.store.nsr[0].ns_instance_config_ref) is not None
+
+        yield from self.dts.query_update(xpath, rwdts.XactFlag.ADVISE, mock_nsr)
+        yield from asyncio.sleep(5, loop=self.loop)
+        assert len(self.store.nsr) == 1
+
+        yield from self.dts.query_delete(xpath, flags=rwdts.XactFlag.ADVISE)
+        yield from asyncio.sleep(5, loop=self.loop)
+        assert len(self.store.nsr) == 0
+
+    @rift.test.dts.async_test
+    def test_nsd_handler(self):
+        yield from self.store.register()
+
+        mock_nsd = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd()
+        mock_nsd.id = str(uuid.uuid1())
+
+        w_xpath = "C,/nsd:nsd-catalog/nsd:nsd"
+        xpath = "{}[nsd:id='{}']".format(w_xpath, mock_nsd.id)
+        yield from self.publisher.publish(w_xpath, xpath, mock_nsd)
+
+        yield from asyncio.sleep(2, loop=self.loop)
+        assert len(self.store.nsd) == 1
+        assert self.store.get_nsd(self.store.nsd[0].id) is not None
+
+        yield from self.dts.query_update(xpath, rwdts.XactFlag.ADVISE, mock_nsd)
+        yield from asyncio.sleep(5, loop=self.loop)
+        assert len(self.store.nsd) == 1
+
+        yield from self.dts.query_delete(xpath, flags=rwdts.XactFlag.ADVISE)
+        yield from asyncio.sleep(5, loop=self.loop)
+        assert len(self.store.nsd) == 0
+
+    @rift.test.dts.async_test
+    def test_vnfr_crash(self):
+        vnf_handler = store.VnfrCatalogSubscriber(self.log, self.dts, self.loop)
+        def get_reg_flags(self):
+            from gi.repository import RwDts as rwdts
+            return rwdts.Flag.SUBSCRIBER|rwdts.Flag.DELTA_READY|rwdts.Flag.CACHE
+
+        vnf_handler.get_reg_flags = types.MethodType(get_reg_flags, vnf_handler)
+
+        # publish
+        yield from vnf_handler.register()
+
+        mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+        mock_vnfr.id = str(uuid.uuid1())
+
+        def mon_xpath(param_id=None):
+            """ Monitoring params xpath """
+            return("D,/vnfr:vnfr-catalog" +
+                   "/vnfr:vnfr[vnfr:id='{}']".format(mock_vnfr.id) +
+                   "/vnfr:monitoring-param" +
+                   ("[vnfr:id='{}']".format(param_id) if param_id else ""))
+
+
+        w_xpath = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+        xpath = "{}[vnfr:id='{}']".format(w_xpath, mock_vnfr.id)
+        yield from self.publisher.publish(w_xpath, xpath, mock_vnfr)
+
+        mock_param = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict({
+                "id": "1"
+            })
+        mock_vnfr.monitoring_param.append(mock_param)
+        yield from self.publisher.publish(w_xpath, xpath, mock_vnfr)
+
+def main(argv=sys.argv[1:]):
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(
+            argv=[__file__] + argv,
+            testRunner=None  # xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+            )
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/common/python/rift/mano/dts/subscriber/vnf_subscriber.py b/common/python/rift/mano/dts/subscriber/vnf_subscriber.py
new file mode 100644 (file)
index 0000000..76a58ab
--- /dev/null
@@ -0,0 +1,51 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file vnf_subscriber.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 09-Jul-2016
+
+"""
+
+import gi
+gi.require_version('RwDts', '1.0')
+from gi.repository import RwDts as rwdts
+
+from . import core
+
+
+class VnfrCatalogSubscriber(core.AbstractOpdataSubscriber):
+    """Vnfr Listener """
+
+    def key_name(self):
+        return "id"
+
+    def get_reg_flags(self):
+        return rwdts.Flag.SUBSCRIBER|rwdts.Flag.DELTA_READY
+
+    def get_xpath(self):
+        return "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+
+
+class VnfdCatalogSubscriber(core.AbstractConfigSubscriber):
+    """VNFD Listener"""
+
+    def key_name(self):
+        return "id"
+
+    def get_xpath(self):
+        return "C,/vnfd:vnfd-catalog/vnfd:vnfd"
diff --git a/common/python/rift/mano/ncclient.py b/common/python/rift/mano/ncclient.py
new file mode 100644 (file)
index 0000000..9b87030
--- /dev/null
@@ -0,0 +1,104 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import time
+import asyncio
+import ncclient
+import ncclient.asyncio_manager
+
+from gi.repository import RwYang
+
+
+class ProxyConnectionError(Exception):
+    pass
+
+
+class NcClient(object):
+    '''Class representing a Netconf Session'''
+
+    OPERATION_TIMEOUT_SECS = 240
+
+    def __init__(self, host, port, username, password, loop):
+        '''Initialize a new Netconf Session instance
+
+        Arguments:
+            host - host ip
+            port - host port
+            username - credentials for accessing the host, username
+            password - credentials for accessing the host, password
+
+        Returns:
+            A newly initialized Netconf session instance
+        '''
+        self.host = host
+        self.port = port
+        self.username = username
+        self.password = password
+        self.loop = loop
+        self._nc_mgr = None
+
+        self._model = RwYang.Model.create_libncx()
+
+    @asyncio.coroutine
+    def connect(self, timeout=240):
+        '''Connect Netconf Session
+
+        Arguments:
+            timeout - maximum time allowed before connect fails [default 240s]
+        '''
+        # logger.info("Connecting to confd (%s) SSH port (%s)", self.host, self.port)
+        if self._nc_mgr:
+            return
+
+        start_time = time.time()
+        while (time.time() - start_time) < timeout:
+            try:
+                self._nc_mgr = yield from ncclient.asyncio_manager.asyncio_connect(
+                    loop=self.loop,
+                    host=self.host,
+                    port=self.port,
+                    username=self.username,
+                    password=self.password,
+                    # Setting allow_agent and look_for_keys to false will skip public key
+                    # authentication, and use password authentication.
+                    allow_agent=False,
+                    look_for_keys=False,
+                    hostkey_verify=False)
+
+                # logger.info("Successfully connected to confd (%s) SSH port (%s)", self.host, self.port)
+                self._nc_mgr.timeout = NcClient.OPERATION_TIMEOUT_SECS
+                return
+
+            except ncclient.NCClientError as e:
+                # logger.debug("Could not connect to (%s) confd ssh port (%s): %s",
+                #         self.host, self.port, str(e))
+                pass
+
+            yield from asyncio.sleep(5, loop=self.loop)
+
+        raise ProxyConnectionError("Could not connect to Confd ({}) ssh port ({}): within the timeout {} sec.".format(
+                self.host, self.port, timeout))
+
+    def convert_to_xml(self, module, yang_obj):
+        schema = getattr(module, "get_schema")
+        self._model.load_schema_ypbc(schema())
+
+        get_xml = getattr(yang_obj, "to_xml_v2")
+
+        return get_xml(self._model)
+
+    @property
+    def manager(self):
+        return self._nc_mgr
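+
+# Usage sketch (host/port/credentials are placeholders; connect() retries
+# every 5 seconds until the timeout expires, then raises
+# ProxyConnectionError):
+#
+#     client = NcClient("10.0.0.1", 2022, "admin", "admin", loop)
+#     yield from client.connect(timeout=60)   # inside a coroutine
+#     mgr = client.manager                    # ncclient manager for NETCONF ops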
diff --git a/common/python/rift/mano/tosca_translator/__init__.py b/common/python/rift/mano/tosca_translator/__init__.py
new file mode 100644 (file)
index 0000000..bc4710b
--- /dev/null
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/tosca_translator/common/__init__.py b/common/python/rift/mano/tosca_translator/common/__init__.py
new file mode 100644 (file)
index 0000000..bc4710b
--- /dev/null
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/tosca_translator/common/exception.py b/common/python/rift/mano/tosca_translator/common/exception.py
new file mode 100644 (file)
index 0000000..554396c
--- /dev/null
@@ -0,0 +1,51 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+#    Copyright 2016 RIFT.io Inc
+
+'''
+Exceptions for the TOSCA Translator package.
+'''
+
+from rift.mano.tosca_translator.common.utils import _
+
+from toscaparser.common.exception import TOSCAException
+
+
+class ConfFileParseError(TOSCAException):
+    msg_fmt = _('%(message)s')
+
+
+class ConfOptionNotDefined(TOSCAException):
+    msg_fmt = _('Option %(key)s in section %(section)s '
+                'is not defined in conf file')
+
+
+class ConfSectionNotDefined(TOSCAException):
+    msg_fmt = _('Section %(section)s is not defined in conf file')
+
+
+class ToscaModImportError(TOSCAException):
+    msg_fmt = _('Unable to import module %(mod_name)s. '
+                'Check to see that it exists and has no '
+                'language definition errors.')
+
+
+class ToscaClassImportError(TOSCAException):
+    msg_fmt = _('Unable to import class %(name)s in '
+                'module %(mod_name)s. Check to see that it '
+                'exists and has no language definition errors.')
+
+
+class ToscaClassAttributeError(TOSCAException):
+    msg_fmt = _('Class attribute referenced not found. '
+                '%(message)s. Check to see that it is defined.')
diff --git a/common/python/rift/mano/tosca_translator/common/utils.py b/common/python/rift/mano/tosca_translator/common/utils.py
new file mode 100644 (file)
index 0000000..c0ed2d0
--- /dev/null
@@ -0,0 +1,456 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+#    Copyright 2016 RIFT.io Inc
+
+
+import gettext
+import json
+import logging
+import math
+import numbers
+import os
+import re
+import requests
+from six.moves.urllib.parse import urlparse
+import yaml
+
+from hashlib import md5
+from hashlib import sha256
+
+import toscaparser.utils.yamlparser
+
+_localedir = os.environ.get('tosca-translator'.upper() + '_LOCALEDIR')
+_t = gettext.translation('tosca-translator', localedir=_localedir,
+                         fallback=True)
+
+
+def _(msg):
+    return _t.gettext(msg)
+
+
+YAML_ORDER_PARSER = toscaparser.utils.yamlparser.simple_ordered_parse
+log = logging.getLogger('tosca-translator')
+
+# Required environment variables to create openstackclient object.
+ENV_VARIABLES = ['OS_AUTH_URL', 'OS_PASSWORD', 'OS_USERNAME', 'OS_TENANT_NAME']
+
+
+class MemoryUnit(object):
+
+    UNIT_SIZE_DEFAULT = 'B'
+    UNIT_SIZE_DICT = {'B': 1, 'kB': 1000, 'KiB': 1024, 'MB': 1000000,
+                      'MiB': 1048576, 'GB': 1000000000,
+                      'GiB': 1073741824, 'TB': 1000000000000,
+                      'TiB': 1099511627776}
+
+    @staticmethod
+    def convert_unit_size_to_num(size, unit=None):
+        """Convert given size to a number representing given unit.
+
+        If unit is None, convert to a number representing UNIT_SIZE_DEFAULT
+        :param size: unit size e.g. 1 TB
+        :param unit: unit to be converted to e.g GB
+        :return: converted number e.g. 1000 for 1 TB size and unit GB
+        """
+        if unit:
+            unit = MemoryUnit.validate_unit(unit)
+        else:
+            unit = MemoryUnit.UNIT_SIZE_DEFAULT
+            log.info(_('A memory unit is not provided for size; using the '
+                       'default unit %(default)s.') % {'default': 'B'})
+        regex = re.compile(r'(\d*)\s*(\w*)')
+        result = regex.match(str(size)).groups()
+        if result[1]:
+            unit_size = MemoryUnit.validate_unit(result[1])
+            converted = int(str_to_num(result[0])
+                            * MemoryUnit.UNIT_SIZE_DICT[unit_size]
+                            * math.pow(MemoryUnit.UNIT_SIZE_DICT
+                                       [unit], -1))
+            log.info(_('Given size %(size)s is converted to %(num)s '
+                       '%(unit)s.') % {'size': size,
+                     'num': converted, 'unit': unit})
+        else:
+            converted = (str_to_num(result[0]))
+        return converted
+
+    @staticmethod
+    def validate_unit(unit):
+        if unit in MemoryUnit.UNIT_SIZE_DICT.keys():
+            return unit
+        else:
+            for key in MemoryUnit.UNIT_SIZE_DICT.keys():
+                if key.upper() == unit.upper():
+                    return key
+
+            msg = _('Provided unit "{0}" is not valid. The valid units are'
+                    ' {1}').format(unit, MemoryUnit.UNIT_SIZE_DICT.keys())
+            log.error(msg)
+            raise ValueError(msg)
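+
+# Worked examples (a sketch, derived from UNIT_SIZE_DICT above):
+#
+#     MemoryUnit.convert_unit_size_to_num('1 TB', 'GB')     # -> 1000
+#     MemoryUnit.convert_unit_size_to_num('512 MiB', 'MB')  # -> 536
+#     MemoryUnit.convert_unit_size_to_num('2 GiB')          # -> 2147483648 (B)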
+
+
+class CompareUtils(object):
+
+    MISMATCH_VALUE1_LABEL = "<Expected>"
+    MISMATCH_VALUE2_LABEL = "<Provided>"
+    ORDERLESS_LIST_KEYS = ['allowed_values', 'depends_on']
+
+    @staticmethod
+    def compare_dicts(dict1, dict2):
+        """Return False if not equal, True if both are equal."""
+
+        if dict1 is None and dict2 is None:
+            return True
+        if dict1 is None or dict2 is None:
+            return False
+
+        both_equal = True
+        for dict1_item, dict2_item in zip(dict1.items(), dict2.items()):
+            if dict1_item != dict2_item:
+                msg = (_("%(label1)s: %(item1)s \n is not equal to \n:"
+                         "%(label2)s: %(item2)s")
+                       % {'label1': CompareUtils.MISMATCH_VALUE2_LABEL,
+                          'item1': dict1_item,
+                          'label2': CompareUtils.MISMATCH_VALUE1_LABEL,
+                          'item2': dict2_item})
+                log.warning(msg)
+                both_equal = False
+                break
+        return both_equal
+
+    @staticmethod
+    def compare_mano_yamls(generated_yaml, expected_yaml):
+        mano_translated_dict = YAML_ORDER_PARSER(generated_yaml)
+        mano_expected_dict = YAML_ORDER_PARSER(expected_yaml)
+        return CompareUtils.compare_dicts(mano_translated_dict,
+                                          mano_expected_dict)
+
+    @staticmethod
+    def reorder(dic):
+        '''Canonicalize list items in the dictionary for ease of comparison.
+
+        For properties whose value is a list in which the order does not
+        matter, some pre-processing is required to bring those lists into a
+        canonical format. We use sorting just to make sure such differences
+        in ordering do not cause a mismatch.
+        '''
+
+        if type(dic) is not dict:
+            return None
+
+        reordered = {}
+        for key in dic.keys():
+            value = dic[key]
+            if type(value) is dict:
+                reordered[key] = CompareUtils.reorder(value)
+            elif type(value) is list \
+                and key in CompareUtils.ORDERLESS_LIST_KEYS:
+                reordered[key] = sorted(value)
+            else:
+                reordered[key] = value
+        return reordered
+
+    @staticmethod
+    def diff_dicts(dict1, dict2, reorder=True):
+        '''Compares two dictionaries and returns their differences.
+
+        Returns a dictionary of mismatches between the two dictionaries.
+        An empty dictionary is returned if two dictionaries are equivalent.
+        The reorder parameter indicates whether reordering is required
+        before comparison.
+        '''
+
+        if reorder:
+            dict1 = CompareUtils.reorder(dict1)
+            dict2 = CompareUtils.reorder(dict2)
+
+        if dict1 is None and dict2 is None:
+            return {}
+        if dict1 is None or dict2 is None:
+            return {CompareUtils.MISMATCH_VALUE1_LABEL: dict1,
+                    CompareUtils.MISMATCH_VALUE2_LABEL: dict2}
+
+        diff = {}
+        keys1 = set(dict1.keys())
+        keys2 = set(dict2.keys())
+        for key in keys1.union(keys2):
+            if key in keys1 and key not in keys2:
+                diff[key] = {CompareUtils.MISMATCH_VALUE1_LABEL: dict1[key],
+                             CompareUtils.MISMATCH_VALUE2_LABEL: None}
+            elif key not in keys1 and key in keys2:
+                diff[key] = {CompareUtils.MISMATCH_VALUE1_LABEL: None,
+                             CompareUtils.MISMATCH_VALUE2_LABEL: dict2[key]}
+            else:
+                val1 = dict1[key]
+                val2 = dict2[key]
+                if val1 != val2:
+                    if type(val1) is dict and type(val2) is dict:
+                        diff[key] = CompareUtils.diff_dicts(val1, val2, False)
+                    else:
+                        diff[key] = {CompareUtils.MISMATCH_VALUE1_LABEL: val1,
+                                     CompareUtils.MISMATCH_VALUE2_LABEL: val2}
+        return diff
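+
+# Worked example (labels taken from the MISMATCH_VALUE*_LABEL constants):
+#
+#     CompareUtils.diff_dicts({'a': 1, 'b': 2}, {'a': 1, 'b': 3})
+#     # -> {'b': {'<Expected>': 2, '<Provided>': 3}}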
+
+
+class YamlUtils(object):
+
+    @staticmethod
+    def get_dict(yaml_file):
+        '''Returns the dictionary representation of the given YAML spec.'''
+        try:
+            with open(yaml_file) as f:
+                return yaml.load(f)
+        except IOError:
+            return None
+
+    @staticmethod
+    def compare_yamls(yaml1_file, yaml2_file):
+        '''Returns true if two dictionaries are equivalent, false otherwise.'''
+        dict1 = YamlUtils.get_dict(yaml1_file)
+        dict2 = YamlUtils.get_dict(yaml2_file)
+        return CompareUtils.compare_dicts(dict1, dict2)
+
+    @staticmethod
+    def compare_yaml_dict(yaml_file, dic):
+        '''Returns true if yaml matches the dictionary, false otherwise.'''
+        return CompareUtils.compare_dicts(YamlUtils.get_dict(yaml_file), dic)
+
+
+class TranslationUtils(object):
+
+    @staticmethod
+    def compare_tosca_translation_with_mano(tosca_file, mano_file, params):
+        '''Verify tosca translation against the given mano specification.
+
+        inputs:
+        tosca_file: relative local path or URL to the tosca input file
+        mano_file: relative path to expected mano output
+        params: dictionary of parameter name value pairs
+
+        Returns as a dictionary the difference between the MANO translation
+        of the given tosca_file and the given mano_file.
+        '''
+
+        from toscaparser.tosca_template import ToscaTemplate
+        from tosca_translator.mano.tosca_translator import TOSCATranslator
+
+        tosca_tpl = os.path.normpath(os.path.join(
+            os.path.dirname(os.path.abspath(__file__)), tosca_file))
+        a_file = os.path.isfile(tosca_tpl)
+        if not a_file:
+            tosca_tpl = tosca_file
+
+        expected_mano_tpl = os.path.join(
+            os.path.dirname(os.path.abspath(__file__)), mano_file)
+
+        tosca = ToscaTemplate(tosca_tpl, params, a_file)
+        translate = TOSCATranslator(tosca, params)
+
+        output = translate.translate()
+        output_dict = toscaparser.utils.yamlparser.simple_parse(output)
+        expected_output_dict = YamlUtils.get_dict(expected_mano_tpl)
+        return CompareUtils.diff_dicts(output_dict, expected_output_dict)
+
+
+class UrlUtils(object):
+
+    @staticmethod
+    def validate_url(path):
+        """Validates whether the given path is a URL or not.
+
+        If the given path includes a scheme (http, https, ftp, ...) and a net
+        location (a domain name such as www.github.com) it is validated as a
+        URL.
+        """
+        parsed = urlparse(path)
+        return bool(parsed.scheme) and bool(parsed.netloc)
+
+
+class ChecksumUtils(object):
+
+    @staticmethod
+    def get_md5(input_file_name, log=None):
+        chunk_size = 1048576  # 1024 * 1024 = 1048576 bytes = 1 MiB
+        file_md5_checksum = md5()
+        try:
+            with open(input_file_name, "rb") as f:
+                byte = f.read(chunk_size)
+                # previous_byte = byte
+                byte_size = len(byte)
+                file_read_iterations = 1
+                while byte:
+                    file_md5_checksum.update(byte)
+                    # previous_byte = byte
+                    byte = f.read(chunk_size)
+                    byte_size += len(byte)
+                    file_read_iterations += 1
+
+            cksum = file_md5_checksum.hexdigest()
+            if log:
+                log.debug(_("MD5 for {0} with size {1} (iter:{2}): {3}").
+                          format(input_file_name, byte_size,
+                                 file_read_iterations, cksum))
+            return cksum
+        except IOError:
+            if log:
+                log.error(_('File could not be opened: {0}').
+                          format(input_file_name))
+                return None
+            raise
+
+    @staticmethod
+    def get_sha256(input_file_name, log=None):
+        chunk_size = 1048576  # 1024 * 1024 bytes = 1 MiB
+        file_sha256_checksum = sha256()
+        try:
+            with open(input_file_name, "rb") as f:
+                byte = f.read(chunk_size)
+                byte_size = len(byte)
+                file_read_iterations = 1
+                while byte:
+                    file_sha256_checksum.update(byte)
+                    byte = f.read(chunk_size)
+                    byte_size += len(byte)
+                    file_read_iterations += 1
+
+            cksum = file_sha256_checksum.hexdigest()
+            if log:
+                log.debug(_("SHA256 for {0} with size {1} (iter:{2}): {3}").
+                          format(input_file_name, byte_size,
+                                 file_read_iterations, cksum))
+            return cksum
+        except IOError:
+            if log:
+                log.error(_('File could not be opened: {0}').
+                          format(input_file_name))
+                return None
+            raise
+
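+# Usage sketch for ChecksumUtils (path is hypothetical); both helpers
+# stream the file in 1 MiB chunks, so large images need not fit in memory:
+#
+#     cksum = ChecksumUtils.get_md5('/tmp/fedora.qcow2')
+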
+
+def str_to_num(value):
+    """Convert a string representation of a number into a numeric type."""
+    if isinstance(value, numbers.Number):
+        return value
+    try:
+        return int(value)
+    except ValueError:
+        return float(value)
+
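+# Doctest-style sketch:
+#
+#     >>> str_to_num('42')
+#     42
+#     >>> str_to_num('2.5')
+#     2.5
+#     >>> str_to_num(7)
+#     7
+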
+
+def check_for_env_variables():
+    # All required OpenStack variables must be present in the environment
+    # (subset-or-equal, so an exact match also passes).
+    return set(ENV_VARIABLES) <= set(os.environ.keys())
+
+
+def get_ks_access_dict():
+    tenant_name = os.getenv('OS_TENANT_NAME')
+    username = os.getenv('OS_USERNAME')
+    password = os.getenv('OS_PASSWORD')
+    auth_url = os.getenv('OS_AUTH_URL')
+
+    auth_dict = {
+        "auth": {
+            "tenantName": tenant_name,
+            "passwordCredentials": {
+                "username": username,
+                "password": password
+            }
+        }
+    }
+    headers = {'Content-Type': 'application/json'}
+    try:
+        keystone_response = requests.post(auth_url + '/tokens',
+                                          data=json.dumps(auth_dict),
+                                          headers=headers)
+        if keystone_response.status_code != 200:
+            return None
+        return json.loads(keystone_response.content)
+    except Exception:
+        return None
+
+
+def get_url_for(access_dict, service_type):
+    if access_dict is None:
+        return None
+    service_catalog = access_dict['access']['serviceCatalog']
+    service_url = ''
+    for service in service_catalog:
+        if service['type'] == service_type:
+            service_url = service['endpoints'][0]['publicURL']
+            break
+    return service_url
+
+
+def get_token_id(access_dict):
+    if access_dict is None:
+        return None
+    return access_dict['access']['token']['id']
+
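+# The three helpers above walk a Keystone v2.0 token response; a trimmed
+# sketch of the expected shape (values are placeholders):
+#
+#     access_dict = {
+#         'access': {
+#             'token': {'id': 'TOKEN-ID'},
+#             'serviceCatalog': [
+#                 {'type': 'image',
+#                  'endpoints': [{'publicURL': 'http://example.com:9292'}]},
+#             ],
+#         }
+#     }
+#     get_url_for(access_dict, 'image')  # -> 'http://example.com:9292'
+#     get_token_id(access_dict)          # -> 'TOKEN-ID'
+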
+
+def map_name_to_python(name):
+    if name == 'type':
+        return 'type_yang'
+    return name.replace('-', '_')
+
+def convert_keys_to_python(d):
+    '''Change all keys from - to _'''
+    if isinstance(d, dict):
+        dic = {}
+        for key in d.keys():
+            dic[map_name_to_python(key)] = convert_keys_to_python(d[key])
+        return dic
+    elif isinstance(d, list):
+        arr = []
+        for memb in d:
+            arr.append(convert_keys_to_python(memb))
+        return arr
+    else:
+        return d
+
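+# Sketch: convert_keys_to_python({'vm-flavor': {'vcpu-count': 1},
+#                                 'type': 'vdu'})
+# returns {'vm_flavor': {'vcpu_count': 1}, 'type_yang': 'vdu'}; 'type' is
+# mapped to 'type_yang', presumably because 'type' is reserved in the
+# generated YANG GI classes.
+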
+def map_name_to_yang(name):
+    return name.replace('_', '-')
+
+def convert_keys_to_yang(d):
+    '''Change all keys from _ to -'''
+    if isinstance(d, dict):
+        dic = {}
+        for key in d.keys():
+            dic[map_name_to_yang(key)] = convert_keys_to_yang(d[key])
+        return dic
+    elif isinstance(d, list):
+        arr = []
+        for memb in d:
+            arr.append(convert_keys_to_yang(memb))
+        return arr
+    else:
+        return d
+
+
+def dict_convert_values_to_str(d):
+    '''Convert all leaf values to str'''
+    if isinstance(d, dict):
+        for key in d.keys():
+            d[key] = dict_convert_values_to_str(d[key])
+        return d
+    elif isinstance(d, list):
+        arr = []
+        for memb in d:
+            arr.append(dict_convert_values_to_str(memb))
+        return arr
+    else:
+        return str(d)
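+
+# Sketch: dict_convert_values_to_str({'version': 1.0, 'count': 2}) returns
+# {'version': '1.0', 'count': '2'}; keys are untouched and nested
+# dicts/lists are converted recursively.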
diff --git a/common/python/rift/mano/tosca_translator/compare_desc.py b/common/python/rift/mano/tosca_translator/compare_desc.py
new file mode 100644 (file)
index 0000000..0886b85
--- /dev/null
@@ -0,0 +1,114 @@
+#
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import argparse
+import json
+import logging
+import logging.config
+import pprint
+import sys
+
+from deepdiff import DeepDiff
+
+from rift.mano.tosca_translator.common.utils import _
+
+
+class CompareDescShell(object):
+
+    SUPPORTED_TYPES = ['json']
+    INDENT = 2
+    DIFF_KEYS = (REMOVED_ITEMS, ADDED_ITEMS, TYPE_CHANGES, VALUES_CHANGED) = \
+                ('dic_item_removed', 'dic_item_added', 'type_changes',
+                 'values_changed')
+    DIFF_MAP = {REMOVED_ITEMS: 'Items removed',
+                ADDED_ITEMS: 'Items added',
+                TYPE_CHANGES: 'Changes in types',
+                VALUES_CHANGED: 'Changes in values'}
+    # Currently, changes in removed keys and changes in types are
+    # treated as errors.
+    ERROR_ITEMS = [REMOVED_ITEMS, TYPE_CHANGES]
+
+    def main(self, log, args):
+        self.log = log
+        print("Args: {}".format(args))
+        self.log.debug(_("Args: {0}").format(args))
+        if args.type not in self.SUPPORTED_TYPES:
+            self.log.error(_("Unsupported file type {0}").
+                           format(args.type))
+            sys.exit(1)
+
+        with open(args.generated_file) as g:
+            gen_data = g.read()
+            json_gen = json.loads(gen_data)
+            self.log.debug(_("Generated: {0}").format(json_gen))
+
+        with open(args.expected_file) as e:
+            exp_data = e.read()
+            json_exp = json.loads(exp_data)
+            self.log.debug(_("Expected: {0}").format(json_exp))
+
+        diff = DeepDiff(json_exp, json_gen)
+        self.log.debug(_("Keys in diff: {0}").format(diff.keys()))
+        self.log.info(_("Differences:\n"))
+
+        d = pprint.pformat(diff, indent=self.INDENT)
+        self.log.info("Differences:\n{0}".format(d))
+
+        if len(set(self.ERROR_ITEMS).intersection(diff.keys())):
+            diff_str = pprint.pformat(diff)
+            msg = _("Found item changes: {0}").format(diff_str)
+            self.log.error(msg)
+            raise ValueError(msg)
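+
+# CLI usage sketch (file names are hypothetical):
+#
+#     python compare_desc.py -g gen_nsd.json -e expected_nsd.json --debug
+#
+# A diff containing removed items or type changes raises ValueError.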
+
+
+def main(args=None):
+    parser = argparse.ArgumentParser(
+        description='Validate descriptors by comparing')
+    parser.add_argument(
+        "-g",
+        "--generated-file",
+        required=True,
+        help="Generated descriptor file")
+    parser.add_argument(
+        "-e",
+        "--expected-file",
+        required=True,
+        help="Descriptor to compare")
+    parser.add_argument(
+        "-t",
+        "--type",
+        default='json',
+        help="File type. Default json")
+    parser.add_argument(
+        "--debug",
+        help="Enable debug logging",
+        action="store_true")
+    # parse_args(None) falls back to sys.argv automatically
+    args = parser.parse_args(args)
+
+    if args.debug:
+        logging.basicConfig(level=logging.DEBUG)
+    else:
+        logging.basicConfig(level=logging.ERROR)
+    log = logging.getLogger("rwmano-translator")
+
+    CompareDescShell().main(log, args)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/common/python/rift/mano/tosca_translator/conf/__init__.py b/common/python/rift/mano/tosca_translator/conf/__init__.py
new file mode 100644 (file)
index 0000000..db99bc7
--- /dev/null
@@ -0,0 +1,39 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+
+''' Initialize the global configuration for the translator '''
+
+import os
+
+from rift.mano.tosca_translator.conf.config import ConfigProvider
+
+CONF_FILENAME = 'translator.conf'
+
+
+def init_global_conf():
+    '''Initialize the configuration provider.
+
+    Allows the configuration to be shared throughout the translator code.
+    The file used is translator.conf, and is within the conf/ directory. It
+    is a standard ini format, and is processed using the ConfigParser module.
+
+    '''
+    conf_path = os.path.dirname(os.path.abspath(__file__))
+    conf_file = os.path.join(conf_path, CONF_FILENAME)
+    ConfigProvider._load_config(conf_file)
+
+
+init_global_conf()
diff --git a/common/python/rift/mano/tosca_translator/conf/config.py b/common/python/rift/mano/tosca_translator/conf/config.py
new file mode 100644 (file)
index 0000000..dd80a9c
--- /dev/null
@@ -0,0 +1,71 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+
+''' Provide a global configuration for the TOSCA translator'''
+
+from six.moves import configparser
+
+import rift.mano.tosca_translator.common.exception as exception
+
+from rift.mano.tosca_translator.common.utils import _
+
+
+class ConfigProvider(object):
+    '''Global config proxy that wraps a ConfigParser object.
+
+    Allows for class based access to config values. Should only be initialized
+    once using the corresponding translator.conf file in the conf directory.
+
+    '''
+
+    # List that captures all of the conf file sections.
+    # Append any new sections to this list.
+    _sections = ['DEFAULT']
+    _translator_config = None
+
+    @classmethod
+    def _load_config(cls, conf_file):
+        '''Private method only to be called once from the __init__ module'''
+
+        cls._translator_config = configparser.ConfigParser()
+        try:
+            cls._translator_config.read(conf_file)
+        except configparser.ParsingError:
+            msg = _('Unable to parse translator.conf file. '
+                    'Check to see that it exists in the conf directory.')
+            raise exception.ConfFileParseError(message=msg)
+
+    @classmethod
+    def get_value(cls, section, key):
+        try:
+            value = cls._translator_config.get(section, key)
+        except configparser.NoOptionError:
+            raise exception.ConfOptionNotDefined(key=key, section=section)
+        except configparser.NoSectionError:
+            raise exception.ConfSectionNotDefined(section=section)
+
+        return value
+
+    @classmethod
+    def get_all_values(cls):
+        values = []
+        for section in cls._sections:
+            try:
+                values.extend(cls._translator_config.items(section=section))
+            except configparser.NoSectionError:
+                raise exception.ConfSectionNotDefined(section=section)
+
+        return values
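+
+# Usage sketch, reading the one key shipped in conf/translator.conf:
+#
+#     ConfigProvider.get_value('DEFAULT', 'custom_types_location')
+#     # -> 'rift/mano/tosca_translator/custom/rwmano'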
diff --git a/common/python/rift/mano/tosca_translator/conf/translator.conf b/common/python/rift/mano/tosca_translator/conf/translator.conf
new file mode 100644 (file)
index 0000000..95a416a
--- /dev/null
@@ -0,0 +1,4 @@
+[DEFAULT]
+
+# Relative path location for custom types
+custom_types_location=rift/mano/tosca_translator/custom/rwmano
\ No newline at end of file
diff --git a/common/python/rift/mano/tosca_translator/custom/__init__.py b/common/python/rift/mano/tosca_translator/custom/__init__.py
new file mode 100644 (file)
index 0000000..bc4710b
--- /dev/null
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/tosca_translator/custom/rwmano/__init__.py b/common/python/rift/mano/tosca_translator/custom/rwmano/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/common/python/rift/mano/tosca_translator/rwmano/__init__.py b/common/python/rift/mano/tosca_translator/rwmano/__init__.py
new file mode 100644 (file)
index 0000000..bc4710b
--- /dev/null
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/tosca_translator/rwmano/syntax/__init__.py b/common/python/rift/mano/tosca_translator/rwmano/syntax/__init__.py
new file mode 100644 (file)
index 0000000..bc4710b
--- /dev/null
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_output.py b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_output.py
new file mode 100644 (file)
index 0000000..a065da1
--- /dev/null
@@ -0,0 +1,31 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+
+class ManoOutput(object):
+    '''Attributes for RIFT.io MANO output section.'''
+
+    def __init__(self, log, name, value, description=None):
+        self.log = log
+        self.name = name
+        self.value = value
+        self.description = description
+
+    def __str__(self):
+        return "%s(%s)" % (self.name, self.value)
+
+    def get_dict_output(self):
+        return {self.name: {'value': self.value,
+                            'description': self.description}}
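+
+# Sketch (hypothetical values; log is a standard logger):
+#
+#     out = ManoOutput(log, 'mgmt_ip', '10.0.0.1', description='Mgmt address')
+#     out.get_dict_output()
+#     # -> {'mgmt_ip': {'value': '10.0.0.1', 'description': 'Mgmt address'}}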
diff --git a/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_parameter.py b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_parameter.py
new file mode 100644 (file)
index 0000000..aa6b83b
--- /dev/null
@@ -0,0 +1,62 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+from collections import OrderedDict
+
+from rift.mano.tosca_translator.common.utils import _
+
+
+KEYS = (TYPE, DESCRIPTION, DEFAULT, CONSTRAINTS, HIDDEN, LABEL) = \
+       ('type', 'description', 'default', 'constraints', 'hidden', 'label')
+
+
+class ManoParameter(object):
+    '''Attributes for RIFT.io MANO parameter section.'''
+
+    def __init__(self, log, name, type, label=None, description=None,
+                 default=None, hidden=None, constraints=None):
+        self.log = log
+        self.name = name
+        self.type = type
+        self.label = label
+        self.description = description
+        self.default = default
+        self.hidden = hidden
+        self.constraints = constraints
+        log.info(_('Initialized the input parameters.'))
+
+    def __str__(self):
+        return "%s(%s,%s)" % (self.name, self.type, self.label)
+
+    # TODO(Philip): Hardcoding for now, need to make this generic
+    def get_xpath(self):
+        xpath = '/nsd:nsd-catalog/nsd:nsd/nsd:' + self.name
+        return xpath
+
+    def get_dict_output(self):
+        param_sections = OrderedDict()
+        param_sections[TYPE] = self.type
+        if self.label:
+            param_sections[LABEL] = self.label
+        if self.description:
+            param_sections[DESCRIPTION] = self.description
+        if self.default:
+            param_sections[DEFAULT] = self.default
+        if self.hidden:
+            param_sections[HIDDEN] = self.hidden
+        if self.constraints:
+            param_sections[CONSTRAINTS] = self.constraints
+
+        return {self.name: param_sections}
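+
+# Sketch (hypothetical values; log is a standard logger); only the keys
+# that were set appear in the output, in the KEYS order above:
+#
+#     p = ManoParameter(log, 'vendor', 'string', default='RIFT.io')
+#     p.get_xpath()        # -> '/nsd:nsd-catalog/nsd:nsd/nsd:vendor'
+#     p.get_dict_output()  # -> {'vendor': {'type': 'string',
+#                          #                'default': 'RIFT.io'}}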
diff --git a/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_resource.py b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_resource.py
new file mode 100644 (file)
index 0000000..1606f7f
--- /dev/null
@@ -0,0 +1,374 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+
+import uuid
+
+from collections import OrderedDict
+
+import six
+
+from rift.mano.tosca_translator.common.utils import _
+
+from toscaparser.common.exception import ValidationError
+from toscaparser.elements.interfaces import InterfacesDef
+from toscaparser.functions import GetInput
+
+
+SECTIONS = (TYPE, PROPERTIES, METADATA, DEPENDS_ON, UPDATE_POLICY,
+            DELETION_POLICY) = \
+           ('type', 'properties', 'metadata',
+            'depends_on', 'update_policy', 'deletion_policy')
+
+
+class ManoResource(object):
+    '''Base class for TOSCA node type translation to RIFT.io MANO type.'''
+
+    def __init__(self,
+                 log,
+                 nodetemplate,
+                 name=None,
+                 type_=None,
+                 properties=None,
+                 metadata=None,
+                 artifacts=None,
+                 depends_on=None,
+                 update_policy=None,
+                 deletion_policy=None):
+        self.log = log
+        self.nodetemplate = nodetemplate
+        if name:
+            self.name = name
+        else:
+            self.name = nodetemplate.name
+        self.type_ = type_
+        self._id = None
+        # _vendor must exist; the vendor property below reads it lazily.
+        self._vendor = None
+        self._version = None
+        self.properties = properties or {}
+        self.metadata = metadata
+        self._artifacts = artifacts
+
+        # The difference between depends_on and depends_on_nodes is
+        # that depends_on defines a dependency in the context of the
+        # output template and is used when the template is written out.
+        # depends_on_nodes defines the direct dependency between TOSCA
+        # nodes and is used only for internal processing, not in the
+        # output. When a TOSCA node depends on another node it is
+        # always added to depends_on_nodes but not always to
+        # depends_on. For example, if the source of the dependency is
+        # a server, the dependency is added as properties.get_resource
+        # and not depends_on.
+        if depends_on:
+            self.depends_on = depends_on
+            self.depends_on_nodes = depends_on
+        else:
+            self.depends_on = []
+            self.depends_on_nodes = []
+        self.update_policy = update_policy
+        self.deletion_policy = deletion_policy
+        self.group_dependencies = {}
+        self.operations = {}
+        # if hide_resource is set to true, then this resource will not be
+        # generated in the output yaml.
+        self.hide_resource = False
+        log.debug(_('Translating TOSCA node %(name)s of type %(type)s') %
+                  {'name': self.name,
+                   'type': self.type_})
+
+    # Added the property methods below to support code that works on
+    # both toscaparser.NodeType and translator.ManoResource objects.
+    @property
+    def type(self):
+        return self.type_
+
+    @type.setter
+    def type(self, value):
+        self.type_ = value
+
+    def get_type(self):
+        return self.type_
+
+    @property
+    def id(self):
+        if self._id is None:
+            self._id = str(uuid.uuid1())
+        return self._id
+
+    @property
+    def description(self):
+        return _("Translated from TOSCA")
+
+    @property
+    def vendor(self):
+        if self._vendor is None:
+            if self.metadata and 'vendor' in self.metadata:
+                self._vendor = self.metadata['vendor']
+            else:
+                self._vendor = "RIFT.io"
+        return self._vendor
+
+    @property
+    def version(self):
+        if self._version is None:
+            if self.metadata and 'version' in self.metadata:
+                self._version = str(self.metadata['version'])
+            else:
+                self._version = '1.0'
+        return self._version
+
+    @property
+    def artifacts(self):
+        return self._artifacts
+
+    @artifacts.setter
+    def artifacts(self, value):
+        self._artifacts = value
+
+    def __str__(self):
+        return "%s(%s)"%(self.name, self.type)
+
+    def map_tosca_name_to_mano(self, name):
+        new_name = name.replace("_", "-")
+        return new_name
+
+    def map_keys_to_mano(self, d):
+        if isinstance(d, dict):
+            # Iterate over a snapshot of the keys; the dict is mutated
+            # (pop/insert) while the keys are renamed.
+            for key in list(d.keys()):
+                d[self.map_tosca_name_to_mano(key)] = \
+                                    self.map_keys_to_mano(d.pop(key))
+            return d
+        elif isinstance(d, list):
+            arr = []
+            for memb in d:
+                arr.append(self.map_keys_to_mano(memb))
+            return arr
+        else:
+            return d
+
+    def validate_properties(self, properties, required=None, optional=None):
+        if not isinstance(properties, dict):
+            err_msg = _("Properties for {0}({1}) is not right type"). \
+                      format(self.name, self.type_)
+            self.log.error(err_msg)
+            raise ValidationError(message=err_msg)
+
+        if required:
+            # Check if the required properties are present
+            if not set(required).issubset(properties.keys()):
+                for key in required:
+                    if key not in properties:
+                        err_msg = _("Property {0} is not defined "
+                                    "for {1}({2})"). \
+                                  format(key, self.name, self.type_)
+                        self.log.error(err_msg)
+                        raise ValidationError(message=err_msg)
+
+            # Check for unknown properties: warn on any key found in
+            # neither the required nor the optional list.
+            for key in properties.keys():
+                if (key not in required and
+                        key not in (optional or [])):
+                    self.log.warn(_("Property {0} not supported for {1}({2}), "
+                                    "will be ignored.").
+                                  format(key, self.name, self.type_))
+
+    def handle_properties(self):
+        pass
+
+    def handle_artifacts(self):
+        pass
+
+    def handle_capabilities(self):
+        pass
+
+    def handle_requirements(self, nodes):
+        pass
+
+    def handle_interfaces(self):
+        pass
+
+    def update_image_checksum(self, in_file):
+        pass
+
+    def generate_yang_model(self, nsd, vnfds, use_gi=False):
+        """Generate yang model for the node"""
+        self.log.debug(_("{0}: Not doing anything for YANG model generation").
+                       format(self))
+
+    def get_supporting_files(self, files, desc_id=None):
+        pass
+
+    def top_of_chain(self):
+        dependent = self.group_dependencies.get(self)
+        if dependent is None:
+            return self
+        else:
+            return dependent.top_of_chain()
+
+    def get_dict_output(self):
+        resource_sections = OrderedDict()
+        resource_sections[TYPE] = self.type
+        if self.properties:
+            resource_sections[PROPERTIES] = self.properties
+        if self.metadata:
+            resource_sections[METADATA] = self.metadata
+        if self.depends_on:
+            resource_sections[DEPENDS_ON] = []
+            for depend in self.depends_on:
+                resource_sections[DEPENDS_ON].append(depend.name)
+        if self.update_policy:
+            resource_sections[UPDATE_POLICY] = self.update_policy
+        if self.deletion_policy:
+            resource_sections[DELETION_POLICY] = self.deletion_policy
+
+        return {self.name: resource_sections}
+
+    def get_tosca_props(self):
+        tosca_props = {}
+        for prop in self.nodetemplate.get_properties_objects():
+            if isinstance(prop.value, GetInput):
+                tosca_props[prop.name] = {'get_param': prop.value.input_name}
+            else:
+                tosca_props[prop.name] = prop.value
+        return tosca_props
+
+    def get_tosca_caps(self):
+        tosca_caps = {}
+        for cap in self.nodetemplate.get_capabilities_objects():
+            properties = cap.get_properties()
+            if len(properties):
+                tosca_caps[cap.name] = {}
+                for name in properties:
+                    tosca_caps[cap.name][name] = properties[name].value
+        return tosca_caps
+
+    def get_tosca_reqs(self):
+        tosca_reqs = []
+        for requirement in self.nodetemplate.requirements:
+            for endpoint, details in six.iteritems(requirement):
+                req = {}
+                relation = None
+                interfaces = None
+                if isinstance(details, dict):
+                    target = details.get('node')
+                    relation = details.get('relationship')
+                else:
+                    target = details
+                if (target and relation and
+                        not isinstance(relation, six.string_types)):
+                    interfaces = relation.get('interfaces')
+                # Collect target, relation and interfaces for this
+                # endpoint into a single entry.
+                req[endpoint] = {'target': target}
+                if relation:
+                    req[endpoint]['relation'] = relation
+                if interfaces:
+                    req[endpoint]['interfaces'] = interfaces
+                tosca_reqs.append(req)
+        return tosca_reqs
+
+    def get_property(self, args):
+        # TODO(Philip): Should figure out how to get this resolved
+        # by tosca-parser using GetProperty
+        if isinstance(args, list):
+            if len(args) == 2 and \
+               args[0] == 'SELF':
+                if args[1] in self.properties:
+                    return self.properties[args[1]]
+                else:
+                    self.log.error(_("{0}, property {} not defined").
+                                   format(self.name, args[1]))
+                    return
+        self.log.error(_("Get property for {0} of type {1} not supported").
+                       format(self.name, args))
+
+    def get_node_with_name(self, name, nodes):
+        """Get the node instance with specified name"""
+        for node in nodes:
+            if node.name == name:
+                return node
+
+    def get_nodes_related(self, target, type_, nodes):
+        """Get list of nodes related to target node"""
+        dep_nodes = []
+        for node in nodes:
+            if (node.name == target.name or
+                type_ != node.type):
+                continue
+            for rel in node.nodetemplate.related_nodes:
+                if rel.name == target.name:
+                    dep_nodes.append(node)
+                    break
+        return dep_nodes
+
+    def get_mano_attribute(self, attribute, args):
+        # This is a placeholder; subclasses should implement it when
+        # translation is needed for a particular attribute.
+        raise Exception(_("No translation in TOSCA type {0} for attribute "
+                          "{1}").format(self.nodetemplate.type, attribute))
+
+    @staticmethod
+    def _get_all_operations(node):
+        operations = {}
+        for operation in node.interfaces:
+            operations[operation.name] = operation
+
+        node_type = node.type_definition
+        if (isinstance(node_type, str) or
+            node_type.type == "tosca.policies.Placement"):
+            return operations
+
+        while True:
+            type_operations = ManoResource._get_interface_operations_from_type(
+                node_type, node, 'Standard')
+            type_operations.update(operations)
+            operations = type_operations
+
+            if node_type.parent_type is not None:
+                node_type = node_type.parent_type
+            else:
+                return operations
+
+    @staticmethod
+    def _get_interface_operations_from_type(node_type, node, lifecycle_name):
+        operations = {}
+        if (isinstance(node_type, str) or
+            node_type.type == "tosca.policies.Placement"):
+            return operations
+        if node_type.interfaces and lifecycle_name in node_type.interfaces:
+            for name, elems in node_type.interfaces[lifecycle_name].items():
+                # ignore empty operations (only type)
+                # ignore global interface inputs,
+                # concrete inputs are on the operations themselves
+                if name != 'type' and name != 'inputs':
+                    operations[name] = InterfacesDef(node_type,
+                                                     lifecycle_name,
+                                                     node, name, elems)
+        return operations
+
+    @staticmethod
+    def get_parent_type(node_type):
+        if node_type.parent_type is not None:
+            return node_type.parent_type
+        else:
+            return None
+
+    @staticmethod
+    def get_base_type(node_type):
+        parent_type = ManoResource.get_parent_type(node_type)
+        if parent_type is not None:
+            if parent_type.type.endswith('.Root'):
+                return node_type
+            else:
+                return ManoResource.get_base_type(parent_type)
+        else:
+            return node_type
diff --git a/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_template.py b/common/python/rift/mano/tosca_translator/rwmano/syntax/mano_template.py
new file mode 100644 (file)
index 0000000..d263e6f
--- /dev/null
@@ -0,0 +1,262 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+import uuid
+
+import yaml
+
+from rift.mano.tosca_translator.common.utils import _
+
+from rift.mano.tosca_translator.common.utils import dict_convert_values_to_str
+
+try:
+    import gi
+    gi.require_version('RwYang', '1.0')
+    gi.require_version('RwNsdYang', '1.0')
+    gi.require_version('NsdYang', '1.0')
+
+    from gi.repository import NsdYang
+    from gi.repository import RwNsdYang
+    from gi.repository import RwYang
+except (ImportError, ValueError):
+    # The gi bindings or the YANG modules are unavailable; callers fall
+    # back to the non-GI (plain dict) code path.
+    pass
+
+
+class ManoTemplate(object):
+    '''Container for full RIFT.io MANO template.'''
+
+    YANG_NS = (NSD, VNFD) = ('nsd', 'vnfd')
+    OUTPUT_FIELDS = (NAME, ID, YANG, FILES) = ('name', 'id', 'yang', 'files')
+
+    def __init__(self, log):
+        self.log = log
+        self.resources = []
+        self.outputs = []
+        self.parameters = []
+        self.description = "Translated from TOSCA"
+        self.metadata = None
+        self.policies = []
+        self.groups = []
+
+    def output_to_yang(self, use_gi=False, indent=4):
+        self.log.debug(_('Converting translated output to yang model.'))
+
+        nsd_cat = None
+        nsd_id = str(uuid.uuid1())
+        vnfds = []
+
+        if use_gi:
+            try:
+                nsd_cat = RwNsdYang.YangData_Nsd_NsdCatalog()
+                nsd = nsd_cat.nsd.add()
+                nsd.id = nsd_id
+                nsd.name = self.metadata['name']
+                nsd.description = self.description
+                nsd.vendor = self.metadata['vendor']
+                nsd.short_name = self.metadata['name']
+                nsd.version = self.metadata['version']
+            except Exception as e:
+                self.log.warning(_("Unable to use YANG GI to generate "
+                                   "descriptors, falling back to alternate "
+                                   "method: {}").format(e))
+                self.log.exception(e)
+                use_gi = False
+
+        if not use_gi:
+            nsd = {
+                'id': nsd_id,
+                'name': self.metadata['name'],
+                'description': self.description,
+                'vendor': self.metadata['vendor'],
+                'short-name': self.metadata['name'],
+                'version': self.metadata['version'],
+            }
+
+        for resource in self.resources:
+            # Do the vlds first
+            if resource.type == 'vld':
+                resource.generate_yang_model(nsd, vnfds, use_gi=use_gi)
+
+        for resource in self.resources:
+            # Do the vnfds next
+            if resource.type == 'vnfd':
+                resource.generate_yang_model(nsd, vnfds, use_gi=use_gi)
+
+        for resource in self.resources:
+            # Do the other nodes
+            if resource.type != 'vnfd' and resource.type != 'vld':
+                resource.generate_yang_model(nsd, vnfds, use_gi=use_gi)
+
+        for group in self.groups:
+            group.generate_yang_model(nsd, vnfds, use_gi=use_gi)
+
+        for policy in self.policies:
+            policy.generate_yang_model(nsd, vnfds, use_gi=use_gi)
+
+        # Add input params to nsd
+        if use_gi:
+            for param in self.parameters:
+                nsd.input_parameter_xpath.append(
+                    NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+                        xpath=param.get_xpath(),
+                    )
+                )
+        else:
+            nsd['input-parameter-xpath'] = []
+            for param in self.parameters:
+                nsd['input-parameter-xpath'].append(
+                    {'xpath': param.get_xpath()})
+
+        # Get list of supporting files referred in template
+        # Returned format is {desc_id: [{type: type, name: filename}]}
+        # TODO (pjoseph): Currently only images and scripts are retrieved.
+        # Need to add support to get script names, charms, etc.
+        other_files = {}
+        for resource in self.resources:
+            resource.get_supporting_files(other_files)
+
+        for policy in self.policies:
+            policy.get_supporting_files(other_files, desc_id=nsd_id)
+
+        self.log.debug(_("List of other files: {}".format(other_files)))
+
+        # Do the final processing and convert each descriptor to yaml string
+        tpl = {}
+
+        # Add the NSD
+        if use_gi:
+            nsd_pf = self.get_yaml(['nsd', 'rw-nsd'], nsd_cat)
+            nsd_id = nsd_cat.nsd[0].id
+            nsd_name = nsd_cat.nsd[0].name
+        else:
+            nsd_id = nsd['id']
+            nsd_name = nsd['name']
+
+            # In case of non-GI processing:
+            # - convert all values to string
+            # - enclose in a catalog dict
+            # - prefix all keys with nsd or vnfd
+            # - Convert to YAML string
+            nsd_pf = yaml.dump(
+                self.prefix_dict(
+                    self.add_cat(dict_convert_values_to_str(nsd),
+                                 self.NSD),
+                    self.NSD),
+                default_flow_style=False)
+
+        nsd_out = {
+            self.NAME: nsd_name,
+            self.ID: nsd_id,
+            self.YANG: nsd_pf,
+        }
+
+        if nsd_id in other_files:
+            nsd_out[self.FILES] = other_files[nsd_id]
+
+        tpl[self.NSD] = [nsd_out]
+
+        # Add the VNFDs
+        tpl[self.VNFD] = []
+
+        for vnfd in vnfds:
+            if use_gi:
+                vnfd_pf = self.get_yaml(['vnfd', 'rw-vnfd'], vnfd)
+                vnfd_id = vnfd.vnfd[0].id
+                vnfd_name = vnfd.vnfd[0].name
+
+            else:
+                vnfd_id = vnfd['id']
+                vnfd_name = vnfd['name']
+
+                # In case of non-GI processing:
+                # - convert all values to string
+                # - enclose in a catalog dict
+                # - prefix all keys with nsd or vnfd
+                # - Convert to YAML string
+                vnfd_pf = yaml.dump(
+                    self.prefix_dict(
+                        self.add_cat(dict_convert_values_to_str(vnfd),
+                                     self.VNFD),
+                        self.VNFD),
+                    default_flow_style=False)
+
+            vnfd_out = {
+                self.NAME: vnfd_name,
+                self.ID: vnfd_id,
+                self.YANG: vnfd_pf,
+            }
+
+            if vnfd_id in other_files:
+                vnfd_out[self.FILES] = other_files[vnfd_id]
+
+            tpl[self.VNFD].append(vnfd_out)
+
+        self.log.debug(_("NSD: {0}").format(tpl[self.NSD]))
+        self.log.debug(_("VNFDs:"))
+        for vnfd in tpl[self.VNFD]:
+            self.log.debug(_("{0}").format(vnfd))
+
+        return tpl
+
+    def _get_field(self, d, pf, field='name'):
+        '''Get the name given for the descriptor'''
+        # Search within the desc for a key pf:name
+        key = pf+':'+field
+        if isinstance(d, dict):
+            # If it is a dict, search for pf:name
+            if key in d:
+                return d[key]
+            else:
+                for k, v in d.items():
+                    result = self._get_field(v, pf, field)
+                    if result:
+                        return result
+        elif isinstance(d, list):
+            for memb in d:
+                result = self._get_field(memb, pf, field)
+                if result:
+                    return result
+
+    def prefix_dict(self, d, pf):
+        '''Prefix all keys of a dict with a specific prefix:'''
+        if isinstance(d, dict):
+            dic = {}
+            for key in d.keys():
+                # Only prefix keys without any prefix
+                # so later we can do custom prefixing
+                # which will not get overwritten here
+                if ':' not in key:
+                    dic[pf+':'+key] = self.prefix_dict(d[key], pf)
+                else:
+                    dic[key] = self.prefix_dict(d[key], pf)
+            return dic
+        elif isinstance(d, list):
+            arr = []
+            for memb in d:
+                arr.append(self.prefix_dict(memb, pf))
+            return arr
+        else:
+            return d
+
+    def add_cat(self, desc, pf):
+        return {pf+'-catalog': {pf: [desc]}}
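+
+    # Sketch of the non-GI post-processing (values are placeholders):
+    #
+    #     self.prefix_dict(self.add_cat({'id': 'abc', 'name': 'ns1'}, 'nsd'),
+    #                      'nsd')
+    #     # -> {'nsd:nsd-catalog': {'nsd:nsd': [{'nsd:id': 'abc',
+    #     #                                      'nsd:name': 'ns1'}]}}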
+
+    def get_yaml(self, module_list, desc):
+        model = RwYang.Model.create_libncx()
+        for module in module_list:
+            model.load_module(module)
+        return desc.to_yaml(model)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/__init__.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/__init__.py
new file mode 100755 (executable)
index 0000000..bc4710b
--- /dev/null
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_compute.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_compute.py
new file mode 100755 (executable)
index 0000000..2b244d7
--- /dev/null
@@ -0,0 +1,269 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+
+import os
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.utils import ChecksumUtils
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+
+from toscaparser.common.exception import ValidationError
+from toscaparser.elements.scalarunit import ScalarUnit_Size
+
+# Name used to dynamically load appropriate map class.
+TARGET_CLASS_NAME = 'ToscaCompute'
+
+
+class ToscaCompute(ManoResource):
+    '''Translate TOSCA node type RIFT.io VDUs.'''
+
+    REQUIRED_PROPS = ['name', 'id', 'image', 'count', 'vm-flavor']
+    OPTIONAL_PROPS = [
+        'external-interface',
+        'image-checksum',
+        'cloud-init',
+        'cloud-init-file',]
+    IGNORE_PROPS = []
+
+    toscatype = 'tosca.nodes.Compute'
+
+    def __init__(self, log, nodetemplate, metadata=None):
+        super(ToscaCompute, self).__init__(log,
+                                           nodetemplate,
+                                           type_='vdu',
+                                           metadata=metadata)
+        # List with associated port resources with this server
+        self.assoc_port_resources = []
+        self._image = None  # Image to bring up the VDU
+        self._image_cksum = None
+        self._cloud_init = None  # Cloud-init file
+        self._vnf = None
+        self._yang = None
+        self._id = self.name
+
+    @property
+    def image(self):
+        return self._image
+
+    @property
+    def cloud_init(self):
+        return self._cloud_init
+
+    @property
+    def vnf(self):
+        return self._vnf
+
+    @vnf.setter
+    def vnf(self, vnf):
+        if self._vnf:
+            err_msg = (_('VDU {0} already has a VNF {1} associated').
+                       format(self, self._vnf))
+            self.log.error(err_msg)
+            raise ValidationError(message=err_msg)
+        self._vnf = vnf
+
+    def handle_properties(self):
+        tosca_props = self.get_tosca_props()
+        self.log.debug(_("VDU {0} tosca properties: {1}").
+                       format(self.name, tosca_props))
+        vdu_props = {}
+        for key, value in tosca_props.items():
+            if key == 'cloud_init':
+                vdu_props['cloud-init'] = value
+            elif key == 'cloud-init-file':
+                self._cloud_init = "../cloud_init/{}".format(value)
+            else:
+                vdu_props[key] = value
+
+        if 'name' not in vdu_props:
+            vdu_props['name'] = self.name
+
+        if 'id' not in vdu_props:
+            vdu_props['id'] = self.id
+
+        if 'count' not in vdu_props:
+            vdu_props['count'] = 1
+
+        self.log.debug(_("VDU {0} properties: {1}").
+                       format(self.name, vdu_props))
+        self.properties = vdu_props
+
+    def handle_capabilities(self):
+
+        def get_vm_flavor(specs):
+            vm_flavor = {}
+            if 'num_cpus' in specs:
+                vm_flavor['vcpu-count'] = specs['num_cpus']
+            else:
+                vm_flavor['vcpu-count'] = 1
+
+            if 'mem_size' in specs:
+                vm_flavor['memory-mb'] = (ScalarUnit_Size(specs['mem_size']).
+                                          get_num_from_scalar_unit('MB'))
+            else:
+                vm_flavor['memory-mb'] = 512
+
+            if 'disk_size' in specs:
+                vm_flavor['storage-gb'] = (ScalarUnit_Size(specs['disk_size']).
+                                           get_num_from_scalar_unit('GB'))
+            else:
+                vm_flavor['storage-gb'] = 4
+
+            return vm_flavor
+
+        tosca_caps = self.get_tosca_caps()
+        self.log.debug(_("VDU {0} tosca capabilites: {1}").
+                       format(self.name, tosca_caps))
+
+        if 'host' in tosca_caps:
+            self.properties['vm-flavor'] = get_vm_flavor(tosca_caps['host'])
+            self.log.debug(_("VDU {0} properties: {1}").
+                           format(self.name, self.properties))
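+
+    # Sketch of the host-capability mapping above (a hedged example, not
+    # captured output): TOSCA specs such as
+    #
+    #     {'num_cpus': 2, 'mem_size': '4096 MB', 'disk_size': '10 GB'}
+    #
+    # map to
+    #
+    #     {'vcpu-count': 2, 'memory-mb': 4096, 'storage-gb': 10}
+    #
+    # with defaults of 1 vCPU / 512 MB / 4 GB when a spec is omitted.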
+
+    def handle_artifacts(self):
+        if self.artifacts is None:
+            return
+        self.log.debug(_("VDU {0} tosca artifacts: {1}").
+                       format(self.name, self.artifacts))
+        arts = {}
+        for key in self.artifacts:
+            props = self.artifacts[key]
+            if isinstance(props, dict):
+                details = {}
+                for name, value in props.items():
+                    if name == 'type':
+                        prefix, type_ = value.rsplit('.', 1)
+                        if type_ == 'QCOW2':
+                            details['type'] = 'qcow2'
+                        else:
+                            err_msg = _("VDU {0}, Currently only QCOW2 images "
+                                        "are supported in artifacts ({1}:{2})"). \
+                                        format(self.name, key, value)
+                            self.log.error(err_msg)
+                            raise ValidationError(message=err_msg)
+                    elif name == 'file':
+                        details['file'] = value
+                    elif name == 'image_checksum':
+                        details['image_checksum'] = value
+                    else:
+                        self.log.warn(_("VDU {0}, unsuported attribute {1}").
+                                      format(self.name, name))
+                if len(details):
+                    arts[key] = details
+            else:
+                arts[key] = self.artifacts[key]
+
+        self.log.debug(_("VDU {0} artifacts: {1}").
+                       format(self.name, arts))
+        self.artifacts = arts
+
+    def handle_interfaces(self):
+        # Currently, we support only create operation
+        operations_deploy_sequence = ['create']
+
+        operations = ManoResource._get_all_operations(self.nodetemplate)
+
+        # use the current ManoResource for the first operation in this order
+        # Currently we only support image in create operation
+        for operation in operations.values():
+            if operation.name in operations_deploy_sequence:
+                self.operations[operation.name] = None
+                try:
+                    self.operations[operation.name] = operation.implementation
+                    for name, details in self.artifacts.items():
+                        if name == operation.implementation:
+                            self._image = details['file']
+                except KeyError as e:
+                    self.log.exception(e)
+        return None
+
+    def update_image_checksum(self, in_file):
+        # Create image checksum
+        # in_file is the TOSCA yaml file location
+        if self._image is None:
+            return
+        self.log.debug("Update image: {}".format(in_file))
+        if os.path.exists(in_file):
+            in_dir = os.path.dirname(in_file)
+            img_dir = os.path.dirname(self._image)
+            abs_dir = os.path.normpath(
+                os.path.join(in_dir, img_dir))
+            self.log.debug("Abs path: {}".format(abs_dir))
+            if os.path.isdir(abs_dir):
+                img_path = os.path.join(abs_dir,
+                                        os.path.basename(self._image))
+                self.log.debug(_("Image path: {0}").
+                               format(img_path))
+                if os.path.exists(img_path):
+                    # TODO (pjoseph): To be fixed when we can retrieve
+                    # the VNF image in Launchpad.
+                    # Check if the file is not size 0
+                    # else it is a dummy file and to be ignored
+                    if os.path.getsize(img_path) != 0:
+                        self._image_cksum = ChecksumUtils.get_md5(img_path,
+                                                                  log=self.log)
+
+    def get_mano_attribute(self, attribute, args):
+        attr = {}
+        # Convert from a TOSCA attribute for a nodetemplate to a MANO
+        # attribute for the matching resource.  Unless there is additional
+        # runtime support, this should be a one to one mapping.
+
+        # Note: We treat private and public IP  addresses equally, but
+        # this will change in the future when TOSCA starts to support
+        # multiple private/public IP addresses.
+        self.log.debug(_('Converting a TOSCA attribute for a nodetemplate '
+                         'to a MANO attribute.'))
+        if attribute == 'private_address' or \
+           attribute == 'public_address':
+            attr['get_attr'] = [self.name, 'networks', 'private', 0]
+
+        return attr
+
+    def _update_properties_for_model(self):
+        if self._image:
+            self.properties['image'] = os.path.basename(self._image)
+            if self._image_cksum:
+                self.properties['image-checksum'] = self._image_cksum
+
+        for key in ToscaCompute.IGNORE_PROPS:
+            if key in self.properties:
+                self.properties.pop(key)
+
+    def generate_yang_submodel_gi(self, vnfd):
+        if vnfd is None:
+            return None
+        self._update_properties_for_model()
+        props = convert_keys_to_python(self.properties)
+        try:
+            vnfd.vdu.add().from_dict(props)
+        except Exception as e:
+            err_msg = _("{0} Exception vdu from dict {1}: {2}"). \
+                      format(self, props, e)
+            self.log.error(err_msg)
+            raise e
+
+    def generate_yang_submodel(self):
+        """Generate yang model for the VDU"""
+        self.log.debug(_("Generate YANG model for {0}").
+                       format(self))
+
+        self._update_properties_for_model()
+
+        vdu = self.properties
+
+        return vdu
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_config_primitives.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_config_primitives.py
new file mode 100644 (file)
index 0000000..b1a6ca2
--- /dev/null
@@ -0,0 +1,102 @@
+#
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+
+from toscaparser.common.exception import ValidationError
+
+# Name used to dynamically load appropriate map class.
+TARGET_CLASS_NAME = 'ToscaConfigPrimitives'
+
+
+class ToscaConfigPrimitives(ManoResource):
+    '''Translate TOSCA group type tosca.groups.riftio.ConfigPrimitives.'''
+
+    toscatype = 'tosca.groups.riftio.ConfigPrimitives'
+
+    def __init__(self, log, name, details, metadata=None):
+        # TODO(Philip): Not calling ManoResource.__init__, as there is no
+        # instance from the parser for groups
+        self.log = log
+        self.name = name
+        self.details = details
+        self.type_ = 'config-prim'
+        self.metadata = metadata
+        self.nodes = []
+
+    def __str__(self):
+        return "%s(%s)" % (self.name, self.type)
+
+    def handle_properties(self, nodes):
+        tosca_props = self.details['properties']
+        self.log.debug(_("{0} with tosca properties: {1}").
+                       format(self, tosca_props))
+
+        members = self.details['members']
+        for member in members:
+            found = False
+            for node in nodes:
+                if member == node.name:
+                    self.nodes.append(node)
+                    found = True
+                    break
+            if not found:
+                err_msg = _("{0}: Did not find the member node {1} in "
+                            "resources list"). \
+                          format(self, member)
+                self.log.error(err_msg)
+                raise ValidationError(message=err_msg)
+
+        self.primitives = tosca_props['primitives']
+
+    def get_primitive(self, primitive):
+        if primitive in self.primitives:
+            return self.primitives[primitive]
+
+    def validate_primitive(self, primitive):
+        return primitive in self.primitives
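+    # Illustrative example (hypothetical data): with
+    #   self.primitives = {'ping config': {...}, 'pong config': {...}}
+    # validate_primitive('ping config') is True, and get_primitive() of
+    # an unknown name returns None.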
+
+    def generate_yang_model_gi(self, nsd, vnfds):
+        for name, value in self.primitives.items():
+            prim = {'name': name}
+            props = convert_keys_to_python(value)
+            try:
+                prim.update(props)
+            except Exception as e:
+                err_msg = _("{0} Exception nsd config primitives {1}: {2}"). \
+                          format(self, props, e)
+                self.log.error(err_msg)
+                raise e
+            nsd.service_primitive.add().from_dict(prim)
+
+    def generate_yang_model(self, nsd, vnfds, use_gi=False):
+        """Generate yang model for the node"""
+        self.log.debug(_("Generate YANG model for {0}").
+                       format(self))
+
+        if use_gi:
+            return self.generate_yang_model_gi(nsd, vnfds)
+
+        nsd['service-primitive'] = []
+        for name, value in self.primitives.items():
+            prim = {'name': name}
+            prim.update(self.map_keys_to_mano(value))
+            nsd['service-primitive'].append(prim)
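+    # Illustrative example (hypothetical data, and assuming the inherited
+    # map_keys_to_mano swaps '_' for '-'): a primitive entry of
+    #   {'ping config': {'user_defined_script': 'ping_config.py'}}
+    # appends
+    #   {'name': 'ping config', 'user-defined-script': 'ping_config.py'}
+    # to nsd['service-primitive'].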
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_initial_config.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_initial_config.py
new file mode 100644 (file)
index 0000000..7c03d56
--- /dev/null
@@ -0,0 +1,114 @@
+#
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+
+from toscaparser.common.exception import ValidationError
+
+
+# Name used to dynamically load appropriate map class.
+TARGET_CLASS_NAME = 'ToscaInitialConfig'
+
+
+class ToscaInitialConfig(ManoResource):
+    '''Translate TOSCA policy type tosca.policies.riftio.InitialConfigPrimitive.'''
+
+    toscatype = 'tosca.policies.riftio.InitialConfigPrimitive'
+
+    IGNORE_PROPS = []
+
+    def __init__(self, log, primitive, metadata=None):
+        # TODO(Philip): Not calling ManoResource.__init__, as there is no
+        # instance from the parser for policies
+        self.log = log
+        for name, details in primitive.items():
+            self.name = name
+            self.details = details
+            break
+        self.type_ = 'initial-cfg'
+        self.metadata = metadata
+        self.properties = {}
+        self.scripts = []
+
+    def __str__(self):
+        return "%s(%s)" % (self.name, self.type)
+
+    def handle_properties(self, nodes, groups):
+        tosca_props = self.details
+        self.log.debug(_("{0} with tosca properties: {1}").
+                       format(self, tosca_props))
+        self.properties['name'] = tosca_props['name']
+        self.properties['seq'] = tosca_props['seq']
+        self.properties['user-defined-script'] = \
+            tosca_props['user_defined_script']
+        self.scripts.append(
+            '../scripts/{}'.format(tosca_props['user_defined_script']))
+
+        if 'parameter' in tosca_props:
+            self.properties['parameter'] = []
+            for name, value in tosca_props['parameter'].items():
+                self.properties['parameter'].append({
+                    'name': name,
+                    'value': value,
+                })
+
+        self.log.debug(_("{0} properties: {1}").format(self, self.properties))
+
+    def get_yang_model_gi(self, nsd, vnfds):
+        props = convert_keys_to_python(self.properties)
+        try:
+            nsd.initial_config_primitive.add().from_dict(props)
+        except Exception as e:
+            err_msg = _("{0} Exception nsd initial config from dict {1}: {2}"). \
+                      format(self, props, e)
+            self.log.error(err_msg)
+            raise e
+
+    def generate_yang_model(self, nsd, vnfds, use_gi=False):
+        """Generate yang model for the node"""
+        self.log.debug(_("Generate YANG model for {0}").
+                       format(self))
+
+        for key in ToscaInitialConfig.IGNORE_PROPS:
+            if key in self.properties:
+                self.properties.pop(key)
+
+        if use_gi:
+            return self.get_yang_model_gi(nsd, vnfds)
+
+        if 'initial-config-primitive' not in nsd:
+            nsd['initial-config-primitive'] = []
+        prim = {}
+        for key, value in self.properties.items():
+            prim[key] = value
+        nsd['initial-config-primitive'].append(prim)
+
+    def get_supporting_files(self, files, desc_id=None):
+        if not self.scripts:
+            return
+
+        if desc_id not in files:
+            files[desc_id] = []
+
+        for script in self.scripts:
+            files[desc_id].append({
+                'type': 'script',
+                'name': script,
+            })
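+    # Illustrative sketch: files is an out-parameter; with desc_id
+    # 'nsd-1' and one script it ends up as
+    #   {'nsd-1': [{'type': 'script', 'name': '../scripts/start.py'}]}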
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_network.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_network.py
new file mode 100644 (file)
index 0000000..b446e51
--- /dev/null
@@ -0,0 +1,136 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+
+
+# Name used to dynamically load appropriate map class.
+TARGET_CLASS_NAME = 'ToscaNetwork'
+
+
+class ToscaNetwork(ManoResource):
+    '''Translate TOSCA node type tosca.nodes.network.Network.'''
+
+    toscatype = 'tosca.nodes.network.Network'
+    NETWORK_PROPS = ['network_name', 'network_id']
+    REQUIRED_PROPS = ['name', 'id', 'type', 'version', 'short-name',
+                      'description', 'vendor']
+    OPTIONAL_PROPS = ['vnfd-connection-point-ref']
+    IGNORE_PROPS = ['ip_version', 'dhcp_enabled']
+    VALID_TYPES = ['ELAN']
+
+    def __init__(self, log, nodetemplate, metadata=None):
+        super(ToscaNetwork, self).__init__(log,
+                                           nodetemplate,
+                                           type_='vld',
+                                           metadata=metadata)
+
+    def handle_properties(self):
+        tosca_props = self.get_tosca_props()
+
+        if 'cidr' in tosca_props.keys():
+            self.log.warn(_("Support for subnet not yet "
+                            "available. Ignoring it"))
+        net_props = {}
+        for key, value in tosca_props.items():
+            if key in self.NETWORK_PROPS:
+                if key == 'network_name':
+                    net_props['name'] = value
+                elif key == 'network_id':
+                    net_props['id'] = value
+            else:
+                net_props[key] = value
+
+        net_props['type'] = self.get_type()
+
+        if 'name' not in net_props:
+            # Use the node name as network name
+            net_props['name'] = self.name
+
+        if 'short-name' not in net_props:
+            # Use the node name as the short name
+            net_props['short-name'] = self.name
+
+        if 'id' not in net_props:
+            net_props['id'] = self.id
+
+        if 'description' not in net_props:
+            net_props['description'] = self.description
+
+        if 'vendor' not in net_props:
+            net_props['vendor'] = self.vendor
+
+        if 'version' not in net_props:
+            net_props['version'] = self.version
+
+        self.log.debug(_("Network {0} properties: {1}").
+                       format(self.name, net_props))
+        self.properties = net_props
+
+    def get_type(self):
+        """Get the network type based on propery or type derived from"""
+        node = self.nodetemplate
+        tosca_props = self.get_tosca_props()
+        try:
+            if tosca_props['network_type'] in ToscaNetwork.VALID_TYPES:
+                return tosca_props['network_type']
+        except KeyError:
+            pass
+
+        node_type = node.type_definition
+
+        while node_type.type:
+            self.log.debug(_("Node name {0} with type {1}").
+                           format(self.name, node_type.type))
+            prefix, nw_type = node_type.type.rsplit('.', 1)
+            if nw_type in ToscaNetwork.VALID_TYPES:
+                return nw_type
+            else:
+                # Get the parent
+                node_type = ManoResource.get_parent_type(node_type)
+
+        return "ELAN"
+
+    def generate_yang_model_gi(self, nsd, vnfds):
+        props = convert_keys_to_python(self.properties)
+        try:
+            nsd.vld.add().from_dict(props)
+        except Exception as e:
+            err_msg = _("{0} Exception vld from dict {1}: {2}"). \
+                      format(self, props, e)
+            self.log.error(err_msg)
+            raise e
+
+    def generate_yang_model(self, nsd, vnfds, use_gi=False):
+        """Generate yang model for the node"""
+        self.log.debug(_("Generate YANG model for {0}").
+                       format(self))
+
+        # Remove the props to be ignored:
+        for key in ToscaNetwork.IGNORE_PROPS:
+            if key in self.properties:
+                self.properties.pop(key)
+
+        if use_gi:
+            return self.generate_yang_model_gi(nsd, vnfds)
+
+        vld = self.properties
+
+        if 'vld' not in nsd:
+            nsd['vld'] = []
+        nsd['vld'].append(vld)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_port.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_network_port.py
new file mode 100644 (file)
index 0000000..04e3a59
--- /dev/null
@@ -0,0 +1,145 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+
+from toscaparser.common.exception import ValidationError
+
+
+# Name used to dynamically load appropriate map class.
+TARGET_CLASS_NAME = 'ToscaNetworkPort'
+TOSCA_LINKS_TO = 'tosca.relationships.network.LinksTo'
+TOSCA_BINDS_TO = 'tosca.relationships.network.BindsTo'
+
+
+class ToscaNetworkPort(ManoResource):
+    '''Translate TOSCA node type tosca.nodes.network.Port.'''
+
+    toscatype = 'tosca.nodes.network.Port'
+
+    VALID_TYPES = ['VIRTIO', 'VPORT']
+
+    def __init__(self, log, nodetemplate, metadata=None):
+        super(ToscaNetworkPort, self).__init__(log,
+                                               nodetemplate,
+                                               type_='port',
+                                               metadata=metadata)
+        # Default order
+        self.order = 0
+
+    def handle_properties(self):
+        tosca_props = self.get_tosca_props()
+        self.log.debug(_("Port {0} with tosca properties: {1}").
+                       format(self.name, tosca_props))
+        port_props = {}
+        for key, value in tosca_props.items():
+            port_props[key] = value
+
+        if 'cp_type' not in port_props:
+            port_props['cp_type'] = 'VPORT'
+        else:
+            if not port_props['cp_type'] in ToscaNetworkPort.VALID_TYPES:
+                err_msg = _("Invalid port type, {0}, specified for {1}"). \
+                          format(port_props['cp_type'], self.name)
+                self.log.warn(err_msg)
+                raise ValidationError(message=err_msg)
+
+        if 'vdu_intf_type' not in port_props:
+            port_props['vdu_intf_type'] = 'VIRTIO'
+        else:
+            if not port_props['vdu_intf_type'] in ToscaNetworkPort.VALID_TYPES:
+                err_msg = _("Invalid port type, {0}, specified for {1}"). \
+                          format(port_props['vdu_intf_type'], self.name)
+                self.log.warn(err_msg)
+                raise ValidationError(message=err_msg)
+
+        self.properties = port_props
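+    # Illustrative example: a port with neither type set defaults to
+    #   {'cp_type': 'VPORT', 'vdu_intf_type': 'VIRTIO', ...}
+    # while a value outside VALID_TYPES raises ValidationError.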
+
+    def handle_requirements(self, nodes):
+        tosca_reqs = self.get_tosca_reqs()
+        self.log.debug("VNF {0} requirements: {1}".
+                       format(self.name, tosca_reqs))
+
+        vnf = None  # Need vnf ref to generate cp refs in vld
+        vld = None
+        if len(tosca_reqs) != 2:
+            err_msg = _("Invalid configuration as incorrect number of "
+                        "requirements for CP {0} are specified"). \
+                        format(self)
+            self.log.error(err_msg)
+            raise ValidationError(message=err_msg)
+
+        for req in tosca_reqs:
+            if 'virtualBinding' in req:
+                target = req['virtualBinding']['target']
+                node = self.get_node_with_name(target, nodes)
+                if node:
+                    vnf = node.vnf
+                    if not vnf:
+                        err_msg = _("No vnfs linked to a VDU {0}"). \
+                                    format(node)
+                        self.log.error(err_msg)
+                        raise ValidationError(message=err_msg)
+                    cp = {}
+                    cp['name'] = self.properties['name']
+                    cp['type'] = self.properties['cp_type']
+                    self.log.debug(_("Connection Point entry for VNF {0}:{1}").
+                                   format(vnf, cp))
+                    if 'connection-point' not in vnf.properties:
+                        vnf.properties['connection-point'] = []
+                    vnf.properties['connection-point'].append(cp)
+                    ext_intf = {}
+                    ext_intf['name'] = self.properties['vdu_intf_name']
+                    ext_intf['virtual-interface'] = \
+                                    {'type': self.properties['vdu_intf_type']}
+                    ext_intf['vnfd-connection-point-ref'] = \
+                                    self.properties['name']
+                    if 'external-interface' not in node.properties:
+                        node.properties['external-interface'] = []
+                    node.properties['external-interface'].append(ext_intf)
+                else:
+                    err_msg = _("Connection point {0}, VDU {1} "
+                                "specified not found"). \
+                                format(self.name, target)
+                    self.log.error(err_msg)
+                    raise ValidationError(message=err_msg)
+            elif 'virtualLink' in req:
+                target = req['virtualLink']['target']
+                node = self.get_node_with_name(target, nodes)
+                if node:
+                    vld = node
+                else:
+                    err_msg = _("CP {0}, VL {1} specified not found"). \
+                              format(self, target)
+                    self.log.error(err_msg)
+                    raise ValidationError(message=err_msg)
+
+        if vnf and vld:
+            cp_ref = {}
+            cp_ref['vnfd-connection-point-ref'] = self.properties['name']
+            cp_ref['vnfd-id-ref'] = vnf.properties['id']
+            cp_ref['member-vnf-index-ref'] = \
+                            vnf._const_vnfd['member-vnf-index']
+            if 'vnfd-connection-point-ref' not in vld.properties:
+                vld.properties['vnfd-connection-point-ref'] = []
+            vld.properties['vnfd-connection-point-ref'].append(cp_ref)
+        else:
+            err_msg = _("CP {0}, VNF {1} or VL {2} not found"). \
+                      format(self, vnf, vld)
+            self.log.error(err_msg)
+            raise ValidationError(message=err_msg)
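+    # Illustrative sketch (hypothetical values): a CP named
+    # 'ping_vnfd/cp0' bound to a VNF with id '1' and member index 1 adds
+    #   {'vnfd-connection-point-ref': 'ping_vnfd/cp0',
+    #    'vnfd-id-ref': '1', 'member-vnf-index-ref': 1}
+    # to the linked VL's 'vnfd-connection-point-ref' list.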
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_nfv_vnf.py
new file mode 100644 (file)
index 0000000..3e52967
--- /dev/null
@@ -0,0 +1,305 @@
+#
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+
+from toscaparser.common.exception import ValidationError
+
+try:
+    import gi
+    gi.require_version('RwVnfdYang', '1.0')
+
+    from gi.repository import RwVnfdYang
+except ImportError:
+    pass
+except ValueError:
+    pass
+
+
+# Name used to dynamically load appropriate map class.
+TARGET_CLASS_NAME = 'ToscaNfvVnf'
+
+
+class ToscaNfvVnf(ManoResource):
+    '''Translate TOSCA node type tosca.nodes.nfv.VNF.'''
+
+    toscatype = 'tosca.nodes.nfv.VNF'
+
+    REQUIRED_PROPS = ['name', 'short-name', 'id', 'description',
+                      'mgmt-interface']
+    OPTIONAL_PROPS = ['version', 'vendor', 'http-endpoint', 'monitoring-param',
+                      'connection-point']
+    IGNORE_PROPS = ['port']
+    TOSCA_CAPS = ['mgmt_interface', 'http_endpoint', 'monitoring_param_0',
+                  'monitoring_param_1', 'connection_point']
+
+    def __init__(self, log, nodetemplate, metadata=None):
+        super(ToscaNfvVnf, self).__init__(log,
+                                          nodetemplate,
+                                          type_="vnfd",
+                                          metadata=metadata)
+        self._const_vnfd = {}
+        self._vnf_config = {}
+        self._vdus = []
+
+    def map_tosca_name_to_mano(self, name):
+        new_name = super().map_tosca_name_to_mano(name)
+        if new_name.startswith('monitoring-param'):
+            new_name = 'monitoring-param'
+        if new_name == 'polling-interval':
+            new_name = 'polling_interval_secs'
+        return new_name
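+    # Illustrative examples (assuming the base mapping swaps '_' for '-'):
+    # 'monitoring_param_0' and 'monitoring_param_1' both collapse to
+    # 'monitoring-param', and 'polling_interval' ends up as
+    # 'polling_interval_secs'.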
+
+    def handle_properties(self):
+        tosca_props = self.get_tosca_props()
+        self.log.debug(_("VNF {0} with tosca properties: {1}").
+                       format(self.name, tosca_props))
+
+        def get_vnf_config(config):
+            vnf_config = {}
+            for key, value in config.items():
+                new_key = self.map_tosca_name_to_mano(key)
+                if isinstance(value, dict):
+                    sub_config = {}
+                    for subkey, subvalue in value.items():
+                        sub_config[self.map_tosca_name_to_mano(subkey)] = \
+                                        subvalue
+                    vnf_config[new_key] = sub_config
+                else:
+                    vnf_config[new_key] = value
+
+            if vnf_config['config-type'] != 'script':
+                err_msg = _("{}, Only script config supported "
+                             "for now: {}"). \
+                           format(self, vnf_config['config-type'])
+                self.log.error(err_msg)
+                raise ValidationError(message=err_msg)
+
+            # Replace config-details with actual name (config-type)
+            if ('config-type' in vnf_config and
+                'config-details' in vnf_config):
+                vnf_config[vnf_config['config-type']] = \
+                                    vnf_config.pop('config-details')
+                vnf_config.pop('config-type')
+
+            # Update config-delay and config-priority to the correct structure
+            vnf_config['config-attributes'] = {}
+            if 'config-delay' in vnf_config:
+                vnf_config['config-attributes']['config-delay'] = \
+                            vnf_config.pop('config-delay')
+            else:
+                vnf_config['config-attributes']['config-delay'] = 0
+            if 'config-priority' in vnf_config:
+                vnf_config['config-attributes']['config-priority'] = \
+                            vnf_config.pop('config-priority')
+            return vnf_config
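+        # Illustrative example (hypothetical input, assuming the base
+        # mapping swaps '_' for '-'): a TOSCA vnf_configuration of
+        #   {'config_type': 'script',
+        #    'config_details': {'script_type': 'bash'},
+        #    'config_delay': 10}
+        # becomes
+        #   {'script': {'script-type': 'bash'},
+        #    'config-attributes': {'config-delay': 10}}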
+
+        vnf_props = {}
+        for key, value in tosca_props.items():
+            if key == 'id':
+                self._const_vnfd['member-vnf-index'] = int(value)
+                self._const_vnfd['vnfd-id-ref'] = self.id
+            elif key == 'vnf_configuration':
+                self._vnf_config = get_vnf_config(value)
+            else:
+                vnf_props[key] = value
+
+        if 'name' not in vnf_props:
+            vnf_props['name'] = self.name
+
+        if 'short-name' not in vnf_props:
+            vnf_props['short-name'] = self.name
+
+        if 'id' not in vnf_props:
+            vnf_props['id'] = self.id
+
+        if 'vendor' not in vnf_props:
+            vnf_props['vendor'] = self.vendor
+
+        if 'description' not in vnf_props:
+            vnf_props['description'] = self.description
+
+        if 'start_by_default' in vnf_props:
+            self._const_vnfd['start-by-default'] = \
+                                        vnf_props.pop('start_by_default')
+
+        self.log.debug(_("VNF {0} with constituent vnf: {1}").
+                       format(self.name, self._const_vnfd))
+        self.log.debug(_("VNF {0} with properties: {1}").
+                       format(self.name, vnf_props))
+        self.properties = vnf_props
+
+    def handle_capabilities(self):
+        tosca_caps = self.get_tosca_caps()
+        self.log.debug(_("VDU {0} tosca capabilites: {1}").
+                       format(self.name, tosca_caps))
+
+        def get_props(props):
+            properties = {}
+            for key in props.keys():
+                value = props[key]
+                if isinstance(value, dict):
+                    if 'get_property' in value:
+                        val = self.get_property(value['get_property'])
+                        value = val
+                properties[self.map_tosca_name_to_mano(key)] = value
+            return properties
+
+        for key, value in tosca_caps.items():
+            if key in ToscaNfvVnf.TOSCA_CAPS:
+                new_key = self.map_tosca_name_to_mano(key)
+                props = get_props(value)
+                if 'id' in props:
+                    props['id'] = str(props['id'])
+                if 'protocol' in props:
+                    props.pop('protocol')
+
+                # There is only one instance of mgmt interface; the
+                # others are lists
+                if key == 'mgmt_interface':
+                    self.properties[new_key] = props
+                else:
+                    if new_key not in self.properties:
+                        self.properties[new_key] = []
+                    self.properties[new_key].append(props)
+
+        self.log.debug(_("VDU {0} properties: {1}").
+                       format(self.name, self.properties))
+
+    def handle_requirements(self, nodes):
+        tosca_reqs = self.get_tosca_reqs()
+        self.log.debug("VNF {0} requirements: {1}".
+                       format(self.name, tosca_reqs))
+
+        try:
+            for req in tosca_reqs:
+                if 'vdus' in req:
+                    target = req['vdus']['target']
+                    node = self.get_node_with_name(target, nodes)
+                    if node:
+                        self._vdus.append(node)
+                        node._vnf = self
+                        # Add the VDU id to mgmt-intf
+                        if 'mgmt-interface' in self.properties:
+                            self.properties['mgmt-interface']['vdu-id'] = \
+                                            node.id
+                            if 'vdu' in self.properties['mgmt-interface']:
+                                # Older yang
+                                self.properties['mgmt-interface'].pop('vdu')
+                    else:
+                        err_msg = _("VNF {0}, VDU {1} specified not found"). \
+                                  format(self.name, target)
+                        self.log.error(err_msg)
+                        raise ValidationError(message=err_msg)
+
+        except Exception as e:
+            err_msg = _("Exception getting VDUs for VNF {0}: {1}"). \
+                      format(self.name, e)
+            self.log.error(err_msg)
+            raise e
+
+        self.log.debug(_("VNF {0} properties: {1}").
+                       format(self.name, self.properties))
+
+    def generate_yang_model_gi(self, nsd, vnfds):
+        vnfd_cat = RwVnfdYang.YangData_Vnfd_VnfdCatalog()
+        vnfd = vnfd_cat.vnfd.add()
+        props = convert_keys_to_python(self.properties)
+        try:
+            vnfd.from_dict(props)
+        except Exception as e:
+            err_msg = _("{0} Exception updating vnfd from dict {1}: {2}"). \
+                      format(self, props, e)
+            self.log.error(err_msg)
+            raise e
+        vnfds.append(vnfd_cat)
+
+        # Update the VDU properties
+        for vdu in self._vdus:
+            vdu.generate_yang_submodel_gi(vnfd)
+
+        # Update constituent vnfd in nsd
+        try:
+            props = convert_keys_to_python(self._const_vnfd)
+            nsd.constituent_vnfd.add().from_dict(props)
+        except Exception as e:
+            err_msg = _("{0} Exception constituent vnfd from dict {1}: {2}"). \
+                      format(self, props, e)
+            self.log.error(err_msg)
+            raise e
+
+        # Update the vnf configuration info in mgmt_interface
+        props = convert_keys_to_python(self._vnf_config)
+        try:
+            vnfd.vnf_configuration.from_dict(props)
+        except Exception as e:
+            err_msg = _("{0} Exception vnfd mgmt intf from dict {1}: {2}"). \
+                      format(self, props, e)
+            self.log.error(err_msg)
+            raise e
+
+    def generate_yang_model(self, nsd, vnfds, use_gi=False):
+        """Generate yang model for the node"""
+        self.log.debug(_("Generate YANG model for {0}").
+                       format(self))
+
+        for key in ToscaNfvVnf.IGNORE_PROPS:
+            if key in self.properties:
+                self.properties.pop(key)
+
+        if use_gi:
+            return self.generate_yang_model_gi(nsd, vnfds)
+
+        vnfd = {}
+        vnfd.update(self.properties)
+        # Update vnf configuration on mgmt interface
+        vnfd['mgmt-interface']['vnf-configuration'] = self._vnf_config
+
+        # Update the VDU properties
+        vnfd['vdu'] = []
+        for vdu in self._vdus:
+            vnfd['vdu'].append(vdu.generate_yang_submodel())
+
+        vnfds.append(vnfd)
+
+        # Update constituent vnfd in nsd
+        if 'constituent-vnfd' not in nsd:
+            nsd['constituent-vnfd'] = []
+        nsd['constituent-vnfd'].append(self._const_vnfd)
+
+    def get_member_vnf_index(self):
+        return self._const_vnfd['member-vnf-index']
+
+    def get_supporting_files(self, files, desc_id=None):
+        files[self.id] = []
+        for vdu in self._vdus:
+            if vdu.image:
+                files[self.id].append({
+                    'type': 'image',
+                    'name': vdu.image,
+                })
+            if vdu.cloud_init:
+                files[self.id].append({
+                    'type': 'cloud_init',
+                    'name': vdu.cloud_init,
+                })
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_scaling_group.py b/common/python/rift/mano/tosca_translator/rwmano/tosca/tosca_scaling_group.py
new file mode 100644 (file)
index 0000000..25246af
--- /dev/null
@@ -0,0 +1,130 @@
+#
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+
+from toscaparser.common.exception import ValidationError
+
+
+# Name used to dynamically load appropriate map class.
+TARGET_CLASS_NAME = 'ToscaScalingGroup'
+
+
+class ToscaScalingGroup(ManoResource):
+    '''Translate TOSCA policy type tosca.policies.riftio.ScalingGroup.'''
+
+    toscatype = 'tosca.policies.riftio.ScalingGroup'
+
+    IGNORE_PROPS = []
+
+    def __init__(self, log, policy, metadata=None):
+        # TODO(Philip): Not calling ManoResource.__init__, as there is no
+        # instance from the parser for policies
+        self.log = log
+        for name, details in policy.items():
+            self.name = name
+            self.details = details
+            break
+        self.type_ = 'scale-grp'
+        self.metadata = metadata
+        self.properties = {}
+
+    def __str__(self):
+        return "%s(%s)" % (self.name, self.type)
+
+    def handle_properties(self, nodes, groups):
+        tosca_props = self.details
+        self.log.debug(_("{0} with tosca properties: {1}").
+                       format(self, tosca_props))
+        self.properties['name'] = tosca_props['name']
+        self.properties['max-instance-count'] = \
+                                tosca_props['max_instance_count']
+        self.properties['min-instance-count'] = \
+                                tosca_props['min_instance_count']
+        self.properties['vnfd-member'] = []
+
+        def _get_node(name):
+            for node in nodes:
+                if node.name == name:
+                    return node
+
+        for member, count in tosca_props['vnfd_members'].items():
+            node = _get_node(member)
+            if node:
+                memb = {}
+                memb['member-vnf-index-ref'] = node.get_member_vnf_index()
+                memb['count'] = count
+                self.properties['vnfd-member'].append(memb)
+            else:
+                err_msg = _("{0}: Did not find the member node {1} in "
+                            "resources list"). \
+                          format(self, member)
+                self.log.error(err_msg)
+                raise ValidationError(message=err_msg)
+
+        def _validate_action(action):
+            for group in groups:
+                if group.validate_primitive(action):
+                    return True
+            return False
+
+        self.properties['scaling-config-action'] = []
+        for action, value in tosca_props['config_actions'].items():
+            conf = {}
+            if _validate_action(value):
+                conf['trigger'] = action
+                conf['ns-config-primitive-name-ref'] = value
+                self.properties['scaling-config-action'].append(conf)
+            else:
+                err_msg = _("{0}: Did not find the action {1} in "
+                            "config primitives"). \
+                          format(self, action)
+                self.log.error(err_msg)
+                raise ValidationError(message=err_msg)
+
+        self.log.debug(_("{0} properties: {1}").format(self, self.properties))
+
+    def get_yang_model_gi(self, nsd, vnfds):
+        props = convert_keys_to_python(self.properties)
+        try:
+            nsd.scaling_group_descriptor.add().from_dict(props)
+        except Exception as e:
+            err_msg = _("{0} Exception nsd scaling group from dict {1}: {2}"). \
+                      format(self, props, e)
+            self.log.error(err_msg)
+            raise e
+
+    def generate_yang_model(self, nsd, vnfds, use_gi=False):
+        """Generate yang model for the node"""
+        self.log.debug(_("Generate YANG model for {0}").
+                       format(self))
+
+        for key in ToscaScalingGroup.IGNORE_PROPS:
+            if key in self.properties:
+                self.properties.pop(key)
+
+        if use_gi:
+            return self.get_yang_model_gi(nsd, vnfds)
+
+        if 'scaling-group-descriptor' not in nsd:
+            nsd['scaling-group-descriptor'] = []
+        scale = {}
+        for key, value in self.properties.items():
+            scale[key] = value
+        nsd['scaling-group-descriptor'].append(scale)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/tosca_translator.py b/common/python/rift/mano/tosca_translator/rwmano/tosca_translator.py
new file mode 100644 (file)
index 0000000..9c70a8a
--- /dev/null
@@ -0,0 +1,83 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.rwmano.syntax.mano_template import ManoTemplate
+from rift.mano.tosca_translator.rwmano.translate_inputs import TranslateInputs
+from rift.mano.tosca_translator.rwmano.translate_node_templates \
+    import TranslateNodeTemplates
+from rift.mano.tosca_translator.rwmano.translate_outputs \
+    import TranslateOutputs
+
+
+class TOSCATranslator(object):
+    '''Invokes translation methods.'''
+
+    def __init__(self, log, tosca, parsed_params, deploy=None, use_gi=False):
+        super(TOSCATranslator, self).__init__()
+        self.log = log
+        self.tosca = tosca
+        self.mano_template = ManoTemplate(log)
+        self.parsed_params = parsed_params
+        self.deploy = deploy
+        self.use_gi = use_gi
+        self.node_translator = None
+        log.info(_('Initialized parameters for translation.'))
+
+    def translate(self):
+        self._resolve_input()
+        self.mano_template.description = self.tosca.description
+        self.mano_template.parameters = self._translate_inputs()
+        self.node_translator = TranslateNodeTemplates(self.log,
+                                                      self.tosca,
+                                                      self.mano_template)
+        self.mano_template.resources = self.node_translator.translate()
+        # TODO(Philip): Currently doing groups and policies separately
+        # due to limitations with the parser
+        self.mano_template.groups = self.node_translator.translate_groups()
+        self.mano_template.policies = self.node_translator.translate_policies()
+        self.mano_template.metadata = self.node_translator.metadata
+        # Currently we do not use outputs, so not processing them
+        # self.mano_template.outputs = self._translate_outputs()
+        return self.mano_template.output_to_yang(use_gi=self.use_gi)
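+    # Illustrative usage (a sketch; ToscaTemplate is the tosca-parser
+    # entry point and 'ping_pong.yaml' is a hypothetical template):
+    #
+    #   from toscaparser.tosca_template import ToscaTemplate
+    #   tosca = ToscaTemplate('ping_pong.yaml', {}, a_file=True)
+    #   translator = TOSCATranslator(log, tosca, parsed_params={})
+    #   yang = translator.translate()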
+
+    def _translate_inputs(self):
+        translator = TranslateInputs(self.log,
+                                     self.tosca.inputs,
+                                     self.parsed_params,
+                                     self.deploy)
+        return translator.translate()
+
+    def _translate_outputs(self):
+        translator = TranslateOutputs(self.log,
+                                      self.tosca.outputs,
+                                      self.node_translator)
+        return translator.translate()
+
+    # Check all properties for all nodes and ensure they are resolved
+    # to actual values
+    def _resolve_input(self):
+        for n in self.tosca.nodetemplates:
+            for node_prop in n.get_properties_objects():
+                if isinstance(node_prop.value, dict):
+                    if 'get_input' in node_prop.value:
+                        try:
+                            self.parsed_params[node_prop.value['get_input']]
+                        except Exception:
+                            msg = (_('Must specify all input values in '
+                                     'TOSCA template, missing %s.') %
+                                   node_prop.value['get_input'])
+                            self.log.error(msg)
+                            raise ValueError(msg)
diff --git a/common/python/rift/mano/tosca_translator/rwmano/translate_inputs.py b/common/python/rift/mano/tosca_translator/rwmano/translate_inputs.py
new file mode 100644 (file)
index 0000000..e5583d5
--- /dev/null
@@ -0,0 +1,172 @@
+# STANDARD_RIFT_IO_COPYRIGHT
+
+# Modified from https://github.com/openstack/heat-translator (APL 2.0)
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.rwmano.syntax.mano_parameter import ManoParameter
+
+from toscaparser.dataentity import DataEntity
+from toscaparser.elements.scalarunit import ScalarUnit_Size
+from toscaparser.parameters import Input
+from toscaparser.utils.validateutils import TOSCAVersionProperty
+
+
+INPUT_CONSTRAINTS = (CONSTRAINTS, DESCRIPTION, LENGTH, RANGE,
+                     MIN, MAX, ALLOWED_VALUES, ALLOWED_PATTERN) = \
+                    ('constraints', 'description', 'length', 'range',
+                     'min', 'max', 'allowed_values', 'allowed_pattern')
+
+TOSCA_CONSTRAINT_OPERATORS = (EQUAL, GREATER_THAN, GREATER_OR_EQUAL, LESS_THAN,
+                              LESS_OR_EQUAL, IN_RANGE, VALID_VALUES, LENGTH,
+                              MIN_LENGTH, MAX_LENGTH, PATTERN) = \
+                             ('equal', 'greater_than', 'greater_or_equal',
+                              'less_than', 'less_or_equal', 'in_range',
+                              'valid_values', 'length', 'min_length',
+                              'max_length', 'pattern')
+
+TOSCA_TO_MANO_CONSTRAINTS_ATTRS = {'equal': 'allowed_values',
+                                   'greater_than': 'range',
+                                   'greater_or_equal': 'range',
+                                   'less_than': 'range',
+                                   'less_or_equal': 'range',
+                                   'in_range': 'range',
+                                   'valid_values': 'allowed_values',
+                                   'length': 'length',
+                                   'min_length': 'length',
+                                   'max_length': 'length',
+                                   'pattern': 'allowed_pattern'}
+
+TOSCA_TO_MANO_INPUT_TYPES = {'string': 'string',
+                             'integer': 'number',
+                             'float': 'number',
+                             'boolean': 'boolean',
+                             'timestamp': 'string',
+                             'scalar-unit.size': 'number',
+                             'version': 'string',
+                             'null': 'string',
+                             'PortDef': 'number'}
+
+
+class TranslateInputs(object):
+
+    '''Translate TOSCA Inputs to RIFT MANO input Parameters.'''
+
+    def __init__(self, log, inputs, parsed_params, deploy=None):
+        self.log = log
+        self.inputs = inputs
+        self.parsed_params = parsed_params
+        self.deploy = deploy
+
+    def translate(self):
+        return self._translate_inputs()
+
+    def _translate_inputs(self):
+        mano_inputs = []
+        if 'key_name' in self.parsed_params and 'key_name' not in self.inputs:
+            name = 'key_name'
+            type = 'string'
+            default = self.parsed_params[name]
+            schema_dict = {'type': type, 'default': default}
+            input = Input(name, schema_dict)
+            self.inputs.append(input)
+
+        self.log.info(_('Translating TOSCA input type to MANO input type.'))
+        for input in self.inputs:
+            mano_default = None
+            mano_input_type = TOSCA_TO_MANO_INPUT_TYPES[input.type]
+
+            if input.name in self.parsed_params:
+                mano_default = DataEntity.validate_datatype(
+                    input.type, self.parsed_params[input.name])
+            elif input.default is not None:
+                mano_default = DataEntity.validate_datatype(input.type,
+                                                            input.default)
+            else:
+                if self.deploy:
+                    msg = _("Need to specify a value "
+                            "for input {0}.").format(input.name)
+                    self.log.error(msg)
+                    raise Exception(msg)
+            if input.type == "scalar-unit.size":
+                # Assumption here is to use this scalar-unit.size for size of
+                # cinder volume in heat templates and will be in GB.
+                # should add logic to support other types if needed.
+                input_value = mano_default
+                mano_default = (ScalarUnit_Size(mano_default).
+                                get_num_from_scalar_unit('GiB'))
+                if mano_default == 0:
+                    msg = _('Unit value should be > 0.')
+                    self.log.error(msg)
+                    raise Exception(msg)
+                elif int(mano_default) < mano_default:
+                    mano_default = int(mano_default) + 1
+                    self.log.warning(_("Cinder unit value should be in"
+                                       " multiples of GBs. So corrected"
+                                       " %(input_value)s to %(mano_default)s"
+                                       " GB.")
+                                     % {'input_value': input_value,
+                                        'mano_default': mano_default})
+            if input.type == 'version':
+                mano_default = TOSCAVersionProperty(mano_default).get_version()
+
+            mano_constraints = []
+            if input.constraints:
+                for constraint in input.constraints:
+                    if mano_default:
+                        constraint.validate(mano_default)
+                    hc, hvalue = self._translate_constraints(
+                        constraint.constraint_key, constraint.constraint_value)
+                    mano_constraints.append({hc: hvalue})
+
+            mano_inputs.append(ManoParameter(self.log,
+                                             name=input.name,
+                                             type=mano_input_type,
+                                             description=input.description,
+                                             default=mano_default,
+                                             constraints=mano_constraints))
+        return mano_inputs
+
+    def _translate_constraints(self, name, value):
+        mano_constraint = TOSCA_TO_MANO_CONSTRAINTS_ATTRS[name]
+
+        # Offset used to support less_than and greater_than.
+        # TODO(anyone):  when parser supports float, verify this works
+        offset = 1
+
+        if name == EQUAL:
+            mano_value = [value]
+        elif name == GREATER_THAN:
+            mano_value = {"min": value + offset}
+        elif name == GREATER_OR_EQUAL:
+            mano_value = {"min": value}
+        elif name == LESS_THAN:
+            mano_value = {"max": value - offset}
+        elif name == LESS_OR_EQUAL:
+            mano_value = {"max": value}
+        elif name == IN_RANGE:
+            # value is list type here
+            min_value = min(value)
+            max_value = max(value)
+            mano_value = {"min": min_value, "max": max_value}
+        elif name == LENGTH:
+            mano_value = {"min": value, "max": value}
+        elif name == MIN_LENGTH:
+            mano_value = {"min": value}
+        elif name == MAX_LENGTH:
+            mano_value = {"max": value}
+        else:
+            mano_value = value
+        return mano_constraint, mano_value
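+    # Illustrative examples:
+    #   ('greater_than', 5)   -> ('range', {'min': 6})
+    #   ('in_range', [1, 10]) -> ('range', {'min': 1, 'max': 10})
+    #   ('valid_values', vs)  -> ('allowed_values', vs)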
diff --git a/common/python/rift/mano/tosca_translator/rwmano/translate_node_templates.py b/common/python/rift/mano/tosca_translator/rwmano/translate_node_templates.py
new file mode 100644 (file)
index 0000000..dbfaa62
--- /dev/null
@@ -0,0 +1,328 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Copyright 2016 RIFT.io Inc
+
+
+import importlib
+import os
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.exception import ToscaClassAttributeError
+from rift.mano.tosca_translator.common.exception import ToscaClassImportError
+from rift.mano.tosca_translator.common.exception import ToscaModImportError
+from rift.mano.tosca_translator.conf.config import ConfigProvider as translatorConfig
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+
+
+class TranslateNodeTemplates(object):
+    '''Translate TOSCA NodeTemplates to RIFT.io MANO Resources.'''
+
+    ##################
+    # Module constants
+    ##################
+
+    TOSCA_TO_MANO_REQUIRES = {'container': 'server',
+                              'host': 'server',
+                              'dependency': 'depends_on',
+                              'connects': 'depends_on'}
+
+    TOSCA_TO_MANO_PROPERTIES = {'properties': 'input'}
+
+    TOSCA_TO_MANO_TYPE = None
+
+    ###########################
+    # Module utility Functions
+    # for dynamic class loading
+    ###########################
+
+    def _load_classes(log, locations, classes):
+        '''Dynamically load all the classes from the given locations.'''
+
+        for cls_path in locations:
+            # Use the absolute path of the class path
+            abs_path = os.path.dirname(os.path.abspath(__file__))
+            abs_path = abs_path.replace('rift/mano/tosca_translator/rwmano', cls_path)
+            log.debug(_("Loading classes from %s") % abs_path)
+
+            # Grab all the tosca type module files in the given path
+            mod_files = [f for f in os.listdir(abs_path) if (
+                f.endswith('.py') and
+                not f.startswith('__init__') and
+                f.startswith('tosca_'))]
+
+            # For each module, pick out the target translation class
+            for f in mod_files:
+                # NOTE: For some reason the existing code does not use
+                # the map to instantiate
+                # ToscaBlockStorageAttachment. Don't add it to the map
+                # here until the dependent code is fixed to use the
+                # map.
+                if f == 'tosca_block_storage_attachment.py':
+                    continue
+
+                # mod_name = cls_path + '/' + f.rstrip('.py')
+                # The above has an issue if the module name ends in 'p' or
+                # 'y', since rstrip removes characters, not a suffix
+                f_name, ext = f.rsplit('.', 1)
+                mod_name = cls_path + '/' + f_name
+                mod_name = mod_name.replace('/', '.')
+                target_name = None
+                try:
+                    mod = importlib.import_module(mod_name)
+                    target_name = getattr(mod, 'TARGET_CLASS_NAME')
+                    clazz = getattr(mod, target_name)
+                    classes.append(clazz)
+                except ImportError:
+                    raise ToscaModImportError(mod_name=mod_name)
+                except AttributeError:
+                    # target_name stays None if TARGET_CLASS_NAME itself
+                    # was missing; guard against an unbound name here
+                    if target_name:
+                        raise ToscaClassImportError(name=target_name,
+                                                    mod_name=mod_name)
+                    else:
+                        # TARGET_CLASS_NAME is not defined in the module;
+                        # re-raise the original exception
+                        raise
+
+    def _generate_type_map(log):
+        '''Generate TOSCA translation types map.
+
+        Load user defined classes from location path specified in conf file.
+        Base classes are located within the tosca directory.
+        '''
+
+        # Base types directory
+        BASE_PATH = 'rift/mano/tosca_translator/rwmano/tosca'
+
+        # Custom types directory defined in conf file
+        custom_path = translatorConfig.get_value('DEFAULT',
+                                                 'custom_types_location')
+
+        # First need to load the parent module, for example 'contrib.mano',
+        # for all of the dynamically loaded classes.
+        classes = []
+        TranslateNodeTemplates._load_classes(log,
+                                             (BASE_PATH, custom_path),
+                                             classes)
+        try:
+            types_map = {clazz.toscatype: clazz for clazz in classes}
+            log.debug(_("Type maps loaded: {}").format(types_map.keys()))
+        except AttributeError as e:
+            raise ToscaClassAttributeError(message=str(e))
+
+        return types_map
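+    # Illustrative sketch: the map is keyed by each class's toscatype
+    # attribute, e.g.
+    #   {'tosca.nodes.nfv.VNF': ToscaNfvVnf,
+    #    'tosca.nodes.network.Network': ToscaNetwork, ...}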
+
+    def __init__(self, log, tosca, mano_template):
+        self.log = log
+        self.tosca = tosca
+        self.nodetemplates = self.tosca.nodetemplates
+        self.mano_template = mano_template
+        # list of all MANO resources generated
+        self.mano_resources = []
+        self.mano_policies = []
+        self.mano_groups = []
+        # mapping between TOSCA nodetemplate and MANO resource
+        log.debug(_('Mapping between TOSCA nodetemplate and MANO resource.'))
+        self.mano_lookup = {}
+        self.policies = self.tosca.topology_template.policies
+        self.groups = self.tosca.topology_template.groups
+        self.metadata = {}
+
+    def translate(self):
+        if TranslateNodeTemplates.TOSCA_TO_MANO_TYPE is None:
+            TranslateNodeTemplates.TOSCA_TO_MANO_TYPE = \
+                TranslateNodeTemplates._generate_type_map(self.log)
+        # Translate metadata
+        self.translate_metadata()
+        return self._translate_nodetemplates()
+
+    def translate_metadata(self):
+        """Translate and store the metadata in instance"""
+        FIELDS_MAP = {
+            'ID': 'name',
+            'vendor': 'vendor',
+            'version': 'version',
+        }
+        metadata = {}
+        # Initialize to default values
+        metadata['name'] = 'tosca_to_mano'
+        metadata['vendor'] = 'RIFT.io'
+        metadata['version'] = '1.0'
+        if 'metadata' in self.tosca.tpl:
+            tosca_meta = self.tosca.tpl['metadata']
+            for key in FIELDS_MAP:
+                if key in tosca_meta.keys():
+                    metadata[FIELDS_MAP[key]] = str(tosca_meta[key])
+        self.log.debug(_("Metadata {0}").format(metadata))
+        self.metadata = metadata
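+    # Illustrative example: template metadata of
+    #   {'ID': 'ping_pong_nsd', 'vendor': 'RIFT.io', 'version': '1.1'}
+    # yields {'name': 'ping_pong_nsd', 'vendor': 'RIFT.io',
+    #         'version': '1.1'}; missing keys keep the defaults above.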
+
+    def _recursive_handle_properties(self, resource):
+        '''Recursively handle the properties of the depends_on_nodes nodes.'''
+        # NOTE: a set membership check here would be faster than a list
+        if resource in self.processed_resources:
+            return
+        self.processed_resources.append(resource)
+        for depend_on in resource.depends_on_nodes:
+            self._recursive_handle_properties(depend_on)
+
+        if resource.type == "OS::Nova::ServerGroup":
+            resource.handle_properties(self.mano_resources)
+        else:
+            resource.handle_properties()
+
+    def _get_policy_type(self, policy):
+        if isinstance(policy, dict):
+            for key, details in policy.items():
+                if 'type' in details:
+                    return details['type']
+
+    def _translate_nodetemplates(self):
+
+        self.log.debug(_('Translating the node templates.'))
+        # Copy the TOSCA graph: nodetemplate
+        tpl = self.tosca.tpl['topology_template']['node_templates']
+        for node in self.nodetemplates:
+            base_type = ManoResource.get_base_type(node.type_definition)
+            self.log.debug(_("Translate node %(name)s of type %(type)s with "
+                             "base %(base)s") %
+                           {'name': node.name,
+                            'type': node.type,
+                            'base': base_type.type})
+            mano_node = TranslateNodeTemplates. \
+                        TOSCA_TO_MANO_TYPE[base_type.type](
+                            self.log,
+                            node,
+                            metadata=self.metadata)
+            # Currently tosca-parser does not add the artifacts
+            # to the node
+            if mano_node.name in tpl:
+                tpl_node = tpl[mano_node.name]
+                self.log.debug("Check artifacts for {}".format(tpl_node))
+                if 'artifacts' in tpl_node:
+                    mano_node.artifacts = tpl_node['artifacts']
+            self.mano_resources.append(mano_node)
+            self.mano_lookup[node] = mano_node
+
+        # The parser currently does not generate objects for groups
+        if 'groups' in self.tosca.tpl['topology_template']:
+            tpl = self.tosca.tpl['topology_template']['groups']
+            self.log.debug("Groups: {}".format(tpl))
+            for group, details in tpl.items():
+                self.log.debug(_("Translate group {}: {}").
+                               format(group, details))
+                group_type = details['type']
+                if group_type:
+                    group_node = TranslateNodeTemplates. \
+                                 TOSCA_TO_MANO_TYPE[group_type](
+                                     self.log,
+                                     group,
+                                     details,
+                                     metadata=self.metadata)
+                    self.mano_groups.append(group_node)
+
+        # The parser currently does not generate objects for policies
+        if 'policies' in self.tosca.tpl['topology_template']:
+            tpl = self.tosca.tpl['topology_template']['policies']
+            # for policy in self.policies:
+            for policy in tpl:
+                self.log.debug(_("Translate policy {}").
+                               format(policy))
+                policy_type = self._get_policy_type(policy)
+                if policy_type:
+                    policy_node = TranslateNodeTemplates. \
+                                  TOSCA_TO_MANO_TYPE[policy_type](
+                                      self.log,
+                                      policy,
+                                      metadata=self.metadata)
+                    self.mano_policies.append(policy_node)
+
+        for node in self.mano_resources:
+            self.log.debug(_("Handle properties for {0} of type {1}").
+                           format(node.name, node.type_))
+            node.handle_properties()
+
+            self.log.debug(_("Handle capabilites for {0} of type {1}").
+                           format(node.name, node.type_))
+            node.handle_capabilities()
+
+            self.log.debug(_("Handle aritfacts for {0} of type {1}").
+                           format(node.name, node.type_))
+            node.handle_artifacts()
+
+            self.log.debug(_("Handle interfaces for {0} of type {1}").
+                           format(node.name, node.type_))
+            node.handle_interfaces()
+
+            self.log.debug(_("Update image checksum for {0} of type {1}").
+                           format(node.name, node.type_))
+            node.update_image_checksum(self.tosca.path)
+
+        for node in self.mano_resources:
+            # Handle vnf and vdu dependencies first
+            if node.type == "vnfd":
+                try:
+                    self.log.debug(_("Handle requirements for {0} of "
+                                     "type {1}").
+                                   format(node.name, node.type_))
+                    node.handle_requirements(self.mano_resources)
+                except Exception as e:
+                    self.log.error(_("Exception for {0} in requirements {1}").
+                                   format(node.name, node.type_))
+                    self.log.exception(e)
+
+        for node in self.mano_resources:
+            # Now handle other dependencies
+            if node.type != "vnfd":
+                try:
+                    self.log.debug(_("Handle requirements for {0} of type {1}").
+                                   format(node.name, node.type_))
+                    node.handle_requirements(self.mano_resources)
+                except Exception as e:
+                    self.log.error(_("Exception for {0} in requirements {1}").
+                                   format(node.name, node.type_))
+                    self.log.exception(e)
+
+        return self.mano_resources
+
+    def translate_groups(self):
+        for group in self.mano_groups:
+            group.handle_properties(self.mano_resources)
+        return self.mano_groups
+
+    def translate_policies(self):
+        for policy in self.mano_policies:
+            policy.handle_properties(self.mano_resources, self.mano_groups)
+        return self.mano_policies
+
+    def find_mano_resource(self, name):
+        for resource in self.mano_resources:
+            if resource.name == name:
+                return resource
+
+    def _find_tosca_node(self, tosca_name):
+        for node in self.nodetemplates:
+            if node.name == tosca_name:
+                return node
+
+    def _find_mano_resource_for_tosca(self, tosca_name,
+                                      current_mano_resource=None):
+        if tosca_name == 'SELF':
+            return current_mano_resource
+        if tosca_name == 'HOST' and current_mano_resource is not None:
+            for req in current_mano_resource.nodetemplate.requirements:
+                if 'host' in req:
+                    return self._find_mano_resource_for_tosca(req['host'])
+
+        for node in self.nodetemplates:
+            if node.name == tosca_name:
+                return self.mano_lookup[node]
+
+        return None
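
The dispatch above keys translator classes by a class-level toscatype
attribute, so supporting a new TOSCA type only requires dropping such a
class into the base tosca directory or the configured
custom_types_location. A minimal sketch of that convention follows; the
class and type names are hypothetical, and the real classes derive from
the ManoResource hierarchy and implement the handle_* hooks invoked above:

    import logging

    class ToscaAcmeNode(object):
        '''Hypothetical translator for a custom TOSCA node type.'''

        # Key that _generate_type_map() collects into TOSCA_TO_MANO_TYPE
        toscatype = 'tosca.nodes.acme.Node'

        def __init__(self, log, nodetemplate, metadata=None):
            self.log = log
            self.nodetemplate = nodetemplate
            self.metadata = metadata or {}

        def handle_properties(self):
            # Real classes map TOSCA properties to MANO descriptor fields
            self.log.debug("Handling properties for %s", self.toscatype)

    node = ToscaAcmeNode(logging.getLogger(__name__), nodetemplate=None)
    node.handle_properties()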
diff --git a/common/python/rift/mano/tosca_translator/rwmano/translate_outputs.py b/common/python/rift/mano/tosca_translator/rwmano/translate_outputs.py
new file mode 100644 (file)
index 0000000..d684492
--- /dev/null
@@ -0,0 +1,47 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.rwmano.syntax.mano_output import ManoOutput
+
+
+class TranslateOutputs(object):
+    '''Translate TOSCA outputs to MANO outputs.'''
+
+    def __init__(self, log, outputs, node_translator):
+        log.debug(_('Translating TOSCA outputs to MANO outputs.'))
+        self.log = log
+        self.outputs = outputs
+        self.nodes = node_translator
+
+    def translate(self):
+        return self._translate_outputs()
+
+    def _translate_outputs(self):
+        mano_outputs = []
+        for output in self.outputs:
+            if output.value.name == 'get_attribute':
+                get_parameters = output.value.args
+                mano_target = self.nodes.find_mano_resource(get_parameters[0])
+                mano_value = mano_target.get_mano_attribute(get_parameters[1],
+                                                            get_parameters)
+                mano_outputs.append(ManoOutput(output.name,
+                                               mano_value,
+                                               output.description))
+            else:
+                mano_outputs.append(ManoOutput(output.name,
+                                               output.value,
+                                               output.description))
+        return mano_outputs
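
A sketch of the two output shapes _translate_outputs() distinguishes,
using hypothetical stand-ins for the objects tosca-parser produces:

    class GetAttribute(object):
        '''Stand-in for a parsed TOSCA get_attribute function.'''
        name = 'get_attribute'
        args = ['my_server', 'mgmt_ip']   # [node template name, attribute]

    class Output(object):
        '''Stand-in for a parsed TOSCA output.'''
        def __init__(self, name, value, description=''):
            self.name = name
            self.value = value
            self.description = description

    outputs = [
        # get_attribute values are resolved through find_mano_resource()
        Output('vnf_mgmt_ip', GetAttribute()),
        # anything else is wrapped into a ManoOutput unchanged
        Output('build_tag', 'osm-r1'),
    ]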
diff --git a/common/python/rift/mano/tosca_translator/shell.py b/common/python/rift/mano/tosca_translator/shell.py
new file mode 100644 (file)
index 0000000..9221c79
--- /dev/null
@@ -0,0 +1,515 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Copyright 2016 RIFT.io Inc
+
+
+import argparse
+import logging
+import logging.config
+import os
+import shutil
+import stat
+import subprocess
+import tempfile
+import zipfile
+
+import magic
+
+import yaml
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.utils import ChecksumUtils
+from rift.mano.tosca_translator.rwmano.syntax.mano_template import ManoTemplate
+from rift.mano.tosca_translator.rwmano.tosca_translator import TOSCATranslator
+
+from toscaparser.tosca_template import ToscaTemplate
+
+
+"""
+Test the tosca translation from command line as:
+#translator
+  --template-file=<path to the YAML template or CSAR>
+  --template-type=<type of template e.g. tosca>
+  --parameters="purpose=test"
+  --output_dir=<output directory>
+  --archive
+  --validate_only
+Takes following user arguments,
+. Path to the file that needs to be translated (required)
+. Input parameters (optional)
+. Write to output files in a dir (optional), else print on screen
+. Create archive or not
+
+In order to use translator to only validate template,
+without actual translation, pass --validate-only along with
+other required arguments.
+
+"""
+
+
+class ToscaShellError(Exception):
+    pass
+
+
+class ToscaEntryFileError(ToscaShellError):
+    pass
+
+
+class ToscaNoEntryDefinitionError(ToscaShellError):
+    pass
+
+
+class ToscaEntryFileNotFoundError(ToscaShellError):
+    pass
+
+
+class ToscaCreateArchiveError(ToscaShellError):
+    pass
+
+
+class TranslatorShell(object):
+
+    SUPPORTED_TYPES = ['tosca']
+    COPY_DIRS = ['images']
+    SUPPORTED_INPUTS = (YAML, ZIP) = ('yaml', 'zip')
+
+    def __init__(self, log=None):
+        self.log = log
+
+    def main(self, raw_args=None):
+        args = self._parse_args(raw_args)
+
+        if self.log is None:
+            if args.debug:
+                logging.basicConfig(level=logging.DEBUG)
+            else:
+                logging.basicConfig(level=logging.ERROR)
+            self.log = logging.getLogger("tosca-translator")
+
+        self.template_file = args.template_file
+
+        parsed_params = {}
+        if args.parameters:
+            parsed_params = self._parse_parameters(args.parameters)
+
+        self.archive = False
+        if args.archive:
+            self.archive = True
+
+        self.tmpdir = None
+
+        if args.validate_only:
+            a_file = os.path.isfile(args.template_file)
+            tpl = ToscaTemplate(self.template_file, parsed_params, a_file)
+            self.log.debug(_('Template = {}').format(tpl.__dict__))
+            msg = (_('The input {} successfully passed ' \
+                     'validation.').format(self.template_file))
+            print(msg)
+        else:
+            self.use_gi = not args.no_gi
+            tpl = self._translate("tosca", parsed_params)
+            if tpl:
+                return self._write_output(tpl, args.output_dir)
+
+    def translate(self,
+                  template_file,
+                  output_dir=None,
+                  use_gi=True,
+                  archive=False,):
+        self.template_file = template_file
+
+        # Check the input file
+        path = os.path.abspath(template_file)
+        self.in_file = path
+        a_file = os.path.isfile(path)
+        if not a_file:
+            msg = _("The path {0} is not a valid file.").format(template_file)
+            self.log.error(msg)
+            raise ValueError(msg)
+
+        # Get the file type
+        self.ftype = self._get_file_type()
+        self.log.debug(_("Input file {0} is of type {1}").
+                       format(path, self.ftype))
+
+        self.archive = archive
+
+        self.tmpdir = None
+
+        self.use_gi = use_gi
+
+        tpl = self._translate("tosca", {})
+        if tpl:
+            return self._write_output(tpl, output_dir)
+
+    def _parse_args(self, raw_args=None):
+        parser = argparse.ArgumentParser(
+            description='RIFT TOSCA translator for descriptors')
+
+        parser.add_argument(
+            "-f",
+            "--template-file",
+            required=True,
+            help="Template file to translate")
+
+        parser.add_argument(
+            "-o",
+            "--output-dir",
+            help="Directory to output")
+
+        parser.add_argument(
+            "-p", "--parameters",
+            help="Input parameters")
+
+        parser.add_argument(
+            "-a", "--archive",
+            action="store_true",
+            help="Archive the translated files")
+
+        parser.add_argument(
+            "--no-gi",
+            help="Do not use the YANG GI to generate descriptors",
+            action="store_true")
+
+        parser.add_argument(
+            "--validate-only",
+            help="Validate template, no translation",
+            action="store_true")
+
+        parser.add_argument(
+            "--debug",
+            help="Enable debug logging",
+            action="store_true")
+
+        if raw_args:
+            args = parser.parse_args(raw_args)
+        else:
+            args = parser.parse_args()
+        return args
+
+    def _parse_parameters(self, parameter_list):
+        parsed_inputs = {}
+        if parameter_list:
+            # Parameters are semi-colon separated
+            inputs = parameter_list.replace('"', '').split(';')
+            # Each parameter should be an assignment
+            for param in inputs:
+                keyvalue = param.split('=')
+                # Validate the parameter has both a name and value
+                msg = _("'%(param)s' is not a well-formed parameter.") % {
+                    'param': param}
+                if len(keyvalue) == 2:
+                    # Assure parameter name is not zero-length or whitespace
+                    stripped_name = keyvalue[0].strip()
+                    if not stripped_name:
+                        self.log.error(msg)
+                        raise ValueError(msg)
+                    # Add the valid parameter to the dictionary
+                    parsed_inputs[keyvalue[0]] = keyvalue[1]
+                else:
+                    self.log.error(msg)
+                    raise ValueError(msg)
+        return parsed_inputs
+
+    def get_entry_file(self):
+        # Extract the archive and get the entry file
+        if self.ftype == self.YAML:
+            return self.in_file
+
+        self.prefix = ''
+        if self.ftype == self.ZIP:
+            self.tmpdir = tempfile.mkdtemp()
+            prevdir = os.getcwd()
+            try:
+                with zipfile.ZipFile(self.in_file) as zf:
+                    self.prefix = os.path.commonprefix(zf.namelist())
+                    self.log.debug(_("Zipfile prefix is {0}").
+                                   format(self.prefix))
+                    zf.extractall(self.tmpdir)
+
+                    # Set the execute bits on scripts as zipfile
+                    # does not restore the permission bits
+                    os.chdir(self.tmpdir)
+                    for fname in zf.namelist():
+                        if (fname.startswith('scripts/') and
+                            os.path.isfile(fname)):
+                            # Assume this is a script file
+                            # Give all permissions to owner and read+execute
+                            # for group and others
+                            os.chmod(fname,
+                                     stat.S_IRWXU|stat.S_IRGRP|stat.S_IXGRP|stat.S_IROTH|stat.S_IXOTH)
+
+                # TODO (pjoseph): Use the below code instead of extract all
+                # once unzip is installed on launchpad VMs
+                # zfile = os.path.abspath(self.in_file)
+                # os.chdir(self.tmpdir)
+                # zip_cmd = "unzip {}".format(zfile)
+                # subprocess.check_call(zip_cmd,
+                #                       #stdout=subprocess.PIPE,
+                #                       #stderr=subprocess.PIPE,
+                #                       shell=True,)
+
+            except Exception as e:
+                msg = _("Exception extracting input file {0}: {1}"). \
+                      format(self.in_file, e)
+                self.log.error(msg)
+                self.log.exception(e)
+                os.chdir(prevdir)
+                shutil.rmtree(self.tmpdir)
+                self.tmpdir = None
+                raise ToscaEntryFileError(msg)
+
+        os.chdir(self.tmpdir)
+
+        try:
+            # Go to the TOSCA metadata file
+            prefix_dir = os.path.join(self.tmpdir, self.prefix)
+            meta_file = os.path.join(prefix_dir, 'TOSCA-Metadata',
+                                     'TOSCA.meta')
+            self.log.debug(_("Checking metadata file {0}").format(meta_file))
+            if not os.path.exists(meta_file):
+                self.log.error(_("Not able to find metadata file in archive"))
+                return
+
+            # Open the metadata file and get the entry file
+            with open(meta_file, 'r') as f:
+                meta = yaml.safe_load(f)
+
+                if 'Entry-Definitions' in meta:
+                    entry_file = os.path.join(prefix_dir,
+                                              meta['Entry-Definitions'])
+                    if os.path.exists(entry_file):
+                        self.log.debug(_("TOSCA entry file is {0}").
+                                       format(entry_file))
+                        return entry_file
+
+                    else:
+                        msg = _("Unable to get the entry file: {0}"). \
+                              format(entry_file)
+                        self.log.error(msg)
+                        raise ToscaEntryFileNotFoundError(msg)
+
+                else:
+                    msg = _("Did not find entry definition " \
+                            "in metadata: {0}").format(meta)
+                    self.log.error(msg)
+                    raise ToscaNoEntryDefinitionError(msg)
+
+        except Exception as e:
+            msg = _('Exception parsing metadata file {0}: {1}'). \
+                  format(meta_file, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise ToscaEntryFileError(msg)
+
+        finally:
+            os.chdir(prevdir)
+
+    def _translate(self, sourcetype, parsed_params):
+        output = None
+
+        # Check the input file
+        path = os.path.abspath(self.template_file)
+        self.in_file = path
+        a_file = os.path.isfile(path)
+        if not a_file:
+            msg = _("The path {} is not a valid file."). \
+                  format(self.template_file)
+            self.log.error(msg)
+            raise ValueError(msg)
+
+        # Get the file type
+        self.ftype = self._get_file_type()
+        self.log.debug(_("Input file {0} is of type {1}").
+                       format(path, self.ftype))
+
+        if sourcetype == "tosca":
+            entry_file = self.get_entry_file()
+            if entry_file:
+                self.log.debug(_('Loading the tosca template.'))
+                tosca = ToscaTemplate(entry_file, parsed_params, True)
+                self.log.debug(_('TOSCA Template: {}').format(tosca.__dict__))
+                translator = TOSCATranslator(self.log, tosca, parsed_params,
+                                             use_gi=self.use_gi)
+                self.log.debug(_('Translating the tosca template.'))
+                output = translator.translate()
+        return output
+
+    def _copy_supporting_files(self, output_dir, files):
+        # Copy supporting files, if present in archive
+        if self.tmpdir:
+            # The files are referenced relative to the Definitions directory
+            arc_dir = os.path.join(self.tmpdir,
+                                   self.prefix,
+                                   'Definitions')
+            prevdir = os.getcwd()
+            try:
+                os.chdir(arc_dir)
+                for fn in files:
+                    fname = fn['name']
+                    fpath = os.path.abspath(fname)
+                    ty = fn['type']
+                    if ty == 'image':
+                        dest = os.path.join(output_dir, 'images')
+                    elif ty == 'script':
+                        dest = os.path.join(output_dir, 'scripts')
+                    elif ty == 'cloud_init':
+                        dest = os.path.join(output_dir, 'cloud_init')
+                    else:
+                        self.log.warn(_("Unknown file type {0} for {1}").
+                                      format(ty, fname))
+                        continue
+
+                    self.log.debug(_("File type {0} copy from {1} to {2}").
+                                   format(ty, fpath, dest))
+                    if os.path.exists(fpath):
+                        # Copy the files to the appropriate dir
+                        self.log.debug(_("Copy file(s) {0} to {1}").
+                                         format(fpath, dest))
+                        if os.path.isdir(fpath):
+                            # Copy directory structure like charm dir
+                            shutil.copytree(fpath, dest)
+                        else:
+                            # Copy a single file
+                            os.makedirs(dest, exist_ok=True)
+                            shutil.copy2(fpath, dest)
+
+                    else:
+                        self.log.warn(_("Could not find file {0} at {1}").
+                                      format(fname, fpath))
+
+            except Exception as e:
+                self.log.error(_("Exception copying files {0}: {1}").
+                               format(arc_dir, e))
+                self.log.exception(e)
+
+            finally:
+                os.chdir(prevdir)
+
+    def _create_checksum_file(self, output_dir):
+        # Create checksums for all files
+        flist = {}
+        for root, dirs, files in os.walk(output_dir):
+            rel_dir = root.replace(output_dir, '').lstrip('/')
+
+            for f in files:
+                fpath = os.path.join(root, f)
+                # TODO (pjoseph): To be fixed when we can
+                # retrieve image files from Launchpad
+                if os.path.getsize(fpath) != 0:
+                    flist[os.path.join(rel_dir, f)] = \
+                                                ChecksumUtils.get_md5(fpath)
+
+        self.log.debug(_("Files in output_dir: {}").format(flist))
+
+        # Write the checksum file only after the entire tree has been walked
+        chksumfile = os.path.join(output_dir, 'checksums.txt')
+        with open(chksumfile, 'w') as c:
+            for key in sorted(flist.keys()):
+                c.write("{}  {}\n".format(flist[key], key))
+
+    def _create_archive(self, desc_id, output_dir):
+        """Create a tar.gz archive for the descriptor"""
+        aname = desc_id + '.tar.gz'
+        apath = os.path.join(output_dir, aname)
+        self.log.debug(_("Generating archive: {}").format(apath))
+
+        prevdir = os.getcwd()
+        os.chdir(output_dir)
+
+        # Generate the archive
+        tar_cmd = "tar zcvf {} {}".format(apath, desc_id)
+        self.log.debug(_("Generate archive: {}").format(tar_cmd))
+
+        try:
+            subprocess.check_call(tar_cmd,
+                                  stdout=subprocess.PIPE,
+                                  stderr=subprocess.PIPE,
+                                  shell=True)
+            return apath
+
+        except subprocess.CalledProcessError as e:
+            msg = _("Error creating archive with {}: {}"). \
+                           format(tar_cmd, e)
+            self.log.error(msg)
+            raise ToscaCreateArchiveError(msg)
+
+        finally:
+            os.chdir(prevdir)
+
+    def _write_output(self, output, output_dir=None):
+        out_files = []
+
+        if output_dir:
+            output_dir = os.path.abspath(output_dir)
+
+        if output:
+            # Write the VNFDs first and then the NSDs, as the VNFDs need
+            # to be loaded first when importing into the launchpad
+            for key in [ManoTemplate.VNFD, ManoTemplate.NSD]:
+                for desc in output[key]:
+                    if output_dir:
+                        desc_id = desc[ManoTemplate.ID]
+                        # Create a separate directory for each descriptor
+                        # Use the descriptor id to avoid name clash
+                        subdir = os.path.join(output_dir, desc_id)
+                        os.makedirs(subdir)
+
+                        output_file = os.path.join(subdir,
+                                            desc[ManoTemplate.NAME]+'.yml')
+                        self.log.debug(_("Writing file {0}").
+                                       format(output_file))
+                        with open(output_file, 'w+') as f:
+                            f.write(desc[ManoTemplate.YANG])
+
+                        if ManoTemplate.FILES in desc:
+                            self._copy_supporting_files(subdir,
+                                                desc[ManoTemplate.FILES])
+
+                        if self.archive:
+                            # Create checksum file
+                            self._create_checksum_file(subdir)
+                            out_files.append(self._create_archive(desc_id,
+                                                                  output_dir))
+                            # Remove the desc directory
+                            shutil.rmtree(subdir)
+                    else:
+                        print(_("Descriptor {0}:\n{1}").
+                              format(desc[ManoTemplate.NAME],
+                                     desc[ManoTemplate.YANG]))
+
+            if output_dir and self.archive:
+                # Return the list of archive files
+                return out_files
+
+    def _get_file_type(self):
+        m = magic.open(magic.MAGIC_MIME)
+        m.load()
+        typ = m.file(self.in_file)
+        if typ.startswith('text/plain'):
+            # Assume it is YAML
+            return self.YAML
+        elif typ.startswith('application/zip'):
+            return self.ZIP
+        else:
+            msg = _("The file {0} is not a supported type: {1}"). \
+                  format(self.in_file, typ)
+            self.log.error(msg)
+            raise ValueError(msg)
+
+
+def main(args=None, log=None):
+    TranslatorShell(log=log).main(raw_args=args)
+
+
+if __name__ == '__main__':
+    main()
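
Mirroring the unit tests below, the shell can also be driven
programmatically; the template paths here are illustrative:

    import logging
    import tempfile

    from rift.mano.tosca_translator import shell

    log = logging.getLogger('tosca-translator')
    out_dir = tempfile.mkdtemp()

    # Translate a CSAR and write one tar.gz archive per descriptor
    shell.main(['--template-file=ping_pong_csar.zip',
                '--output-dir=' + out_dir,
                '--archive'],
               log=log)

    # Parameters are passed as one semicolon-separated list of assignments
    shell.main(['--template-file=ping_pong_nsd.yaml',
                '--parameters=vendor=RIFT.io;purpose=test'],
               log=log)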
diff --git a/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar.zip b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar.zip
new file mode 100644 (file)
index 0000000..e91aecd
Binary files /dev/null and b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar.zip differ
diff --git a/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/Definitions/ping_pong_nsd.yaml b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/Definitions/ping_pong_nsd.yaml
new file mode 100644 (file)
index 0000000..9a68023
--- /dev/null
@@ -0,0 +1,390 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+description: Toy NS
+metadata:
+  ID: ping_pong_nsd
+  vendor: RIFT.io
+  version: 1.0
+data_types:
+  tosca.datatypes.network.riftio.vnf_configuration:
+    properties:
+      config_delay:
+        constraints:
+        - greater_or_equal: 0
+        default: 0
+        required: no
+        type: integer
+      config_details:
+        type: map
+      config_priority:
+        constraints:
+        - greater_than: 0
+        type: integer
+      config_template:
+        required: no
+        type: string
+      config_type:
+        type: string
+capability_types:
+  tosca.capabilities.riftio.mgmt_interface_type:
+    derived_from: tosca.capabilities.Endpoint
+    properties:
+      dashboard_params:
+        type: map
+      vdu:
+        type: string
+  tosca.capabilities.riftio.http_endpoint_type:
+    derived_from: tosca.capabilities.Endpoint
+    properties:
+      path:
+        type: string
+      polling_interval:
+        type: integer
+  tosca.capabilities.riftio.monitoring_param:
+    derived_from: tosca.capabilities.nfv.Metric
+    properties:
+      description:
+        type: string
+      group_tag:
+        default: Group1
+        type: string
+      http_endpoint_ref:
+        type: string
+      id:
+        type: integer
+      json_query_method:
+        default: NAMEKEY
+        type: string
+      name:
+        type: string
+      units:
+        type: string
+      value_type:
+        default: INT
+        type: string
+      widget_type:
+        default: COUNTER
+        type: string
+node_types:
+  tosca.nodes.riftio.CP1:
+    derived_from: tosca.nodes.nfv.CP
+    properties:
+      cp_type:
+        description: Type of the connection point
+        type: string
+      name:
+        description: Name of the connection point
+        type: string
+      vdu_intf_name:
+        description: Name of the interface on VDU
+        type: string
+      vdu_intf_type:
+        description: Type of the interface on VDU
+        type: string
+  tosca.nodes.riftio.VL1:
+    derived_from: tosca.nodes.nfv.VL.ELAN
+    properties:
+      description:
+        type: string
+  tosca.nodes.riftio.VNF1:
+    derived_from: tosca.nodes.nfv.VNF
+    properties:
+      port:
+        constraints:
+        - in_range: [1, 65535]
+        type: integer
+      vnf_configuration:
+        type: tosca.datatypes.network.riftio.vnf_configuration
+      start_by_default:
+        type: boolean
+        default: true
+    capabilities:
+      http_endpoint:
+        type: tosca.capabilities.riftio.http_endpoint_type
+      mgmt_interface:
+        type: tosca.capabilities.riftio.mgmt_interface_type
+      monitoring_param_0:
+        type: tosca.capabilities.riftio.monitoring_param
+      monitoring_param_1:
+        type: tosca.capabilities.riftio.monitoring_param
+    requirements:
+    - vdus:
+        node: tosca.nodes.riftio.VDU1
+        occurrences: [1, UNBOUND]
+        relationship: tosca.relationships.nfv.VirtualLinksTo
+        type: tosca.capabilities.nfv.VirtualLinkable
+  tosca.nodes.riftio.VDU1:
+    derived_from: tosca.nodes.nfv.VDU
+    properties:
+      cloud_init:
+        default: '#cloud-config'
+        type: string
+      count:
+        default: 1
+        type: integer
+    capabilities:
+      virtualLink:
+        type: tosca.capabilities.nfv.VirtualLinkable
+group_types:
+  tosca.groups.riftio.ConfigPrimitives:
+    derived_from: tosca.groups.Root
+    properties:
+      primitive:
+        type: map
+policy_types:
+  tosca.policies.riftio.InitialConfigPrimitive:
+    derived_from: tosca.policies.Root
+    properties:
+      name:
+        type: string
+      parameter:
+        type: map
+      seq:
+        type: integer
+      user_defined_script:
+        type: string
+  tosca.policies.riftio.ScalingGroup:
+    derived_from: tosca.policies.Root
+    properties:
+      config_actions:
+        type: map
+      max_instance_count:
+        type: integer
+      min_instance_count:
+        type: integer
+      name:
+        type: string
+      vnfd_members:
+        type: map
+topology_template:
+  policies:
+  - scaling_group_descriptor:
+      config_actions:
+        post_scale_out: ping config
+      max_instance_count: 10
+      min_instance_count: 1
+      name: ping_group
+      type: tosca.policies.riftio.ScalingGroup
+      vnfd_members:
+        ping_vnfd: 1
+  - initial_config_primitive:
+      name: start traffic
+      seq: 1
+      type: tosca.policies.riftio.InitialConfigPrimitive
+      user_defined_script: start_traffic.py
+  groups:
+    config_primitive:
+      type: tosca.groups.riftio.ConfigPrimitives
+      members:
+      - ping_vnfd
+      - pong_vnfd
+      properties:
+        primitives:
+          ping config:
+            user_defined_script: ping_config.py
+  inputs:
+    vendor:
+      type: string
+      description: Translated from YANG
+  node_templates:
+    ping_vnfd:
+      type: tosca.nodes.riftio.VNF1
+      properties:
+        id: 1
+        port: 18888
+        start_by_default: false
+        vendor: RIFT.io
+        version: 1.0
+        vnf_configuration:
+          config_delay: 0
+          config_details:
+            script_type: bash
+          config_priority: 2
+          config_template: "\n#!/bin/bash\n\n# Rest API config\nping_mgmt_ip=<rw_mgmt_ip>\n\
+            ping_mgmt_port=18888\n\n# VNF specific configuration\npong_server_ip=<rw_connection_point_name\
+            \ pong_vnfd/cp0>\nping_rate=5\nserver_port=5555\n\n# Make rest API calls\
+            \ to configure VNF\ncurl -D /dev/stdout \\\n    -H \"Accept: application/vnd.yang.data+xml\"\
+            \ \\\n    -H \"Content-Type: application/vnd.yang.data+json\" \\\n   \
+            \ -X POST \\\n    -d \"{\\\"ip\\\":\\\"$pong_server_ip\\\", \\\"port\\\
+            \":$server_port}\" \\\n    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/server\n\
+            rc=$?\nif [ $rc -ne 0 ]\nthen\n    echo \"Failed to set server info for\
+            \ ping!\"\n    exit $rc\nfi\n\ncurl -D /dev/stdout \\\n    -H \"Accept:\
+            \ application/vnd.yang.data+xml\" \\\n    -H \"Content-Type: application/vnd.yang.data+json\"\
+            \ \\\n    -X POST \\\n    -d \"{\\\"rate\\\":$ping_rate}\" \\\n    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/rate\n\
+            rc=$?\nif [ $rc -ne 0 ]\nthen\n    echo \"Failed to set ping rate!\"\n\
+            \    exit $rc\nfi\n\nexit 0\n"
+          config_type: script
+      capabilities:
+        http_endpoint:
+          properties:
+            path: api/v1/ping/stats
+            polling_interval: 2
+            port: 18888
+            protocol: http
+        mgmt_interface:
+          properties:
+            dashboard_params:
+              path: api/v1/ping/stats
+              port: 18888
+            port: 18888
+            protocol: tcp
+            vdu: ping_vnfd_iovdu_0
+        monitoring_param_0:
+          properties:
+            description: no of ping requests
+            group_tag: Group1
+            http_endpoint_ref: api/v1/ping/stats
+            id: 1
+            json_query_method: NAMEKEY
+            name: ping-request-tx-count
+            units: packets
+            value_type: INT
+            widget_type: COUNTER
+        monitoring_param_1:
+          properties:
+            description: no of ping responses
+            group_tag: Group1
+            http_endpoint_ref: api/v1/ping/stats
+            id: 2
+            json_query_method: NAMEKEY
+            name: ping-response-rx-count
+            units: packets
+            value_type: INT
+            widget_type: COUNTER
+      requirements:
+      - vdus:
+          node: ping_vnfd_iovdu_0
+    pong_vnfd_iovdu_0:
+      type: tosca.nodes.riftio.VDU1
+      properties:
+        cloud_init: "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\n\
+          ssh_pwauth: True\nruncmd:\n  - [ systemctl, daemon-reload ]\n  - [ systemctl,\
+          \ enable, pong.service ]\n  - [ systemctl, start, --no-block, pong.service\
+          \ ]\n  - [ ifup, eth1 ]\n"
+        count: 1
+      capabilities:
+        host:
+          properties:
+            disk_size: 4 GB
+            mem_size: 512 MB
+            num_cpus: 1
+      artifacts:
+        pong_vnfd_iovdu_0_vm_image:
+          file: ../images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2
+          image_checksum: 1234567890abcdefg
+          type: tosca.artifacts.Deployment.Image.riftio.QCOW2
+      interfaces:
+        Standard:
+          create: pong_vnfd_iovdu_0_vm_image
+    pong_vnfd_cp0:
+      type: tosca.nodes.riftio.CP1
+      properties:
+        cp_type: VPORT
+        name: pong_vnfd/cp0
+        vdu_intf_name: eth0
+        vdu_intf_type: VIRTIO
+      requirements:
+      - virtualBinding:
+          node: pong_vnfd_iovdu_0
+      - virtualLink:
+          node: ping_pong_vld
+    ping_pong_vld:
+      type: tosca.nodes.riftio.VL1
+      properties:
+        description: Toy VL
+        vendor: RIFT.io
+    ping_vnfd_cp0:
+      type: tosca.nodes.riftio.CP1
+      properties:
+        cp_type: VPORT
+        name: ping_vnfd/cp0
+        vdu_intf_name: eth0
+        vdu_intf_type: VIRTIO
+      requirements:
+      - virtualBinding:
+          node: ping_vnfd_iovdu_0
+      - virtualLink:
+          node: ping_pong_vld
+    pong_vnfd:
+      type: tosca.nodes.riftio.VNF1
+      properties:
+        id: 2
+        port: 18889
+        vendor: RIFT.io
+        version: 1.0
+        vnf_configuration:
+          config_delay: 60
+          config_details:
+            script_type: bash
+          config_priority: 1
+          config_template: "\n#!/bin/bash\n\n# Rest API configuration\npong_mgmt_ip=<rw_mgmt_ip>\n\
+            pong_mgmt_port=18889\n# username=<rw_username>\n# password=<rw_password>\n\
+            \n# VNF specific configuration\npong_server_ip=<rw_connection_point_name\
+            \ pong_vnfd/cp0>\nserver_port=5555\n\n# Make Rest API calls to configure\
+            \ VNF\ncurl -D /dev/stdout \\\n    -H \"Accept: application/vnd.yang.data+xml\"\
+            \ \\\n    -H \"Content-Type: application/vnd.yang.data+json\" \\\n   \
+            \ -X POST \\\n    -d \"{\\\"ip\\\":\\\"$pong_server_ip\\\", \\\"port\\\
+            \":$server_port}\" \\\n    http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/server\n\
+            rc=$?\nif [ $rc -ne 0 ]\nthen\n    echo \"Failed to set server(own) info\
+            \ for pong!\"\n    exit $rc\nfi\n\nexit 0\n"
+          config_type: script
+      capabilities:
+        http_endpoint:
+          properties:
+            path: api/v1/pong/stats
+            polling_interval: 2
+            port: 18889
+            protocol: http
+        mgmt_interface:
+          properties:
+            dashboard_params:
+              path: api/v1/pong/stats
+              port: 18889
+            port: 18889
+            protocol: tcp
+            vdu: pong_vnfd_iovdu_0
+        monitoring_param_0:
+          properties:
+            description: no of ping requests
+            group_tag: Group1
+            http_endpoint_ref: api/v1/pong/stats
+            id: 1
+            json_query_method: NAMEKEY
+            name: ping-request-rx-count
+            units: packets
+            value_type: INT
+            widget_type: COUNTER
+        monitoring_param_1:
+          properties:
+            description: no of ping responses
+            group_tag: Group1
+            http_endpoint_ref: api/v1/pong/stats
+            id: 2
+            json_query_method: NAMEKEY
+            name: ping-response-tx-count
+            units: packets
+            value_type: INT
+            widget_type: COUNTER
+      requirements:
+      - vdus:
+          node: pong_vnfd_iovdu_0
+    ping_vnfd_iovdu_0:
+      type: tosca.nodes.riftio.VDU1
+      properties:
+        cloud_init: "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\n\
+          ssh_pwauth: True\nruncmd:\n  - [ systemctl, daemon-reload ]\n  - [ systemctl,\
+          \ enable, ping.service ]\n  - [ systemctl, start, --no-block, ping.service\
+          \ ]\n  - [ ifup, eth1 ]\n"
+        count: 1
+      capabilities:
+        host:
+          properties:
+            disk_size: 4 GB
+            mem_size: 512 MB
+            num_cpus: 1
+      artifacts:
+        ping_vnfd_iovdu_0_vm_image:
+          file: ../images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2
+          image_checksum: 1234567890abcdefg
+          type: tosca.artifacts.Deployment.Image.riftio.QCOW2
+      interfaces:
+        Standard:
+          create: ping_vnfd_iovdu_0_vm_image
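
The metadata block at the top of this template is what
translate_metadata() in translate_node_templates.py consumes; a worked
sketch of that mapping, with values taken from this file:

    FIELDS_MAP = {'ID': 'name', 'vendor': 'vendor', 'version': 'version'}

    tosca_meta = {'ID': 'ping_pong_nsd', 'vendor': 'RIFT.io', 'version': 1.0}

    # Defaults cover any field the template omits
    metadata = {'name': 'tosca_to_mano', 'vendor': 'RIFT.io', 'version': '1.0'}
    for key in FIELDS_MAP:
        if key in tosca_meta:
            metadata[FIELDS_MAP[key]] = str(tosca_meta[key])

    assert metadata == {'name': 'ping_pong_nsd',
                        'vendor': 'RIFT.io',
                        'version': '1.0'}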
diff --git a/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/Definitions/riftio_custom_types.yaml b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/Definitions/riftio_custom_types.yaml
new file mode 100644 (file)
index 0000000..494a16d
--- /dev/null
@@ -0,0 +1,156 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+#tosca_default_namespace :    # Optional. default namespace (schema, types version)
+
+description: Define RIFT.io custom types
+
+data_types:
+  tosca.datatypes.network.riftio.vnf_configuration:
+    properties:
+      config_type:
+        type: string
+      config_delay:
+        type: integer
+        default: 0
+        required: no
+        constraints:
+          - greater_or_equal: 0
+      config_priority:
+        type: integer
+        constraints:
+          - greater_than: 0
+      config_details:
+        type: map
+      config_template:
+        type: string
+        required: no
+
+capability_types:
+  tosca.capabilities.riftio.http_endpoint_type:
+    derived_from: tosca.capabilities.Endpoint
+    properties:
+      polling_interval:
+        type: integer
+        #type: scalar_unit.time
+      path:
+        type: string
+
+  tosca.capabilities.riftio.mgmt_interface_type:
+    derived_from: tosca.capabilities.Endpoint
+    properties:
+      dashboard_params:
+        type: map
+
+  tosca.capabilities.riftio.monitoring_param:
+    derived_from: tosca.capabilities.nfv.Metric
+    properties:
+      id:
+        type: integer
+      name:
+        type: string
+      value_type:
+        type: string
+        default: INT
+      group_tag:
+        type: string
+        default: Group1
+      units:
+        type: string
+      description:
+        type: string
+      json_query_method:
+        type: string
+        default: NAMEKEY
+      http_endpoint_ref:
+        type: string
+      widget_type:
+        type: string
+        default: COUNTER
+
+node_types:
+  tosca.nodes.riftio.VNF1:
+    derived_from: tosca.nodes.nfv.VNF
+    properties:
+      #vdu_ref:
+      #  type: list
+      #  description: VDUs this VNF references
+      vnf_configuration:
+        type: tosca.datatypes.network.riftio.vnf_configuration
+      port:
+        type: integer
+        constraints:
+          - in_range: [1, 65535]
+    capabilities:
+      mgmt_interface:
+        type: tosca.capabilities.riftio.mgmt_interface_type
+      http_endpoint:
+        type: tosca.capabilities.riftio.http_endpoint_type
+      # Have not figured out how to do a list for capabilities.
+      # If you specify multiple capabilities of the same type,
+      # only the last one is available in the parser.
+      monitoring_param_0:
+        type: tosca.capabilities.riftio.monitoring_param
+      monitoring_param_1:
+        type: tosca.capabilities.riftio.monitoring_param
+    requirements:
+      - vdus:
+          type: tosca.capabilities.nfv.VirtualLinkable
+          relationship: tosca.relationships.nfv.VirtualLinksTo
+          node: tosca.nodes.riftio.VDU1
+          occurrences: [ 1, UNBOUND ]
+
+  tosca.nodes.riftio.VDU1:
+    derived_from: tosca.nodes.nfv.VDU
+    properties:
+      count:
+        type: integer
+        default: 1
+      cloud_init:
+        type: string
+        default: "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\nssh_pwauth: True\nruncmd:\n  - [ systemctl, daemon-reload ]\n  - [ ifup, eth1 ]\n"
+    capabilities:
+      virtual_linkable:
+        type: tosca.capabilities.nfv.VirtualLinkable
+
+  tosca.nodes.riftio.CP1:
+    derived_from: tosca.nodes.nfv.CP
+    properties:
+      name:
+        type: string
+        description: Name of the connection point
+      cp_type:
+        type: string
+        description: Type of connection point
+      vdu_intf_name:
+        type: string
+        description: Name of interface on VDU
+      vdu_intf_type:
+        type: string
+        description: Type of interface on VDU
+
+  tosca.nodes.riftio.VL1:
+    derived_from: tosca.nodes.nfv.VL.ELAN
+    properties:
+      description:
+        type: string
+
+group_types:
+  tosca.groups.riftio.ConfigPrimitives:
+    derived_from: tosca.groups.Root
+    properties:
+      primitive:
+        type: map
+
+policy_types:
+  tosca.policies.riftio.ScalingGroup:
+    derived_from: tosca.policies.Root
+    properties:
+      name:
+        type: string
+      max_instance_count:
+        type: integer
+      min_instance_count:
+        type: integer
+      vnfd_members:
+        type: map
+      config_actions:
+        type: map
diff --git a/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/TOSCA-Metadata/TOSCA.meta b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/TOSCA-Metadata/TOSCA.meta
new file mode 100644 (file)
index 0000000..2351efd
--- /dev/null
@@ -0,0 +1,4 @@
+TOSCA-Meta-File-Version: 1.0
+CSAR-Version: 1.1
+Created-By: RIFT.io
+Entry-Definitions: Definitions/ping_pong_nsd.yaml
\ No newline at end of file
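
get_entry_file() in shell.py resolves the template through this metadata
file; a sketch of that lookup, assuming the archive was already extracted
to extract_dir (an illustrative name):

    import os
    import yaml

    extract_dir = '/tmp/ping_pong_csar'   # hypothetical extraction directory

    meta_file = os.path.join(extract_dir, 'TOSCA-Metadata', 'TOSCA.meta')
    with open(meta_file) as f:
        meta = yaml.safe_load(f)

    entry_file = os.path.join(extract_dir, meta['Entry-Definitions'])
    # -> /tmp/ping_pong_csar/Definitions/ping_pong_nsd.yaml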
diff --git a/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2 b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2 b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/images/README b/common/python/rift/mano/tosca_translator/test/data/ping_pong_csar/images/README
new file mode 100644 (file)
index 0000000..16356a0
--- /dev/null
@@ -0,0 +1 @@
+Dummy images for unit testing
diff --git a/common/python/rift/mano/tosca_translator/test/data/tosca_helloworld.yaml b/common/python/rift/mano/tosca_translator/test/data/tosca_helloworld.yaml
new file mode 100644 (file)
index 0000000..5b913ff
--- /dev/null
@@ -0,0 +1,23 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Template for deploying a single server with predefined properties.
+
+topology_template:
+  node_templates:
+    my_server:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 2
+           disk_size: 10 GB
+           mem_size: 512 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: Linux
+            distribution: RHEL
+            version: 6.5
diff --git a/common/python/rift/mano/tosca_translator/test/data/tosca_helloworld_invalid.yaml b/common/python/rift/mano/tosca_translator/test/data/tosca_helloworld_invalid.yaml
new file mode 100644 (file)
index 0000000..ea60733
--- /dev/null
@@ -0,0 +1,23 @@
+tosca_definitions: tosca_simple_yaml_1_0
+
+description: Template with invalid version and topology_template section.
+
+topology_template:
+  node_temp:
+    my_server:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 2
+           disk_size: 10 GB
+           mem_size: 512 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: Linux
+            distribution: RHEL
+            version: 6.5
diff --git a/common/python/rift/mano/tosca_translator/test/tosca_translator_ut.py b/common/python/rift/mano/tosca_translator/test/tosca_translator_ut.py
new file mode 100755 (executable)
index 0000000..1b5b156
--- /dev/null
@@ -0,0 +1,305 @@
+#!/usr/bin/env python3
+
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+Unit tests for the TOSCA translator to the RIFT.io YANG model
+'''
+
+import argparse
+import logging
+import os
+import shutil
+import sys
+import tarfile
+import tempfile
+import xmlrunner
+
+import unittest
+
+import rift.mano.examples.ping_pong_nsd as ping_pong_nsd
+
+from rift.mano.tosca_translator.common.utils import _
+import rift.mano.tosca_translator.shell as shell
+
+from rift.mano.utils.compare_desc import CompareDescShell
+
+from rift.package import convert
+
+from toscaparser.common.exception import TOSCAException
+
+
+_TRUE_VALUES = ('True', 'true', '1', 'yes')
+
+
+class PingPongDescriptors(object):
+    def __init__(self):
+        ping_vnfd, pong_vnfd, nsd = \
+                ping_pong_nsd.generate_ping_pong_descriptors(
+                    pingcount=1,
+                    external_vlr_count=1,
+                    internal_vlr_count=0,
+                    num_vnf_vms=1,
+                    ping_md5sum='1234567890abcdefg',
+                    pong_md5sum='1234567890abcdefg',
+                    mano_ut=False,
+                    use_scale_group=True,
+                    use_mon_params=True,
+                )
+        self.ping_pong_nsd = nsd.descriptor.nsd[0]
+        self.ping_vnfd = ping_vnfd.descriptor.vnfd[0]
+        self.pong_vnfd = pong_vnfd.descriptor.vnfd[0]
+
+class TestToscaTranslator(unittest.TestCase):
+
+    tosca_helloworld = os.path.join(
+        os.path.dirname(os.path.abspath(__file__)),
+        "data/tosca_helloworld.yaml")
+    template_file = '--template-file=' + tosca_helloworld
+    template_validation = "--validate-only"
+    debug="--debug"
+    failure_msg = _('The program raised an exception unexpectedly.')
+
+    log_level = logging.WARN
+    log = None
+
+    exp_descs = None
+
+    @classmethod
+    def setUpClass(cls):
+        fmt = logging.Formatter(
+                '%(asctime)-23s %(levelname)-5s '
+                '(%(name)s@%(process)d:%(filename)s:%(lineno)d) - %(message)s')
+        stderr_handler = logging.StreamHandler(stream=sys.stderr)
+        stderr_handler.setFormatter(fmt)
+        logging.basicConfig(level=cls.log_level)
+        cls.log = logging.getLogger('tosca-translator-ut')
+        cls.log.addHandler(stderr_handler)
+        cls.exp_descs = PingPongDescriptors()
+
+    def test_missing_arg(self):
+        self.assertRaises(SystemExit, shell.main, '')
+
+    def test_invalid_file_arg(self):
+        self.assertRaises(SystemExit, shell.main, 'translate me')
+
+    def test_invalid_file_value(self):
+        self.assertRaises(SystemExit,
+                          shell.main,
+                          ('--template-file=template.txt'))
+
+    def test_invalid_type_value(self):
+        self.assertRaises(SystemExit, shell.main,
+                          (self.template_file, '--template-type=xyz'))
+
+    def test_invalid_parameters(self):
+        self.assertRaises(ValueError, shell.main,
+                          (self.template_file,
+                           '--parameters=key'))
+
+    def test_valid_template(self):
+        try:
+            shell.main([self.template_file])
+        except Exception as e:
+            self.log.exception(e)
+            self.fail(self.failure_msg)
+
+    def test_validate_only(self):
+        try:
+            shell.main([self.template_file,
+                        self.template_validation])
+        except Exception as e:
+            self.log.exception(e)
+            self.fail(self.failure_msg)
+
+        template = os.path.join(
+            os.path.dirname(os.path.abspath(__file__)),
+            "data/tosca_helloworld_invalid.yaml")
+        invalid_template = '--template-file=' + template
+        self.assertRaises(TOSCAException, shell.main,
+                          [invalid_template,
+                           self.template_validation])
+
+    def compare_dict(self, gen_d, exp_d):
+        gen = "--generated="+str(gen_d)
+        exp = "--expected="+str(exp_d)
+        CompareDescShell.compare_dicts(gen, exp, log=self.log)
+
+    def check_output(self, out_dir, archive=False):
+        prev_dir = os.getcwd()
+        os.chdir(out_dir)
+        # Check the archives or directories are present
+        dirs = os.listdir(out_dir)
+        # The desc dirs are using uuid, so cannot match name
+        # Check there are 3 dirs or files
+        self.assertTrue(len(dirs) >= 3)
+
+        try:
+            count = 0
+            for a in dirs:
+                desc = None
+                if archive:
+                    if os.path.isfile(a):
+                        self.log.debug("Checking archive: {}".format(a))
+                        with tarfile.open(a, 'r') as t:
+                            for m in t.getnames():
+                                if m.endswith('.yaml')  or m.endswith('.yml'):
+                                    # Descriptor file
+                                    t.extract(m)
+                                    self.log.debug("Extracted file: {}".format(m))
+                                    desc = m
+                                    break
+                    else:
+                        continue
+
+                else:
+                    if os.path.isdir(a):
+                        self.log.debug("Checking directory: {}".format(a))
+                        for m in os.listdir(a):
+                            if m.endswith('.yaml')  or m.endswith('.yml'):
+                                desc = os.path.join(a, m)
+                                break
+
+                if desc:
+                    self.log.debug("Checking descriptor: {}".format(desc))
+                    with open(desc, 'r') as d:
+                        rest, ext = os.path.splitext(desc)
+                        if '_vnfd.y' in desc:
+                            vnfd = convert.VnfdSerializer().from_file_hdl(d, ext)
+                            gen_desc = vnfd.as_dict()
+                            if 'ping_vnfd.y' in desc:
+                                exp_desc = self.exp_descs.ping_vnfd.as_dict()
+                            elif 'pong_vnfd.y' in desc:
+                                exp_desc = self.exp_descs.pong_vnfd.as_dict()
+                            else:
+                                raise Exception("Unknown VNFD descriptor: {}".
+                                                format(desc))
+                        elif '_nsd.y' in desc:
+                            nsd = convert.NsdSerializer().from_file_hdl(d, ext)
+                            gen_desc = nsd.as_dict()
+                            exp_desc = self.exp_descs.ping_pong_nsd.as_dict()
+                        else:
+                            raise Exception("Unknown file: {}".format(desc))
+
+                        # Compare the descriptors
+                        self.compare_dict(gen_desc, exp_desc)
+
+                        # Increment the count of descriptors found
+                        count += 1
+
+            if count != 3:
+                raise Exception("Did not find expected number of descriptors: {}".
+                                format(count))
+        except Exception as e:
+            self.log.exception(e)
+            raise e
+
+        finally:
+            os.chdir(prev_dir)
+
+    def test_output_dir(self):
+        test_base_dir = os.path.join(os.path.dirname(
+            os.path.abspath(__file__)), 'data')
+        template_file = os.path.join(test_base_dir,
+                            "ping_pong_csar/Definitions/ping_pong_nsd.yaml")
+        template = '--template-file='+template_file
+        temp_dir = tempfile.mkdtemp()
+        output_dir = "--output-dir=" + temp_dir
+        try:
+            shell.main([template, output_dir], log=self.log)
+
+        except Exception as e:
+            self.log.exception(e)
+            self.fail("Exception in test_output_dir: {}".format(e))
+
+        else:
+            self.check_output(temp_dir)
+
+        finally:
+            if self.log_level != logging.DEBUG:
+                if os.path.exists(temp_dir):
+                    shutil.rmtree(temp_dir)
+            else:
+                self.log.warn("Generated desc in {}".format(temp_dir))
+
+    def test_input_csar(self):
+        test_base_dir = os.path.join(
+            os.path.dirname(os.path.abspath(__file__)),
+            'data')
+        template_file = os.path.join(test_base_dir, "ping_pong_csar.zip")
+        template = '--template-file='+template_file
+        temp_dir = tempfile.mkdtemp()
+        output_dir = "--output-dir=" + temp_dir
+
+        try:
+            shell.main([template, output_dir, '--archive'], log=self.log)
+
+        except Exception as e:
+            self.log.exception(e)
+            self.fail("Exception in test_output_dir: {}".format(e))
+
+        else:
+            self.check_output(temp_dir, archive=True)
+
+        finally:
+            if self.log_level != logging.DEBUG:
+                if os.path.exists(temp_dir):
+                    shutil.rmtree(temp_dir)
+            else:
+                self.log.warn("Generated desc in {}".format(temp_dir))
+
+    def test_input_csar_no_gi(self):
+        test_base_dir = os.path.join(
+            os.path.dirname(os.path.abspath(__file__)),
+            'data')
+        template_file = os.path.join(test_base_dir, "ping_pong_csar.zip")
+        template = '--template-file='+template_file
+        temp_dir = tempfile.mkdtemp()
+        output_dir = "--output-dir=" + temp_dir
+        no_gi = '--no-gi'
+
+        try:
+            shell.main([template, output_dir, no_gi, '--archive'], log=self.log)
+
+        except Exception as e:
+            self.log.exception(e)
+            self.fail("Exception in input_csar_no_gi: {}".format(e))
+
+        else:
+            self.check_output(temp_dir, archive=True)
+
+        finally:
+            if self.log_level != logging.DEBUG:
+                if os.path.exists(temp_dir):
+                    shutil.rmtree(temp_dir)
+            else:
+                self.log.warn("Generated desc in {}".format(temp_dir))
+
+def main():
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unittest_args = parser.parse_known_args()
+    if args.no_runner:
+        runner = None
+
+    TestToscaTranslator.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
+
+if __name__ == '__main__':
+    main()
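
For reference, the flags exercised by these tests can also be driven
programmatically. A minimal sketch, assuming the same shell.main() signature
used in test_output_dir and a hypothetical ping_pong_nsd.yaml template in the
working directory:

    import logging
    import tempfile

    from rift.mano.tosca_translator import shell

    log = logging.getLogger('tosca-translator-demo')
    out_dir = tempfile.mkdtemp()
    # Translate the TOSCA template and write the generated descriptors
    # (VNFDs and NSD) into out_dir.
    shell.main(['--template-file=ping_pong_nsd.yaml',
                '--output-dir=' + out_dir],
               log=log)
    print("Generated descriptors in {}".format(out_dir))
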
diff --git a/common/python/rift/mano/tosca_translator/tosca-translator b/common/python/rift/mano/tosca_translator/tosca-translator
new file mode 100755 (executable)
index 0000000..6895d5d
--- /dev/null
@@ -0,0 +1,21 @@
+#!/usr/bin/env python3
+
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+#    Copyright 2016 RIFT.io Inc
+
+
+from rift.mano.tosca_translator import shell as translator_shell
+
+if __name__ == '__main__':
+    translator_shell.main()
diff --git a/common/python/rift/mano/tosca_translator/translator_logging.conf b/common/python/rift/mano/tosca_translator/translator_logging.conf
new file mode 100644 (file)
index 0000000..e55b02b
--- /dev/null
@@ -0,0 +1,43 @@
+
+[loggers]
+keys=root,tosca-translator
+
+[handlers]
+keys=RotatingFileHandler,SysLogHandler,NullHandler
+
+[formatters]
+keys=form01
+
+[logger_root]
+level=DEBUG
+handlers=NullHandler
+
+[logger_tosca-translator]
+level=INFO
+# One of these handlers can be removed based on requirements
+handlers=SysLogHandler, RotatingFileHandler
+qualname=tosca-translator
+propagate=1
+
+[handler_RotatingFileHandler]
+class=handlers.RotatingFileHandler
+level=INFO
+formatter=form01
+# Rotation happens after 100 MB; 5 backup files are kept
+args=('/var/log/rift-translator.log', 'a', 100000000, 5, 'utf8')
+
+[handler_SysLogHandler]
+class=handlers.SysLogHandler
+formatter=form01
+level=INFO
+args=('/dev/log', handlers.SysLogHandler.LOG_SYSLOG)
+
+[handler_NullHandler]
+class=NullHandler
+formatter=form01
+level=DEBUG
+args=()
+
+[formatter_form01]
+format = %(asctime)s - %(name)s - %(levelname)s - %(filename)s : %(message)s
+datefmt =
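
This configuration can be loaded with the standard library's logging.config
module. A minimal sketch, assuming a hypothetical install path for the conf
file (the RotatingFileHandler also needs write access to /var/log):

    import logging
    import logging.config

    # Load handlers/formatters from the conf; keep already-created loggers.
    logging.config.fileConfig('/usr/rift/etc/translator_logging.conf',
                              disable_existing_loggers=False)
    log = logging.getLogger('tosca-translator')
    log.info('translator logging configured')
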
diff --git a/common/python/rift/mano/utils/__init.py__ b/common/python/rift/mano/utils/__init.py__
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/common/python/rift/mano/utils/compare_desc.py b/common/python/rift/mano/utils/compare_desc.py
new file mode 100644 (file)
index 0000000..09b4fcd
--- /dev/null
@@ -0,0 +1,152 @@
+#
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import argparse
+import json
+import logging
+import logging.config
+import pprint
+
+from deepdiff import DeepDiff
+
+from rift.mano.yang_translator.common.utils import _
+
+import yaml
+
+
+class CompareDescShell(object):
+
+    SUPPORTED_TYPES = ['yaml', 'json']
+    INDENT = 2
+
+    DIFF_KEYS = (
+        REMOVED_ITEMS,
+        ADDED_ITEMS,
+        ITER_ITEM_ADDED,
+        ITER_ITEM_REM,
+        TYPE_CHANGES,
+        VALUES_CHANGED,
+    ) = (
+        'dic_item_removed',
+        'dic_item_added',
+        'iterable_item_added',
+        'iterable_item_removed',
+        'type_changes',
+        'values_changed',
+    )
+
+    DIFF_MAP = {
+        REMOVED_ITEMS: 'Items removed',
+        ADDED_ITEMS: 'Items added',
+        ITER_ITEM_ADDED: 'Items added to list',
+        ITER_ITEM_REM: 'Items removed from list',
+        TYPE_CHANGES: 'Change in types',
+        VALUES_CHANGED: 'Change in values',
+    }
+
+    # Changes in the following items are treated as errors
+    ERROR_ITEMS = [REMOVED_ITEMS, ADDED_ITEMS, ITER_ITEM_ADDED,
+                   ITER_ITEM_REM, TYPE_CHANGES, ]
+
+    @classmethod
+    def compare_dicts(cls, generated, expected, log=None):
+        """Compare two dictionaries and generate error if required"""
+        if log:
+            log.debug(_("Generated: {0}").format(generated))
+            log.debug(_("Expected: {0}").format(expected))
+
+        diff = DeepDiff(expected, generated)
+        if log:
+            log.debug(_("Keys in diff: {0}").format(diff.keys()))
+            d = pprint.pformat(diff, indent=cls.INDENT)
+            log.info(_("Differences:\n{0}").format(d))
+
+        if len(set(cls.ERROR_ITEMS).intersection(diff.keys())):
+            diff_str = pprint.pformat(diff)
+            msg = _("Found item changes: {0}").format(diff_str)
+            if log:
+                log.error(msg)
+            raise ValueError(msg)
+
+    def main(self, log, args):
+        self.log = log
+        self.log.debug(_("Args: {0}").format(args))
+        if args.type not in self.SUPPORTED_TYPES:
+            self.log.error(_("Unsupported file type {0}").
+                           format(args.type))
+            exit(1)
+
+        with open(args.generated) as g:
+            gen_data = g.read()
+            if args.type == 'yaml':
+                y_gen = yaml.load(gen_data)
+            else:
+                y_gen = json.loads(gen_data)
+
+        with open(args.expected) as e:
+            exp_data = e.read()
+            if args.type == 'yaml':
+                y_exp = yaml.load(exp_data)
+            else:
+                y_exp = json.loads(exp_data)
+
+        self.compare_dicts(y_gen, y_exp, log=self.log)
+
+
+def main(args=None, log=None):
+    parser = argparse.ArgumentParser(
+        description='Validate descriptors by comparing')
+    parser.add_argument(
+        "-g",
+        "--generated",
+        required=True,
+        help="Generated descriptor file")
+    parser.add_argument(
+        "-e",
+        "--expected",
+        required=True,
+        help="Descriptor file to compare")
+    parser.add_argument(
+        "-t",
+        "--type",
+        default='yaml',
+        help="File type. Default yaml")
+    parser.add_argument(
+        "--debug",
+        help="Enable debug logging",
+        action="store_true")
+
+    if args:
+        args = parser.parse_args(args)
+    else:
+        args = parser.parse_args()
+
+    if log is None:
+        if args.debug:
+            logging.basicConfig(level=logging.DEBUG)
+        else:
+            logging.basicConfig(level=logging.ERROR)
+        log = logging.getLogger("rwmano-translator")
+
+    CompareDescShell().main(log, args)
+
+
+if __name__ == '__main__':
+    main()
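
Besides the CLI entry point, compare_dicts() is usable directly from tests,
as the TOSCA translator unit test above does. A minimal sketch:

    import logging

    from rift.mano.utils.compare_desc import CompareDescShell

    log = logging.getLogger('compare-demo')
    generated = {'name': 'ping', 'vcpu-count': 2}
    expected = {'name': 'ping', 'vcpu-count': 2}
    # Identical dicts pass silently. Added/removed keys, list changes and
    # type changes raise ValueError; pure value changes are only logged.
    CompareDescShell.compare_dicts(generated, expected, log=log)
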
diff --git a/common/python/rift/mano/utils/juju_api.py b/common/python/rift/mano/utils/juju_api.py
new file mode 100644 (file)
index 0000000..3f3b086
--- /dev/null
@@ -0,0 +1,1071 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
+
+import argparse
+import asyncio
+from functools import partial
+import logging
+import os
+import ssl
+import sys
+import time
+
+try:
+    from jujuclient.juju1.environment import Environment as Env1
+    from jujuclient.juju2.environment import Environment as Env2
+except ImportError as e:
+    # Try importing older jujuclient
+    from jujuclient import Environment as Env1
+
+try:
+    ssl._create_default_https_context = ssl._create_unverified_context
+except AttributeError:
+    # Legacy Python doesn't verify by default (see pep-0476)
+    #   https://www.python.org/dev/peps/pep-0476/
+    pass
+
+
+class JujuVersionError(Exception):
+    pass
+
+
+class JujuApiError(Exception):
+    pass
+
+
+class JujuEnvError(JujuApiError):
+    pass
+
+
+class JujuModelError(JujuApiError):
+    pass
+
+
+class JujuStatusError(JujuApiError):
+    pass
+
+
+class JujuUnitsError(JujuApiError):
+    pass
+
+
+class JujuWaitUnitsError(JujuApiError):
+    pass
+
+
+class JujuSrvNotDeployedError(JujuApiError):
+    pass
+
+
+class JujuAddCharmError(JujuApiError):
+    pass
+
+
+class JujuDeployError(JujuApiError):
+    pass
+
+
+class JujuDestroyError(JujuApiError):
+    pass
+
+
+class JujuResolveError(JujuApiError):
+    pass
+
+
+class JujuActionError(JujuApiError):
+    pass
+
+
+class JujuActionApiError(JujuActionError):
+    pass
+
+
+class JujuActionInfoError(JujuActionError):
+    pass
+
+
+class JujuActionExecError(JujuActionError):
+    pass
+
+
+class JujuApi(object):
+    '''
+    JujuApi wrapper on jujuclient library
+
+    There should be one instance of JujuApi for each VNF managed by Juju.
+
+    Assumption:
+        Currently we use one unit per service/VNF. So once a service
+        is deployed, we store the unit name and reuse it.
+    '''
+    log = None
+
+    def __init__ (self,
+                  log=None,
+                  loop=None,
+                  server='127.0.0.1',
+                  port=17070,
+                  user='admin',
+                  secret=None,
+                  version=None):
+        '''Initialize with the Juju credentials'''
+        self.server = server
+        self.port = port
+
+        self.secret = secret
+        if user.startswith('user-'):
+            self.user = user
+        else:
+            self.user = 'user-{}'.format(user)
+
+        self.loop = loop
+
+        if log is not None:
+            self.log = log
+        else:
+            self.log = JujuApi._get_logger()
+
+        if self.log is None:
+            raise JujuApiError("Logger not defined")
+
+        self.version = None
+        if version:
+            self.version = version
+        else:
+            try:
+                if Env2:
+                    pass
+            except NameError:
+                self.log.warn("Using older version of Juju client, which " \
+                              "supports only Juju 1.x")
+                self.version = 1
+
+        endpoint = 'wss://%s:%d' % (server, int(port))
+        self.endpoint = endpoint
+
+        self.charm = None  # Charm used
+        self.service = None  # Service deployed
+        self.units = []  # Storing as list to support more units in future
+
+        self.destroy_retries = 25  # Number of retries to destroy a service
+        self.retry_delay = 5  # seconds
+
+    def __str__(self):
+        return ("JujuApi-{}".format(self.endpoint))
+
+    @classmethod
+    def _get_logger(cls):
+        if cls.log is not None:
+            return cls.log
+
+        fmt = logging.Formatter(
+            '%(asctime)-23s %(levelname)-5s  (%(name)s@%(process)d:' \
+            '%(filename)s:%(lineno)d) - %(message)s')
+        stderr_handler = logging.StreamHandler(stream=sys.stderr)
+        stderr_handler.setFormatter(fmt)
+        logging.basicConfig(level=logging.DEBUG)
+        cls.log = logging.getLogger('juju-api')
+        cls.log.addHandler(stderr_handler)
+
+        return cls.log
+
+    @staticmethod
+    def format_charm_name(name):
+        '''Format the name to valid charm name
+
+        Charm service name accepts only a to z and -.
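+
+        For example, '6wind_vRouter' becomes 'gwind-vrouter': digits map
+        to the letters a-j and any other non-alphabetic character to '-'.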
+        '''
+
+        new_name = ''
+        for c in name:
+            if c.isdigit():
+                c = chr(97 + int(c))
+            elif not c.isalpha():
+                c = "-"
+            new_name += c
+        return new_name.lower()
+
+    def _get_version_tag(self, tag):
+        version_tag_map = {
+            'applications': {
+                1: 'Services',
+                2: 'applications',
+            },
+            'units': {
+                1: 'Units',
+                2: 'units',
+            },
+            'status': {
+                1: 'Status',
+                2: 'status',
+            },
+            'workload-status': {
+                1: 'Workload',
+                2: 'workload-status',
+            },
+            'charm-url': {
+                1: 'CharmURL',
+                2: 'charm-url',
+            },
+        }
+
+        return version_tag_map[tag][self.version]
+
+    def _get_env1(self):
+        try:
+            env = Env1(self.endpoint)
+            l = env.login(self.secret, user=self.user)
+            return env
+
+        except ConnectionRefusedError as e:
+            msg = "{}: Failed Juju 1.x connect: {}".format(self, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise e
+
+        except Exception as e:
+            msg = "{}: Failed Juju 1.x connect: {}".format(self, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuEnvError(msg)
+
+    def _get_env2(self):
+        try:
+            env = Env2(self.endpoint)
+            l = env.login(self.secret, user=self.user)
+        except KeyError as e:
+            msg = "{}: Failed Juju 2.x connect: {}".format(self, e)
+            self.log.debug(msg)
+            raise JujuVersionError(msg)
+
+        try:
+            model = None
+            models = env.models.list()
+            for m in models['user-models']:
+                if m['model']['name'] == 'default':
+                    mep = '{}/model/{}/api'.format(self.endpoint,
+                                                   m['model']['uuid'])
+                    model = Env2(mep, env_uuid=m['model']['uuid'])
+                    model.login(self.secret, user=self.user)
+                    break
+
+            if model is None:
+                raise JujuModelError("{}: Did not find a 'default' model".
+                                     format(self))
+
+            return model
+
+        except Exception as e:
+            msg = "{}: Failed logging to model: {}".format(self, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            env.close()
+            raise JujuModelError(msg)
+
+    def _get_env(self):
+        self.log.debug("{}: Connect to endpoint {}".
+                      format(self, self.endpoint))
+
+        if self.version is None:
+            # Try version 2 first
+            try:
+                env = self._get_env2()
+                self.version = 2
+
+            except JujuVersionError as e:
+                self.log.info("Unable to login as Juju 2.x, trying 1.x")
+                env = self._get_env1()
+                self.version = 1
+
+            return env
+
+        elif self.version == 2:
+            return self._get_env2()
+
+        elif self.version == 1:
+            return self._get_env1()
+
+        else:
+            msg = "{}: Unknown version set: {}".format(self, self.version)
+            self.log.error(msg)
+            raise JujuVersionError(msg)
+
+    @asyncio.coroutine
+    def get_env(self):
+        ''' Connect to the Juju controller'''
+        env = yield from self.loop.run_in_executor(
+            None,
+            self._get_env,
+        )
+        return env
+
+    def _get_status(self, env=None):
+        if env is None:
+            env = self._get_env()
+
+        try:
+            status = env.status()
+            return status
+
+        except Exception as e:
+            msg = "{}: exception in getting status: {}". \
+                  format(self, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuStatusError(msg)
+
+    @asyncio.coroutine
+    def get_status(self, env=None):
+        '''Get Juju controller status'''
+        pf = partial(self._get_status, env=env)
+        status = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return status
+
+    def get_all_units(self, status, service=None):
+        '''Parse the status and get the units'''
+        results = {}
+        services = status.get(self._get_version_tag('applications'), {})
+
+        for svc_name, svc_data in services.items():
+            if service and service != svc_name:
+                continue
+            units = svc_data[self._get_version_tag('units')] or {}
+
+            results[svc_name] = {}
+            for unit in units:
+                results[svc_name][unit] = \
+                        units[unit][self._get_version_tag('workload-status')] \
+                        [self._get_version_tag('status')] or None
+        return results
+
+
+    def _get_service_units(self, service=None, status=None, env=None):
+        if service is None:
+            service = self.service
+
+        # Optimizing calls to Juju, as currently we deploy only 1 unit per
+        # service.
+        if self.service == service and len(self.units):
+            return self.units
+
+        if env is None:
+            env = self._get_env()
+
+        if status is None:
+            status = self._get_status(env=env)
+
+        try:
+            resp = self.get_all_units(status, service=service)
+            self.log.debug("Get all units: {}".format(resp))
+            units = set(resp[service].keys())
+
+            if self.service == service:
+                self.units = units
+
+            return units
+
+        except Exception as e:
+            msg = "{}: exception in get units {}".format(self, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuUnitsError(msg)
+
+    @asyncio.coroutine
+    def get_service_units(self, service=None, status=None, env=None):
+        '''Get the unit names for a service'''
+        pf = partial(self._get_service_units,
+                     service=service,
+                     status=status,
+                     env=env)
+        units = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return units
+
+    def _get_service_status(self, service=None, status=None, env=None):
+        if env is None:
+            env = self._get_env()
+
+        if status is None:
+            status = self._get_status(env=env)
+
+        if service is None:
+            service = self.service
+
+        try:
+            srv_status = status[self._get_version_tag('applications')] \
+                         [service][self._get_version_tag('status')] \
+                         [self._get_version_tag('status')]
+            self.log.debug("{}: Service {} status is {}".
+                           format(self, service, srv_status))
+            return srv_status
+
+        except KeyError as e:
+            self.log.info("self: Did not find service {}, e={}".format(self, service, e))
+            return 'NA'
+
+        except Exception as e:
+            msg = "{}: exception checking service status for {}, e {}". \
+                  format(self, service, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuStatusError(msg)
+
+
+    @asyncio.coroutine
+    def get_service_status(self, service=None, status=None, env=None):
+        ''' Get service status
+
+            maintenance : The unit is not yet providing services, but is actively doing stuff.
+            unknown : Service has finished an event but the charm has not called status-set yet.
+            waiting : Service is unable to progress to an active state because of dependency.
+            blocked : Service needs manual intervention to get back to the Running state.
+            active  : Service correctly offering all the services.
+            NA      : Service is not deployed
+        '''
+        pf = partial(self._get_service_status,
+                     service=service,
+                     status=status,
+                     env=env)
+        srv_status = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return srv_status
+
+    def _is_service_deployed(self, service=None, status=None, env=None):
+        resp = self._get_service_status(service=service,
+                                        status=status,
+                                        env=env)
+
+        if resp not in ['terminated', 'NA']:
+            return True
+
+        return False
+
+    @asyncio.coroutine
+    def is_service_deployed(self, service=None, status=None, env=None):
+        '''Check if the service is deployed'''
+        pf = partial(self._is_service_deployed,
+                     service=service,
+                     status=status,
+                     env=env)
+        rc = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return rc
+
+    def _is_service_error(self, service=None, status=None, env=None):
+        resp = self._get_service_status(service=service,
+                                        status=status,
+                                        env=env)
+
+        if resp in ['error']:
+            return True
+
+        return False
+
+    @asyncio.coroutine
+    def is_service_error(self, service=None, status=None, env=None):
+        '''Check if the service is in error state'''
+        pf = partial(self._is_service_error,
+                     service=service,
+                     status=status,
+                     env=env)
+        rc = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return rc
+
+    def _is_service_maint(self, service=None, status=None, env=None):
+        resp = self._get_service_status(service=service,
+                                        status=status,
+                                        env=env)
+
+        if resp in ['maintenance']:
+            return True
+
+        return False
+
+    @asyncio.coroutine
+    def is_service_maint(self, service=None, status=None, env=None):
+        '''Check if the service is in maintenance state'''
+        pf = partial(self._is_service_maint,
+                     service=service,
+                     status=status,
+                     env=env)
+        rc = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return rc
+
+    def _is_service_active(self, service=None, status=None, env=None):
+        resp = self._get_service_status(service=service,
+                                        status=status,
+                                        env=env)
+
+        if resp in ['active']:
+            return True
+
+        return False
+
+    @asyncio.coroutine
+    def is_service_active(self, service=None, status=None, env=None):
+        '''Check if the service is active'''
+        pf = partial(self._is_service_active,
+                     service=service,
+                     status=status,
+                     env=env)
+        rc = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return rc
+
+    def _is_service_blocked(self, service=None, status=None, env=None):
+        resp = self._get_service_status(service=service,
+                                        status=status,
+                                        env=env)
+
+        if resp in ['blocked']:
+            return True
+
+        return False
+
+    @asyncio.coroutine
+    def is_service_blocked(self, service=None, status=None, env=None):
+        '''Check if the service is blocked'''
+        pf = partial(self._is_service_blocked,
+                     service=service,
+                     status=status,
+                     env=env)
+        rc = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return rc
+
+    def _is_service_up(self, service=None, status=None, env=None):
+        resp = self._get_service_status(service=service,
+                                        status=status,
+                                        env=env)
+
+        if resp in ['active', 'blocked']:
+            return True
+
+        return False
+
+    @asyncio.coroutine
+    def is_service_up(self, service=None, status=None, env=None):
+        '''Check if the service is installed and up'''
+        pf = partial(self._is_service_up,
+                     service=service,
+                     status=status,
+                     env=env)
+
+        rc = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return rc
+
+    def _apply_config(self, config, service=None, env=None):
+        if service is None:
+            service = self.service
+
+        if config is None or len(config) == 0:
+            self.log.warn("{}: Empty config passed for service {}".
+                          format(self, service))
+            return
+
+        if env is None:
+            env = self._get_env()
+
+        status = self._get_status(env=env)
+
+        if not self._is_service_deployed(service=service,
+                                         status=status,
+                                         env=env):
+            raise JujuSrvNotDeployedError("{}: service {} is not deployed".
+                                          format(self, service))
+
+        self.log.debug("{}: Config for service {} update to: {}".
+                       format(self, service, config))
+        try:
+            # Try to fix error on service, most probably due to config issue
+            if self._is_service_error(service=service, status=status, env=env):
+                self._resolve_error(service=service, env=env)
+
+            if self.version == 2:
+                env.service.set(service, config)
+            else:
+                env.set_config(service, config)
+
+        except Exception as e:
+            self.log.error("{}: exception setting config for {} with {}, e {}".
+                           format(self, service, config, e))
+            self.log.exception(e)
+            raise e
+
+    @asyncio.coroutine
+    def apply_config(self, config, service=None, env=None, wait=True):
+        '''Apply a config on the service'''
+        pf = partial(self._apply_config,
+                     config,
+                     service=service,
+                     env=env)
+        yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+
+        if wait:
+            # Wait till config finished applying
+            self.log.debug("{}: Wait for config apply to finish".
+                           format(self))
+            delay = 3  # secs
+            maint = True
+            while maint:
+                # Sleep first to give time for config_changed hook to be invoked
+                yield from asyncio.sleep(delay, loop=self.loop)
+                maint = yield from self.is_service_maint(service=service,
+                                                         env=env)
+
+        err = yield from self.is_service_error(service=service, env=env)
+        if err:
+            self.log.error("{}: Service is in error state".
+                           format(self))
+            return False
+
+        self.log.debug("{}: Finished applying config".format(self))
+        return True
+
+    def _set_parameter(self, parameter, value, service=None):
+        return self._apply_config({parameter : value}, service=service)
+
+    @asyncio.coroutine
+    def set_parameter(self, parameter, value, service=None):
+        '''Set a config parameter for a service'''
+        rc = yield from self.apply_config({parameter : value},
+                                          service=service)
+        return rc
+
+    def _resolve_error(self, service=None, status=None, env=None):
+        if env is None:
+            env = self._get_env()
+
+        if status is None:
+            status = self._get_status(env=env)
+
+        if service is None:
+            service = self.service
+
+        if self._is_service_deployed(service=service, status=status, env=env):
+            units = self.get_all_units(status, service=service)
+
+            for unit, ustatus in units[service].items():
+                if ustatus == 'error':
+                    self.log.info("{}: Found unit {} with status {}".
+                                  format(self, unit, ustatus))
+                    try:
+                        # resolved() takes the unit name as <service>/<idx>,
+                        # unlike the action API
+                        env.resolved(unit)
+
+                    except Exception as e:
+                        msg = "{}: Resolve on unit {}: {}". \
+                              format(self, unit, e)
+                        self.log.error(msg)
+                        self.log.exception(e)
+                        raise JujuResolveError(msg)
+
+    @asyncio.coroutine
+    def resolve_error(self, service=None, status=None, env=None):
+        '''Resolve units in error state'''
+        pf = partial(self._resolve_error,
+                     service=service,
+                     status=status,
+                     env=env)
+        yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+
+    def _deploy_service(self, charm, service,
+                        path=None, config=None, env=None):
+        self.log.debug("{}: Deploy service for charm {}({}) with service {}".
+                       format(self, charm, path, service))
+
+        if env is None:
+            env = self._get_env()
+
+        self.service = service
+        self.charm = charm
+
+        if self._is_service_deployed(service=service, env=env):
+            self.log.info("{}: Charm service {} already deployed".
+                          format (self, service))
+            if config:
+                self._apply_config(config, service=service, env=env)
+            return
+
+        series = "trusty"
+
+        deploy_to = None
+        if self.version == 1:
+            deploy_to = "lxc:0"
+
+        if path is None:
+            prefix=os.getenv('RIFT_INSTALL', '/')
+            path = os.path.join(prefix, 'usr/rift/charms', series, charm)
+
+        try:
+            self.log.debug("{}: Local charm settings: dir={}, series={}".
+                           format(self, path, series))
+            result = env.add_local_charm_dir(path, series)
+            url = result[self._get_version_tag('charm-url')]
+
+        except Exception as e:
+            msg = '{}: Error setting local charm directory {} for {}: {}'. \
+                  format(self, path, service, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuAddCharmError(msg)
+
+        try:
+            self.log.debug("{}: Deploying using: service={}, url={}, to={}, config={}".
+                           format(self, service, url, deploy_to, config))
+            env.deploy(service, url, config=config, machine_spec=deploy_to)
+
+        except Exception as e:
+            msg = '{}: Error deploying {}: {}'.format(self, service, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuDeployError(msg)
+
+    @asyncio.coroutine
+    def deploy_service(self, charm, service,
+                       wait=False, timeout=300,
+                       path=None, config=None):
+        '''Deploy a service using the charm name provided'''
+        env = yield from self.get_env()
+
+        pf = partial(self._deploy_service,
+                     charm,
+                     service,
+                     path=path,
+                     config=config,
+                     env=env)
+        yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+
+        rc = True
+        if wait is True:
+            # Wait for the deployed units to start
+            try:
+                self.log.debug("{}: Waiting for service {} to come up".
+                               format(self, service))
+                rc = yield from self.wait_for_service(timeout=timeout, env=env)
+
+            except Exception as e:
+                msg = '{}: Error starting all units for {}: {}'. \
+                      format(self, service, e)
+                self.log.error(msg)
+                self.log.exception(e)
+                raise JujuWaitUnitsError(msg)
+
+        return rc
+
+    @asyncio.coroutine
+    def wait_for_service(self, service=None, timeout=0, env=None):
+        '''Wait for the service to come up'''
+        if service is None:
+            service = self.service
+
+        if env is None:
+            env = yield from self.get_env()
+
+        status = yield from self.get_status(env=env)
+
+        if self._is_service_up(service=service, status=status, env=env):
+            self.log.debug("{}: Service {} is already up".
+                               format(self, service))
+            return True
+
+        # Check if service is deployed
+        if not self._is_service_deployed(service=service, status=status, env=env):
+            raise JujuSrvNotDeployedError("{}: service {} is not deployed".
+                                          format(self, service))
+
+        if timeout < 0:
+            timeout = 0
+
+        count = 0
+        delay = self.retry_delay # seconds
+        self.log.debug("{}: In wait for service {}".format(self, service))
+
+        start_time = time.time()
+        max_time = time.time() + timeout
+        while timeout != 0 and (time.time() <= max_time):
+            count += 1
+            rc = yield from self.is_service_up(service=service, env=env)
+            if rc:
+                self.log.debug("{}: Service {} is up after {} seconds".
+                               format(self, service, time.time()-start_time))
+                return True
+            yield from asyncio.sleep(delay, loop=self.loop)
+        return False
+
+    def _destroy_service(self, service=None):
+        '''Destroy a service on Juju controller'''
+        self.log.debug("{}: Destroy charm service: {}".format(self,service))
+
+        if service is None:
+            service = self.service
+
+        env = self._get_env()
+
+        status = self._get_status(env=env)
+
+        count = 0
+        while self._is_service_deployed(service=service, status=status, env=env):
+            count += 1
+            self.log.debug("{}: Destroy service {}, count {}".
+                           format(self, service, count))
+
+            if count > self.destroy_retries:
+                msg = "{}: Not able to destroy service {} after {} tries". \
+                      format(self, service, count)
+                self.log.error(msg)
+                raise JujuDestroyError(msg)
+
+
+            if self._is_service_error(service=service, status=status):
+                self._resolve_error(service, status)
+
+            try:
+                env.destroy_service(service)
+
+            except Exception as e:
+                msg = "{}: Exception when running destroy on service {}: {}". \
+                      format(self, service, e)
+                self.log.error(msg)
+                self.log.exception(e)
+                raise JujuDestroyError(msg)
+
+            time.sleep(self.retry_delay)
+            status = self._get_status(env=env)
+
+        self.log.debug("{}: Destroyed service {} ({})".
+                       format(self, service, count))
+
+    @asyncio.coroutine
+    def destroy_service(self, service=None):
+        '''Destroy a service on Juju controller'''
+        pf = partial(self._destroy_service,
+                     service=service)
+        yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+
+
+    def _get_action_status(self, action_tag, env=None):
+        if env is None:
+            env = self._get_env()
+
+        if not action_tag.startswith('action-'):
+            action_tag = 'action-{}'.format(action_tag)
+
+        try:
+            action = env.actions
+        except Exception as e:
+            msg = "{}: exception in Action API: {}".format(self, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuActionApiError(msg)
+
+        try:
+            status = action.info([{'Tag': action_tag}])
+
+            self.log.debug("{}: Action {} status {}".
+                           format(self, action_tag, status))
+            return status['results'][0]
+
+        except Exception as e:
+            msg = "{}: exception in get action status {}".format(self, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuActionInfoError(msg)
+
+    @asyncio.coroutine
+    def get_action_status(self, action_tag, env=None):
+        '''
+        Get the status of an action queued on the controller
+
+        responds with the action status, which is one of three values:
+
+         - completed
+         - pending
+         - failed
+
+         @param action_tag - the action UUID return from the enqueue method
+         eg: action-3428e20d-fcd7-4911-803b-9b857a2e5ec9
+        '''
+        pf = partial(self._get_action_status,
+                     action_tag,
+                     env=env,)
+        status = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return status
+
+    def _execute_action(self, action_name, params, service=None, env=None):
+        '''Execute the action on all units of a service'''
+        if service is None:
+            service = self.service
+
+        if env is None:
+            env = self._get_env()
+
+        try:
+            action = env.actions
+        except Exception as e:
+            msg = "{}: exception in Action API: {}".format(self, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuActionApiError(msg)
+
+        units = self._get_service_units(service)
+        self.log.debug("{}: Apply action {} on units {}".
+                       format(self, action_name, units))
+
+        # Rename units from <service>/<n> to unit-<service>-<n>
+        unit_tags = []
+        for unit in units:
+            idx = int(unit[unit.index('/')+1:])
+            unit_name = "unit-%s-%d" % (service, idx)
+            unit_tags.append(unit_name)
+        self.log.debug("{}: Unit tags for action: {}".
+                       format(self, unit_tags))
+
+        try:
+            result = action.enqueue_units(unit_tags, action_name, params)
+            self.log.debug("{}: Response for action: {}".
+                           format(self, result))
+            return result['results'][0]
+
+        except Exception as e:
+            msg = "{}: Exception enqueing action {} on units {} with " \
+                  "params {}: {}".format(self, action, unit_tags, params, e)
+            self.log.error(msg)
+            self.log.exception(e)
+            raise JujuActionExecError(msg)
+
+    @asyncio.coroutine
+    def execute_action(self, action_name, params, service=None, env=None):
+        '''Execute an action for a service on the controller
+
+        Currently, we execute the action on all units of the service
+        '''
+        pf = partial(self._execute_action,
+                     action_name,
+                     params,
+                     service=service,
+                     env=env)
+        result = yield from self.loop.run_in_executor(
+            None,
+            pf,
+        )
+        return result
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='Test Juju')
+    parser.add_argument("-s", "--server", default='10.0.202.49', help="Juju controller")
+    parser.add_argument("-u", "--user", default='admin', help="User, default user-admin")
+    parser.add_argument("-p", "--password", default='nfvjuju', help="Password for the user")
+    parser.add_argument("-P", "--port", default=17070, help="Port number, default 17070")
+    parser.add_argument("-d", "--directory", help="Local directory for the charm")
+    parser.add_argument("--service", help="Charm service name")
+    parser.add_argument("--vnf-ip", help="IP of the VNF to configure")
+    args = parser.parse_args()
+
+    api = JujuApi(server=args.server,
+                  port=args.port,
+                  user=args.user,
+                  secret=args.password)
+
+    env = api._get_env()
+    if env is None:
+        raise "Not able to login to the Juju controller"
+
+    print("Status: {}".format(api._get_status(env=env)))
+
+    if args.directory and args.service:
+        # Deploy the charm
+        charm = os.path.basename(args.directory)
+        api._deploy_service(charm, args.service,
+                            path=args.directory,
+                            env=env)
+
+        while not api._is_service_up():
+            time.sleep(5)
+
+        print ("Service {} is deployed with status {}".
+               format(args.service, api._get_service_status()))
+
+        if args.vnf_ip and \
+           ('clearwater-aio' in args.directory):
+            # Execute config on charm
+            api._apply_config({'proxied_ip': args.vnf_ip})
+
+            while not api._is_service_active():
+                time.sleep(10)
+
+            print ("Service {} is in status {}".
+                   format(args.service, api._get_service_status()))
+
+            res = api._execute_action('create-update-user', {'number': '125252352525',
+                                                             'password': 'asfsaf'})
+
+            print ("Action 'creat-update-user response: {}".format(res))
+
+            status = res['status']
+            while status not in [ 'completed', 'failed' ]:
+                time.sleep(2)
+                status = api._get_action_status(res['action']['tag'])['status']
+
+                print("Action status: {}".format(status))
+
+            # This action will fail as the number is non-numeric
+            res = api._execute_action('delete-user', {'number': '125252352525asf'})
+
+            print ("Action 'delete-user response: {}".format(res))
+
+            status = res['status']
+            while status not in [ 'completed', 'failed' ]:
+                time.sleep(2)
+                status = api._get_action_status(res['action']['tag'])['status']
+
+                print("Action status: {}".format(status))
diff --git a/common/python/rift/mano/yang_translator/__init__.py b/common/python/rift/mano/yang_translator/__init__.py
new file mode 100644 (file)
index 0000000..bc4710b
--- /dev/null
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/yang_translator/common/__init__.py b/common/python/rift/mano/yang_translator/common/__init__.py
new file mode 100644 (file)
index 0000000..bc4710b
--- /dev/null
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/yang_translator/common/exception.py b/common/python/rift/mano/yang_translator/common/exception.py
new file mode 100644 (file)
index 0000000..4d51ebb
--- /dev/null
@@ -0,0 +1,243 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+'''
+Exceptions for the YANG Translator package.
+'''
+
+import logging
+import sys
+import traceback
+
+from rift.mano.yang_translator.common.utils import _
+
+log = logging.getLogger(__name__)
+
+
+class YANGException(Exception):
+    '''Base exception class for YANG
+
+    To correctly use this class, inherit from it and define
+    a 'msg_fmt' property.
+
+    '''
+
+    _FATAL_EXCEPTION_FORMAT_ERRORS = False
+
+    message = _('An unknown exception occurred.')
+
+    def __init__(self, **kwargs):
+        try:
+            self.message = self.msg_fmt % kwargs
+        except KeyError:
+            exc_info = sys.exc_info()
+            log.exception(_('Exception in string format operation: %s')
+                          % exc_info[1])
+
+            if YANGException._FATAL_EXCEPTION_FORMAT_ERRORS:
+                raise exc_info[1].with_traceback(exc_info[2])
+
+    def __str__(self):
+        return self.message
+
+    def generate_inv_schema_property_error(self, attr, value, valid_values):
+        msg = (_('Schema definition of "%(propname)s" has '
+                 '"%(attr)s" attribute with invalid value '
+                 '"%(value1)s". The value must be one of '
+                 '"%(value2)s".') % {"propname": self.name,
+                                     "attr": attr,
+                                     "value1": value,
+                                     "value2": valid_values})
+        ExceptionCollector.appendException(
+            InvalidSchemaError(message=msg))
+
+    @staticmethod
+    def set_fatal_format_exception(flag):
+        if isinstance(flag, bool):
+            YANGException._FATAL_EXCEPTION_FORMAT_ERRORS = flag
+
+
+class MissingRequiredFieldError(YANGException):
+    msg_fmt = _('%(what)s is missing required field "%(required)s".')
+
+
+class UnknownFieldError(YANGException):
+    msg_fmt = _('%(what)s contains unknown field "%(field)s". Refer to the '
+                'definition to verify valid values.')
+
+
+class TypeMismatchError(YANGException):
+    msg_fmt = _('%(what)s must be of type "%(type)s".')
+
+
+class InvalidNodeTypeError(YANGException):
+    msg_fmt = _('Node type "%(what)s" is not a valid type.')
+
+
+class InvalidTypeError(YANGException):
+    msg_fmt = _('Type "%(what)s" is not a valid type.')
+
+
+class InvalidSchemaError(YANGException):
+    msg_fmt = _('%(message)s')
+
+
+class ValidationError(YANGException):
+    msg_fmt = _('%(message)s')
+
+
+class UnknownInputError(YANGException):
+    msg_fmt = _('Unknown input "%(input_name)s".')
+
+
+class InvalidPropertyValueError(YANGException):
+    msg_fmt = _('Value of property "%(what)s" is invalid.')
+
+
+class InvalidTemplateVersion(YANGException):
+    msg_fmt = _('The template version "%(what)s" is invalid. '
+                'Valid versions are "%(valid_versions)s".')
+
+
+class InvalidYANGVersionPropertyException(YANGException):
+    msg_fmt = _('Value of YANG version property "%(what)s" is invalid.')
+
+
+class URLException(YANGException):
+    msg_fmt = _('%(what)s')
+
+
+class YangExtImportError(YANGException):
+    msg_fmt = _('Unable to import extension "%(ext_name)s". '
+                'Check to see that it exists and has no '
+                'language definition errors.')
+
+
+class YangExtAttributeError(YANGException):
+    msg_fmt = _('Missing attribute in extension "%(ext_name)s". '
+                'Check to see that it has required attributes '
+                '"%(attrs)s" defined.')
+
+
+class InvalidGroupTargetException(YANGException):
+    msg_fmt = _('"%(message)s"')
+
+
+class ConfFileParseError(YANGException):
+    msg_fmt = _('%(message)s')
+
+
+class ConfOptionNotDefined(YANGException):
+    msg_fmt = _('Option %(key)s in section %(section)s '
+                'is not defined in conf file')
+
+
+class ConfSectionNotDefined(YANGException):
+    msg_fmt = _('Section %(section)s is not defined in conf file')
+
+
+class YangModImportError(YANGException):
+    msg_fmt = _('Unable to import module %(mod_name)s. '
+                'Check to see that it exists and has no '
+                'language definition errors.')
+
+
+class YangClassImportError(YANGException):
+    msg_fmt = _('Unable to import class %(name)s in '
+                'module %(mod_name)s. Check to see that it '
+                'exists and has no language definition errors.')
+
+
+class YangClassAttributeError(YANGException):
+    msg_fmt = _('Class attribute referenced not found. '
+                '%(message)s. Check to see that it is defined.')
+
+
+class ExceptionCollector(object):
+
+    exceptions = []
+    collecting = False
+
+    @staticmethod
+    def clear():
+        del ExceptionCollector.exceptions[:]
+
+    @staticmethod
+    def start():
+        ExceptionCollector.clear()
+        ExceptionCollector.collecting = True
+
+    @staticmethod
+    def stop():
+        ExceptionCollector.collecting = False
+
+    @staticmethod
+    def contains(exception):
+        for ex in ExceptionCollector.exceptions:
+            if str(ex) == str(exception):
+                return True
+        return False
+
+    @staticmethod
+    def appendException(exception):
+        if ExceptionCollector.collecting:
+            if not ExceptionCollector.contains(exception):
+                exception.trace = traceback.extract_stack()[:-1]
+                ExceptionCollector.exceptions.append(exception)
+        else:
+            raise exception
+
+    @staticmethod
+    def exceptionsCaught():
+        return len(ExceptionCollector.exceptions) > 0
+
+    @staticmethod
+    def getTraceString(traceList):
+        traceString = ''
+        for entry in traceList:
+            f, l, m, c = entry[0], entry[1], entry[2], entry[3]
+            traceString += (_('\t\tFile %(file)s, line %(line)s, in '
+                              '%(method)s\n\t\t\t%(call)s\n')
+                            % {'file': f, 'line': l, 'method': m, 'call': c})
+        return traceString
+
+    @staticmethod
+    def getExceptionReportEntry(exception, full=True):
+        entry = exception.__class__.__name__ + ': ' + str(exception)
+        if full:
+            entry += '\n' + ExceptionCollector.getTraceString(exception.trace)
+        return entry
+
+    @staticmethod
+    def getExceptions():
+        return ExceptionCollector.exceptions
+
+    @staticmethod
+    def getExceptionsReport(full=True):
+        report = []
+        for exception in ExceptionCollector.exceptions:
+            report.append(
+                ExceptionCollector.getExceptionReportEntry(exception, full))
+        return report
+
+    @staticmethod
+    def assertExceptionMessage(exception, message):
+        err_msg = exception.__name__ + ': ' + message
+        report = ExceptionCollector.getExceptionsReport(False)
+        assert err_msg in report, (_('Could not find "%(msg)s" in "%(rep)s".')
+                                   % {'rep': report.__str__(), 'msg': err_msg})
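+
+
+# A minimal usage sketch (illustrative; assumes YANGException renders
+# msg_fmt from the keyword arguments passed to the exception):
+#
+#     ExceptionCollector.start()
+#     ExceptionCollector.appendException(URLException(what='bad URL'))
+#     ExceptionCollector.stop()
+#     if ExceptionCollector.exceptionsCaught():
+#         for entry in ExceptionCollector.getExceptionsReport(full=False):
+#             print(entry)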
diff --git a/common/python/rift/mano/yang_translator/common/utils.py b/common/python/rift/mano/yang_translator/common/utils.py
new file mode 100644 (file)
index 0000000..c44e8f5
--- /dev/null
@@ -0,0 +1,231 @@
+#
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import gettext
+import numbers
+import os
+
+from six.moves.urllib.parse import urlparse
+
+import yaml
+
+_localedir = os.environ.get('yang-translator'.upper() + '_LOCALEDIR')
+_t = gettext.translation('yang-translator', localedir=_localedir,
+                         fallback=True)
+
+
+def _(msg):
+    return _t.gettext(msg)
+
+
+class CompareUtils(object):
+
+    MISMATCH_VALUE1_LABEL = "<Expected>"
+    MISMATCH_VALUE2_LABEL = "<Provided>"
+    ORDERLESS_LIST_KEYS = ['allowed_values', 'depends_on']
+
+    @staticmethod
+    def compare_dicts(dict1, dict2, log):
+        """Return False if not equal, True if both are equal."""
+
+        if dict1 is None and dict2 is None:
+            return True
+        if dict1 is None or dict2 is None:
+            return False
+        # A pairwise zip() stops at the shorter dict, so differing key
+        # counts must be rejected explicitly first.
+        if len(dict1) != len(dict2):
+            return False
+
+        both_equal = True
+        for dict1_item, dict2_item in zip(dict1.items(), dict2.items()):
+            if dict1_item != dict2_item:
+                msg = (_("%(label1)s: %(item1)s \n is not equal to \n:"
+                         "%(label2)s: %(item2)s")
+                       % {'label1': CompareUtils.MISMATCH_VALUE2_LABEL,
+                          'item1': dict1_item,
+                          'label2': CompareUtils.MISMATCH_VALUE1_LABEL,
+                          'item2': dict2_item})
+                log.warning(msg)
+                both_equal = False
+                break
+        return both_equal
+
+    @staticmethod
+    def reorder(dic):
+        '''Canonicalize list items in the dictionary for ease of comparison.
+
+        For properties whose value is a list in which the order does not
+        matter, some pre-processing is required to bring those lists into a
+        canonical format. We use sorting just to make sure such differences
+        in ordering would not cause a mismatch.
+        '''
+
+        if type(dic) is not dict:
+            return None
+
+        reordered = {}
+        for key in dic.keys():
+            value = dic[key]
+            if type(value) is dict:
+                reordered[key] = CompareUtils.reorder(value)
+            elif type(value) is list \
+                and key in CompareUtils.ORDERLESS_LIST_KEYS:
+                reordered[key] = sorted(value)
+            else:
+                reordered[key] = value
+        return reordered
+
+    @staticmethod
+    def diff_dicts(dict1, dict2, reorder=True):
+        '''Compares two dictionaries and returns their differences.
+
+        Returns a dictionary of mismatches between the two dictionaries.
+        An empty dictionary is returned if two dictionaries are equivalent.
+        The reorder parameter indicates whether reordering is required
+        before comparison or not.
+        '''
+
+        if reorder:
+            dict1 = CompareUtils.reorder(dict1)
+            dict2 = CompareUtils.reorder(dict2)
+
+        if dict1 is None and dict2 is None:
+            return {}
+        if dict1 is None or dict2 is None:
+            return {CompareUtils.MISMATCH_VALUE1_LABEL: dict1,
+                    CompareUtils.MISMATCH_VALUE2_LABEL: dict2}
+
+        diff = {}
+        keys1 = set(dict1.keys())
+        keys2 = set(dict2.keys())
+        for key in keys1.union(keys2):
+            if key in keys1 and key not in keys2:
+                diff[key] = {CompareUtils.MISMATCH_VALUE1_LABEL: dict1[key],
+                             CompareUtils.MISMATCH_VALUE2_LABEL: None}
+            elif key not in keys1 and key in keys2:
+                diff[key] = {CompareUtils.MISMATCH_VALUE1_LABEL: None,
+                             CompareUtils.MISMATCH_VALUE2_LABEL: dict2[key]}
+            else:
+                val1 = dict1[key]
+                val2 = dict2[key]
+                if val1 != val2:
+                    if type(val1) is dict and type(val2) is dict:
+                        diff[key] = CompareUtils.diff_dicts(val1, val2, False)
+                    else:
+                        diff[key] = {CompareUtils.MISMATCH_VALUE1_LABEL: val1,
+                                     CompareUtils.MISMATCH_VALUE2_LABEL: val2}
+        return diff
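+
+    # A minimal sketch of what diff_dicts reports (illustrative values):
+    #
+    #     CompareUtils.diff_dicts({'a': 1, 'b': 2}, {'a': 1, 'b': 3})
+    #     # => {'b': {'<Expected>': 2, '<Provided>': 3}}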
+
+
+class YamlUtils(object):
+
+    @staticmethod
+    def get_dict(yaml_file):
+        '''Returns the dictionary representation of the given YAML spec.'''
+        try:
+            with open(yaml_file) as f:
+                return yaml.safe_load(f)
+        except IOError:
+            return None
+
+    @staticmethod
+    def compare_yamls(yaml1_file, yaml2_file):
+        '''Returns true if two dictionaries are equivalent, false otherwise.'''
+        dict1 = YamlUtils.get_dict(yaml1_file)
+        dict2 = YamlUtils.get_dict(yaml2_file)
+        return CompareUtils.compare_dicts(dict1, dict2)
+
+    @staticmethod
+    def compare_yaml_dict(yaml_file, dic):
+        '''Returns true if yaml matches the dictionary, false otherwise.'''
+        return CompareUtils.compare_dicts(YamlUtils.get_dict(yaml_file), dic)
+
+
+class UrlUtils(object):
+
+    @staticmethod
+    def validate_url(path):
+        """Validates whether the given path is a URL or not.
+
+        If the given path includes a scheme (http, https, ftp, ...) and a net
+        location (a domain name such as www.github.com) it is validated as a
+        URL.
+        """
+        parsed = urlparse(path)
+        return bool(parsed.scheme) and bool(parsed.netloc)
+
+
+def str_to_num(value):
+    """Convert a string representation of a number into a numeric type."""
+    if isinstance(value, numbers.Number):
+        return value
+    try:
+        return int(value)
+    except ValueError:
+        return float(value)
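+
+
+# Quick illustrative checks (a sketch; ValueError propagates for
+# non-numeric strings such as 'abc'):
+#
+#     str_to_num(7)        # => 7, numbers pass through unchanged
+#     str_to_num('7')      # => 7
+#     str_to_num('7.5')    # => 7.5
+#     UrlUtils.validate_url('http://www.github.com/x')  # => True
+#     UrlUtils.validate_url('rift/mano/yang')           # => False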
+
+
+def map_name_to_python(name):
+    if name == 'type':
+        return 'type_yang'
+    return name.replace('-', '_')
+
+
+def convert_keys_to_python(d):
+    '''Change all keys from - to _'''
+    if isinstance(d, dict):
+        # Iterate over a snapshot of the keys; popping while iterating
+        # the live view raises RuntimeError in Python 3.
+        for key in list(d.keys()):
+            d[map_name_to_python(key)] = convert_keys_to_python(d.pop(key))
+        return d
+    elif isinstance(d, list):
+        arr = []
+        for memb in d:
+            arr.append(convert_keys_to_python(memb))
+        return arr
+    else:
+        return d
+
+
+def map_name_to_yang(name):
+    return name.replace('_', '-')
+
+
+def convert_keys_to_yang(d):
+    '''Change all keys from _ to -'''
+    if isinstance(d, dict):
+        # Use map_name_to_yang here (the original called the python
+        # mapper, which would never convert '_' back to '-'), and
+        # snapshot the keys before popping.
+        for key in list(d.keys()):
+            d[map_name_to_yang(key)] = convert_keys_to_yang(d.pop(key))
+        return d
+    elif isinstance(d, list):
+        arr = []
+        for memb in d:
+            arr.append(convert_keys_to_yang(memb))
+        return arr
+    else:
+        return d
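+
+
+# Sketch of the key-mapping round trip (illustrative dict). Note the
+# 'type' -> 'type_yang' special case applies only in the python
+# direction, so the round trip yields 'type-yang', not 'type':
+#
+#     d = {'vnfd-id-ref': 1, 'type': 'ELAN'}
+#     convert_keys_to_python(d)
+#     # => {'vnfd_id_ref': 1, 'type_yang': 'ELAN'}
+#     convert_keys_to_yang(d)
+#     # => {'vnfd-id-ref': 1, 'type-yang': 'ELAN'}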
+
+
+def stringify_dict(d):
+    '''Convert all integer, float, etc to str'''
+    if isinstance(d, dict):
+        for key in d.keys():
+            d[key] = stringify_dict(d[key])
+        return d
+    elif isinstance(d, list):
+        arr = []
+        for memb in d:
+            arr.append(stringify_dict(memb))
+        return arr
+    else:
+        if not isinstance(d, str):
+            return str(d)
+        return d
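+
+
+# stringify_dict sketch (illustrative): nested numerics become strings
+# while the container shapes are preserved:
+#
+#     stringify_dict({'count': 2, 'ids': [1, 2.5]})
+#     # => {'count': '2', 'ids': ['1', '2.5']}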
diff --git a/common/python/rift/mano/yang_translator/compare_desc.py b/common/python/rift/mano/yang_translator/compare_desc.py
new file mode 100644 (file)
index 0000000..39fdd85
--- /dev/null
@@ -0,0 +1,114 @@
+#
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import argparse
+import json
+import logging
+import logging.config
+import pprint
+import sys
+
+from deepdiff import DeepDiff
+
+from toscaparser.utils.gettextutils import _
+
+
+class CompareDescShell(object):
+
+    SUPPORTED_TYPES = ['json']
+    INDENT = 2
+    DIFF_KEYS = (REMOVED_ITEMS, ADDED_ITEMS, TYPE_CHANGES, VALUES_CHANGED) = \
+                ('dic_item_removed', 'dic_item_added', 'type_changes',
+                 'values_changed')
+    DIFF_MAP = {REMOVED_ITEMS: 'Items removed',
+                ADDED_ITEMS: 'Items added',
+                TYPE_CHANGES: 'Changes in types',
+                VALUES_CHANGED: 'Changes in values'}
+    # Currently considering changes in removed keys or type changes
+    # as error.
+    ERROR_ITEMS = [REMOVED_ITEMS, TYPE_CHANGES]
+
+    def main(self, log, args):
+        self.log = log
+        self.log.debug(_("Args: {0}").format(args))
+        if args.type not in self.SUPPORTED_TYPES:
+            self.log.error(_("Unsupported file type {0}").
+                           format(args.type))
+            sys.exit(1)
+
+        with open(args.generated_file) as g:
+            gen_data = g.read()
+            json_gen = json.loads(gen_data)
+            self.log.debug(_("Generated: {0}").format(json_gen))
+
+        with open(args.expected_file) as e:
+            exp_data = e.read()
+            json_exp = json.loads(exp_data)
+            self.log.debug(_("Expected: {0}").format(json_exp))
+
+        diff = DeepDiff(json_exp, json_gen)
+        self.log.debug(_("Keys in diff: {0}").format(diff.keys()))
+        self.log.info(_("Differences:\n"))
+
+        d = pprint.pformat(diff, indent=self.INDENT)
+        self.log.info("Differences:\n{0}".format(d))
+
+        if len(set(self.ERROR_ITEMS).intersection(diff.keys())):
+            diff_str = pprint.pformat(diff)
+            msg = _("Found item changes: {0}").format(diff_str)
+            self.log.error(msg)
+            raise ValueError(msg)
+
+
+def main(args=None):
+    parser = argparse.ArgumentParser(
+        description='Validate descriptors by comparing')
+    parser.add_argument(
+        "-g",
+        "--generated-file",
+        required=True,
+        help="Generated descriptor file")
+    parser.add_argument(
+        "-e",
+        "--expected-file",
+        required=True,
+        help="Descriptor to compare")
+    parser.add_argument(
+        "-t",
+        "--type",
+        default='json',
+        help="File type. Default json")
+    parser.add_argument(
+        "--debug",
+        help="Enable debug logging",
+        action="store_true")
+    if args:
+        args = parser.parse_args(args)
+    else:
+        args = parser.parse_args()
+
+    if args.debug:
+        logging.basicConfig(level=logging.DEBUG)
+    else:
+        logging.basicConfig(level=logging.ERROR)
+    log = logging.getLogger("rwmano-translator")
+
+    CompareDescShell().main(log, args)
+
+
+if __name__ == '__main__':
+    main()
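+
+
+# Hedged CLI sketch (file names are hypothetical): compare a generated
+# descriptor against a reference copy; removed items or type changes
+# raise ValueError:
+#
+#     python compare_desc.py -g gen_nsd.json -e expected_nsd.json --debug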
diff --git a/common/python/rift/mano/yang_translator/conf/__init__.py b/common/python/rift/mano/yang_translator/conf/__init__.py
new file mode 100644 (file)
index 0000000..b718eee
--- /dev/null
@@ -0,0 +1,39 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+
+''' Initialize the global configuration for the translator '''
+
+import os
+
+from rift.mano.yang_translator.conf.config import ConfigProvider
+
+CONF_FILENAME = 'translator.conf'
+
+
+def init_global_conf():
+    '''Initialize the configuration provider.
+
+    Allows the configuration to be shared throughout the translator code.
+    The file used is translator.conf, and is within the conf/ directory. It
+    is a standard ini format, and is processed using the ConfigParser module.
+
+    '''
+    conf_path = os.path.dirname(os.path.abspath(__file__))
+    conf_file = os.path.join(conf_path, CONF_FILENAME)
+    ConfigProvider._load_config(conf_file)
+
+
+init_global_conf()
diff --git a/common/python/rift/mano/yang_translator/conf/config.py b/common/python/rift/mano/yang_translator/conf/config.py
new file mode 100644 (file)
index 0000000..631db40
--- /dev/null
@@ -0,0 +1,70 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Copyright 2016 RIFT.io Inc
+
+
+''' Provide a global configuration for the TOSCA translator'''
+
+import rift.mano.yang_translator.common.exception as exception
+from rift.mano.yang_translator.common.utils import _
+
+from six.moves import configparser
+
+
+class ConfigProvider(object):
+    '''Global config proxy that wraps a ConfigParser object.
+
+    Allows for class based access to config values. Should only be initialized
+    once using the corresponding translator.conf file in the conf directory.
+
+    '''
+
+    # List that captures all of the conf file sections.
+    # Append any new sections to this list.
+    _sections = ['DEFAULT']
+    _translator_config = None
+
+    @classmethod
+    def _load_config(cls, conf_file):
+        '''Private method only to be called once from the __init__ module'''
+
+        cls._translator_config = configparser.ConfigParser()
+        try:
+            cls._translator_config.read(conf_file)
+        except configparser.ParsingError:
+            msg = _('Unable to parse translator.conf file. '
+                    'Check to see that it exists in the conf directory.')
+            raise exception.ConfFileParseError(message=msg)
+
+    @classmethod
+    def get_value(cls, section, key):
+        try:
+            value = cls._translator_config.get(section, key)
+        except configparser.NoOptionError:
+            raise exception.ConfOptionNotDefined(key=key, section=section)
+        except configparser.NoSectionError:
+            raise exception.ConfSectionNotDefined(section=section)
+
+        return value
+
+    @classmethod
+    def get_all_values(cls):
+        values = []
+        for section in cls._sections:
+            try:
+                values.extend(cls._translator_config.items(section=section))
+            except configparser.NoSectionError:
+                raise exception.ConfSectionNotDefined(section=section)
+
+        return values
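+
+    # Usage sketch (assumes conf/__init__.py has already invoked
+    # _load_config on translator.conf):
+    #
+    #     ConfigProvider.get_value('DEFAULT', 'custom_types_location')
+    #     # => 'rift/mano/yang_translator/custom/rwmano'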
diff --git a/common/python/rift/mano/yang_translator/conf/translator.conf b/common/python/rift/mano/yang_translator/conf/translator.conf
new file mode 100644 (file)
index 0000000..23214f3
--- /dev/null
@@ -0,0 +1,4 @@
+[DEFAULT]
+
+# Relative path location for custom types
+custom_types_location=rift/mano/yang_translator/custom/rwmano
\ No newline at end of file
diff --git a/common/python/rift/mano/yang_translator/custom/__init__.py b/common/python/rift/mano/yang_translator/custom/__init__.py
new file mode 100644 (file)
index 0000000..bc4710b
--- /dev/null
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/yang_translator/custom/rwmano/__init__.py b/common/python/rift/mano/yang_translator/custom/rwmano/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/common/python/rift/mano/yang_translator/rwmano/__init__.py b/common/python/rift/mano/yang_translator/rwmano/__init__.py
new file mode 100644 (file)
index 0000000..bc4710b
--- /dev/null
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/yang_translator/rwmano/syntax/__init__.py b/common/python/rift/mano/yang_translator/rwmano/syntax/__init__.py
new file mode 100644 (file)
index 0000000..bc4710b
--- /dev/null
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_resource.py b/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_resource.py
new file mode 100644 (file)
index 0000000..f05933b
--- /dev/null
@@ -0,0 +1,221 @@
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from rift.mano.yang_translator.common.utils import _
+
+
+class ToscaResource(object):
+    '''Base class for YANG node type translation to RIFT.io TOSCA type.'''
+
+    # Used when creating the resource, so keeping separate
+    # from REQUIRED_FIELDS below
+    NAME = 'name'
+
+    REQUIRED_FIELDS = (DESC, VERSION, VENDOR, ID) = \
+                      ('description', 'version', 'vendor', 'id')
+
+    COMMON_FIELDS = (PATH, PORT, HOST, XPATH, TYPE, COUNT, FILE) = \
+                    ('path', 'port', 'host', 'xpath', 'type', 'count', 'file')
+
+    IGNORE_FIELDS = ['short_name']
+
+    FIELD_TYPES = (STRING, MAP, INTEGER, BOOL) = \
+                  ('string', 'map', 'integer', 'boolean',)
+
+    YANG_KEYS = (VLD, NSD, VNFD, VDU, DASHBOARD_PARAMS,
+                 CONFIG_ATTR, CONFIG_TMPL,
+                 CONFIG_TYPE, CONFIG_DETAILS, EXT_INTF,
+                 VIRT_INTF, POLL_INTVL_SECS,
+                 MEM_VNF_INDEX_REF, VNFD_ID_REF,
+                 MEM_VNF_INDEX, VNF_CONFIG, TYPE_Y,
+                 USER_DEF_SCRIPT, SEQ, PARAM,
+                 VALUE, START_BY_DFLT,) = \
+                ('vld', 'nsd', 'vnfd', 'vdu', 'dashboard_params',
+                 'config_attributes', 'config_template',
+                 'config_type', 'config_details', 'external_interface',
+                 'virtual_interface', 'polling_interval_secs',
+                 'member_vnf_index_ref', 'vnfd_id_ref',
+                 'member_vnf_index', 'vnf_configuration', 'type_yang',
+                 'user_defined_script', 'seq', 'parameter',
+                 'value', 'start_by_default',)
+
+    TOSCA_FIELDS = (DERIVED_FROM, PROPERTIES, DEFAULT, REQUIRED,
+                    NO, CONSTRAINTS, RELATIONSHIPS,
+                    REQUIREMENTS, UNBOUND, NODE,
+                    OCCURENCES, PRIMITIVES, MEMBERS,
+                    POLL_INTVL, TRUE, FALSE,) = \
+                   ('derived_from', 'properties', 'default', 'required',
+                    'no', 'constraints', 'relationships',
+                    'requirements', 'UNBOUND', 'node',
+                    'occurences', 'primitives', 'members',
+                    'polling_interval', 'true', 'false')
+
+    TOSCA_SEC = (DATA_TYPES, CAPABILITY_TYPES, NODE_TYPES,
+                 GROUP_TYPES, POLICY_TYPES, REQUIREMENTS,
+                 ARTIFACTS, PROPERTIES, INTERFACES,
+                 CAPABILITIES, RELATIONSHIP,
+                 ARTIFACT_TYPES) = \
+                ('data_types', 'capability_types', 'node_types',
+                 'group_types', 'policy_types', 'requirements',
+                 'artifacts', 'properties', 'interfaces',
+                 'capabilities', 'relationship',
+                 'artifact_types')
+
+    TOSCA_TMPL = (INPUTS, NODE_TMPL, GROUPS, POLICIES,
+                  METADATA, TOPOLOGY_TMPL, OUTPUTS) = \
+                 ('inputs', 'node_templates', 'groups', 'policies',
+                  'metadata', 'topology_template', 'outputs')
+
+    TOSCA_DERIVED = (
+        T_VNF_CONFIG,
+        T_HTTP_EP,
+        T_MGMT_INTF,
+        T_MON_PARAM,
+        T_VNF1,
+        T_VDU1,
+        T_CP1,
+        T_VL1,
+        T_CONF_PRIM,
+        T_SCALE_GRP,
+        T_ARTF_QCOW2,
+        T_INITIAL_CFG,
+    ) = \
+        ('tosca.datatypes.network.riftio.vnf_configuration',
+         'tosca.capabilities.riftio.http_endpoint_type',
+         'tosca.capabilities.riftio.mgmt_interface_type',
+         'tosca.capabilities.riftio.monitoring_param',
+         'tosca.nodes.riftio.VNF1',
+         'tosca.nodes.riftio.VDU1',
+         'tosca.nodes.riftio.CP1',
+         'tosca.nodes.riftio.VL1',
+         'tosca.groups.riftio.ConfigPrimitives',
+         'tosca.policies.riftio.ScalingGroup',
+         'tosca.artifacts.Deployment.Image.riftio.QCOW2',
+         'tosca.policies.riftio.InitialConfigPrimitive'
+        )
+
+    SUPPORT_FILES = (SRC, DEST, EXISTING) = \
+                    ('source', 'destination', 'existing')
+
+    SUPPORT_DIRS = (IMAGE_DIR, SCRIPT_DIR,) = \
+                   ('images', 'scripts',)
+
+    def __init__(self,
+                 log,
+                 name,
+                 type_,
+                 yang):
+        self.log = log
+        self.name = name
+        self.type_ = type_
+        self.yang = yang
+        self.id_ = None
+        # Initialize the attributes read lazily by the vendor/version
+        # properties below; without these defaults the first access
+        # raises AttributeError.
+        self.metadata = None
+        self._vendor = None
+        self._version = None
+        log.debug(_('Translating YANG node %(name)s of type %(type)s') %
+                  {'name': self.name,
+                   'type': self.type_})
+
+    # Added the below property methods to support methods that
+    # work on both toscaparser.NodeType and translator.ToscaResource
+    @property
+    def type(self):
+        return self.type_
+
+    @type.setter
+    def type(self, value):
+        self.type_ = value
+
+    def get_type(self):
+        return self.type_
+
+    @property
+    def id(self):
+        return self.id_
+
+    @id.setter
+    def id(self, value):
+        self.id_ = value
+
+    @property
+    def description(self):
+        return _("Translated from YANG")
+
+    @property
+    def vendor(self):
+        if self._vendor is None:
+            if self.metadata and 'vendor' in self.metadata:
+                self._vendor = self.metadata['vendor']
+            else:
+                self._vendor = "RIFT.io"
+        return self._vendor
+
+    @property
+    def version(self):
+        if self._version is None:
+            if self.metadata and 'version' in self.metadata:
+                self._version = str(self.metadata['version'])
+            else:
+                self._version = '1.0'
+        return self._version
+
+    def __str__(self):
+        return "%s(%s)" % (self.name, self.type)
+
+    def map_yang_name_to_tosca(self, name):
+        new_name = name.replace("_", "-")
+        return new_name
+
+    def map_keys_to_tosca(self, d):
+        if isinstance(d, dict):
+            # Snapshot the keys; popping while iterating the live view
+            # raises RuntimeError in Python 3.
+            for key in list(d.keys()):
+                d[self.map_yang_name_to_tosca(key)] = \
+                                    self.map_keys_to_tosca(d.pop(key))
+            return d
+        elif isinstance(d, list):
+            arr = []
+            for memb in d:
+                arr.append(self.map_keys_to_tosca(memb))
+            return arr
+        else:
+            return d
+
+    def handle_yang(self):
+        self.log.debug(_("Need to implement handle_yang for {0}").
+                       format(self))
+
+    def remove_ignored_fields(self, d):
+        '''Remove keys in dict not used'''
+        for key in self.IGNORE_FIELDS:
+            if key in d:
+                d.pop(key)
+
+    def generate_tosca_type(self, tosca):
+        self.log.debug(_("Need to implement generate_tosca_type for {0}").
+                       format(self))
+
+    def generate_tosca_model(self, tosca):
+        self.log.debug(_("Need to implement generate_tosca_model for {0}").
+                       format(self))
+
+    def get_supporting_files(self):
+        """Get list of other required files for each resource"""
+        # Return an empty list so callers can take len() of the result
+        # without a None check.
+        return []
+
+    def get_matching_item(self, name, items, key=None):
+        if key is None:
+            key = 'name'
+        for entry in items:
+            if entry[key] == name:
+                return entry
+        return None
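+
+
+# map_keys_to_tosca sketch (illustrative; log is assumed to be a
+# standard logging.Logger). Recursion covers nested dicts and lists:
+#
+#     r = ToscaResource(log, 'ping', 'vnfd', {})
+#     r.map_keys_to_tosca({'mgmt_interface': {'dashboard_params': {}}})
+#     # => {'mgmt-interface': {'dashboard-params': {}}}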
diff --git a/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_template.py b/common/python/rift/mano/yang_translator/rwmano/syntax/tosca_template.py
new file mode 100644 (file)
index 0000000..7c31df5
--- /dev/null
@@ -0,0 +1,135 @@
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import OrderedDict
+
+import textwrap
+
+from rift.mano.yang_translator.common.utils import _
+from rift.mano.yang_translator.rwmano.syntax.tosca_resource \
+    import ToscaResource
+
+import yaml
+
+
+class ToscaTemplate(object):
+    '''Container for full RIFT.io TOSCA template.'''
+
+    KEYS = (TOSCA, FILES) = ('tosca', 'files')
+
+    def __init__(self, log):
+        self.log = log
+        self.resources = []
+
+    def output_to_tosca(self):
+        self.log.debug(_('Converting translated output to tosca template.'))
+
+        templates = {}
+
+        for resource in self.resources:
+            # Each NSD should generate separate templates
+            if resource.type == 'nsd':
+                tmpl = resource.generate_tosca_type()
+                tmpl = resource.generate_tosca_template(tmpl)
+                self.log.debug(_("TOSCA template generated for {0}:\n{1}").
+                               format(resource.name, tmpl))
+                templates[resource.name] = {self.TOSCA: self.output_to_yaml(tmpl)}
+                files = resource.get_supporting_files()
+                if len(files):
+                    templates[resource.name][self.FILES] = files
+
+        return templates
+
+    def represent_ordereddict(self, dumper, data):
+        nodes = []
+        for key, value in data.items():
+            node_key = dumper.represent_data(key)
+            node_value = dumper.represent_data(value)
+            nodes.append((node_key, node_value))
+        return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', nodes)
+
+    def ordered_node(self, node):
+        order = [ToscaResource.TYPE, ToscaResource.DERIVED_FROM,
+                 ToscaResource.DESC, ToscaResource.MEMBERS,
+                 ToscaResource.PROPERTIES, ToscaResource.CAPABILITIES,
+                 ToscaResource.REQUIREMENTS, ToscaResource.ARTIFACTS,
+                 ToscaResource.INTERFACES]
+        new_node = OrderedDict()
+        for ent in order:
+            if ent in node:
+                new_node.update({ent: node.pop(ent)})
+
+        # Check if we missed any entry
+        if len(node):
+            self.log.warn(_("Did not sort these entries: {0}").
+                          format(node))
+            new_node.update(node)
+
+        return new_node
+
+    def ordered_nodes(self, nodes):
+        new_nodes = OrderedDict()
+        if isinstance(nodes, dict):
+            for name, node in nodes.items():
+                new_nodes.update({name: self.ordered_node(node)})
+            return new_nodes
+        else:
+            return nodes
+
+    def output_to_yaml(self, tosca):
+        self.log.debug(_('Converting translated output to yaml format.'))
+        dict_output = OrderedDict()
+
+        dict_output.update({'tosca_definitions_version':
+                            tosca['tosca_definitions_version']})
+        # Description
+        desc_str = ""
+        if ToscaResource.DESC in tosca:
+            # Wrap the text to a new line if the line exceeds 80 characters.
+            wrapped_txt = "\n  ". \
+                          join(textwrap.wrap(tosca[ToscaResource.DESC], 80))
+            desc_str = ToscaResource.DESC + ": >\n  " + \
+                       wrapped_txt + "\n\n"
+        dict_output.update({ToscaResource.DESC: tosca[ToscaResource.DESC]})
+
+        if ToscaResource.METADATA in tosca:
+            dict_output.update({ToscaResource.METADATA:
+                               tosca[ToscaResource.METADATA]})
+
+        # Add all types
+        types_list = [ToscaResource.DATA_TYPES, ToscaResource.CAPABILITY_TYPES,
+                      ToscaResource.NODE_TYPES,
+                      ToscaResource.GROUP_TYPES, ToscaResource.POLICY_TYPES]
+        for typ in types_list:
+            if typ in tosca:
+                dict_output.update({typ: self.ordered_nodes(tosca[typ])})
+
+        # Add topology template
+        topo_list = [ToscaResource.INPUTS, ToscaResource.NODE_TMPL,
+                     ToscaResource.GROUPS, ToscaResource.POLICIES,
+                     ToscaResource.OUTPUTS]
+        if ToscaResource.TOPOLOGY_TMPL in tosca:
+            tmpl = OrderedDict()
+            for typ in tosca[ToscaResource.TOPOLOGY_TMPL]:
+                tmpl.update({typ:
+                             self.ordered_nodes(
+                                 tosca[ToscaResource.TOPOLOGY_TMPL][typ])})
+            dict_output.update({ToscaResource.TOPOLOGY_TMPL: tmpl})
+
+        yaml.add_representer(OrderedDict, self.represent_ordereddict)
+        yaml_string = yaml.dump(dict_output, default_flow_style=False)
+        # get rid of the '' from yaml.dump around numbers
+        yaml_string = yaml_string.replace('\'', '')
+        self.log.debug(_("YAML output:\n{0}").format(yaml_string))
+        return yaml_string
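+
+    # Ordering sketch (illustrative node; log assumed to be a standard
+    # logging.Logger): ordered_node emits known keys in the canonical
+    # TOSCA order and appends any unrecognized keys with a warning:
+    #
+    #     ToscaTemplate(log).ordered_node(
+    #         {'properties': {}, 'type': 'tosca.nodes.riftio.VNF1'})
+    #     # => OrderedDict([('type', 'tosca.nodes.riftio.VNF1'),
+    #     #                 ('properties', {})])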
diff --git a/common/python/rift/mano/yang_translator/rwmano/translate_descriptors.py b/common/python/rift/mano/yang_translator/rwmano/translate_descriptors.py
new file mode 100644 (file)
index 0000000..f0a6866
--- /dev/null
@@ -0,0 +1,192 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Copyright 2016 RIFT.io Inc
+
+
+import importlib
+import os
+
+from rift.mano.yang_translator.common.exception import YangClassAttributeError
+from rift.mano.yang_translator.common.exception import YangClassImportError
+from rift.mano.yang_translator.common.exception import YangModImportError
+from rift.mano.yang_translator.common.utils import _
+from rift.mano.yang_translator.conf.config import ConfigProvider \
+    as translatorConfig
+from rift.mano.yang_translator.rwmano.syntax.tosca_resource \
+    import ToscaResource
+
+
+class TranslateDescriptors(object):
+    '''Translate YANG NodeTemplates to RIFT.io MANO Resources.'''
+
+    YANG_DESC = (NSD, VNFD) = ('nsd', 'vnfd')
+
+    ###########################
+    # Module utility Functions
+    # for dynamic class loading
+    ###########################
+
+    YANG_TO_TOSCA_TYPE = None
+
+    @staticmethod
+    def _load_classes(log, locations, classes):
+        '''Dynamically load all the classes from the given locations.'''
+
+        for cls_path in locations:
+            # Use the absolute path of the class path
+            abs_path = os.path.dirname(os.path.abspath(__file__))
+            abs_path = abs_path.replace('rift/mano/yang_translator/rwmano',
+                                        cls_path)
+            log.debug(_("Loading classes from %s") % abs_path)
+
+            # Grab all the yang type module files in the given path
+            mod_files = [f for f in os.listdir(abs_path) if (
+                f.endswith('.py') and
+                not f.startswith('__init__') and
+                f.startswith('yang_'))]
+
+            # For each module, pick out the target translation class
+            for f in mod_files:
+                f_name, ext = f.rsplit('.', 1)
+                mod_name = cls_path + '/' + f_name
+                mod_name = mod_name.replace('/', '.')
+                # Pre-set so the except clause below can safely test it
+                # even when TARGET_CLASS_NAME itself is missing.
+                target_name = None
+                try:
+                    mod = importlib.import_module(mod_name)
+                    target_name = getattr(mod, 'TARGET_CLASS_NAME')
+                    clazz = getattr(mod, target_name)
+                    classes.append(clazz)
+                except ImportError:
+                    raise YangModImportError(mod_name=mod_name)
+                except AttributeError:
+                    if target_name:
+                        raise YangClassImportError(name=target_name,
+                                                   mod_name=mod_name)
+                    else:
+                        # TARGET_CLASS_NAME is not defined in module.
+                        # Re-raise the exception
+                        raise
+
+    @staticmethod
+    def _generate_type_map(log):
+        '''Generate YANG translation types map.
+
+        Load user defined classes from location path specified in conf file.
+        Base classes are located within the yang directory.
+        '''
+
+        # Base types directory
+        BASE_PATH = 'rift/mano/yang_translator/rwmano/yang'
+
+        # Custom types directory defined in conf file
+        custom_path = translatorConfig.get_value('DEFAULT',
+                                                 'custom_types_location')
+
+        # Collect the translation classes from the base location and
+        # any custom location configured above.
+        classes = []
+        TranslateDescriptors._load_classes(log,
+                                           (BASE_PATH, custom_path),
+                                           classes)
+        try:
+            types_map = {clazz.yangtype: clazz for clazz in classes}
+            log.debug(_("Type maps loaded: {}").format(types_map.keys()))
+        except AttributeError as e:
+            raise YangClassAttributeError(message=str(e))
+
+        return types_map
+
+    def __init__(self, log, yangs, tosca_template):
+        self.log = log
+        self.yangs = yangs
+        self.tosca_template = tosca_template
+        # list of all TOSCA resources generated
+        self.tosca_resources = []
+        self.metadata = {}
+        log.debug(_('Mapping between YANG nodetemplate and TOSCA resource.'))
+
+    def translate(self):
+        if TranslateDescriptors.YANG_TO_TOSCA_TYPE is None:
+            TranslateDescriptors.YANG_TO_TOSCA_TYPE = \
+                TranslateDescriptors._generate_type_map(self.log)
+        return self._translate_yang()
+
+    def translate_metadata(self):
+        """Translate and store the metadata in instance"""
+        FIELDS_MAP = {
+            'ID': 'name',
+            'vendor': 'vendor',
+            'version': 'version',
+        }
+        metadata = {}
+        # Initialize to default values
+        metadata['name'] = 'yang_to_tosca'
+        metadata['vendor'] = 'RIFT.io'
+        metadata['version'] = '1.0'
+        yang_meta = None
+        if 'nsd' in self.yangs:
+            yang_meta = self.yangs['nsd'][0]
+        elif 'vnfd' in self.yangs:
+            yang_meta = self.yangs['vnfd'][0]
+        if yang_meta is not None:
+            for key in FIELDS_MAP:
+                if FIELDS_MAP[key] in yang_meta:
+                    metadata[key] = str(yang_meta[FIELDS_MAP[key]])
+        self.log.debug(_("Metadata {0}").format(metadata))
+        self.metadata = metadata
+
+    def _translate_yang(self):
+        self.log.debug(_('Translating the descriptors.'))
+        for nsd in self.yangs.get(self.NSD, []):
+            self.log.debug(_("Translate descriptor of type nsd: {}").
+                           format(nsd))
+            tosca_node = TranslateDescriptors. \
+                         YANG_TO_TOSCA_TYPE[self.NSD](
+                             self.log,
+                             nsd.pop(ToscaResource.NAME),
+                             self.NSD,
+                             nsd)
+            self.tosca_resources.append(tosca_node)
+
+        for vnfd in self.yangs.get(self.VNFD, []):
+            self.log.debug(_("Translate descriptor of type vnfd: {}").
+                           format(vnfd))
+            tosca_node = TranslateDescriptors. \
+                         YANG_TO_TOSCA_TYPE[self.VNFD](
+                             self.log,
+                             vnfd.pop(ToscaResource.NAME),
+                             self.VNFD,
+                             vnfd)
+            self.tosca_resources.append(tosca_node)
+
+        # First translate VNFDs
+        for node in self.tosca_resources:
+            if node.type == self.VNFD:
+                self.log.debug(_("Handle yang for {0} of type {1}").
+                               format(node.name, node.type_))
+                node.handle_yang()
+
+        # Now translate NSDs
+        for node in self.tosca_resources:
+            if node.type == self.NSD:
+                self.log.debug(_("Handle yang for {0} of type {1}").
+                               format(node.name, node.type_))
+                node.handle_yang(self.tosca_resources)
+
+        return self.tosca_resources
+
+    def find_tosca_resource(self, name):
+        for resource in self.tosca_resources:
+            if resource.name == name:
+                return resource
+
+    def _find_yang_node(self, yang_name):
+        for node in self.nodetemplates:
+            if node.name == yang_name:
+                return node
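+
+
+# Plugin-module contract sketch (hypothetical file
+# rwmano/yang/yang_example.py): _load_classes only picks up yang_*.py
+# files that export TARGET_CLASS_NAME, and _generate_type_map keys the
+# class by its 'yangtype' attribute:
+#
+#     TARGET_CLASS_NAME = 'YangExample'
+#
+#     class YangExample(ToscaResource):
+#         yangtype = 'example'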
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang/__init__.py b/common/python/rift/mano/yang_translator/rwmano/yang/__init__.py
new file mode 100755 (executable)
index 0000000..bc4710b
--- /dev/null
@@ -0,0 +1,15 @@
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang/yang_nsd.py b/common/python/rift/mano/yang_translator/rwmano/yang/yang_nsd.py
new file mode 100644 (file)
index 0000000..491bd86
--- /dev/null
@@ -0,0 +1,396 @@
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from copy import deepcopy
+import os
+
+from rift.mano.yang_translator.common.exception import ValidationError
+from rift.mano.yang_translator.common.utils import _
+from rift.mano.yang_translator.rwmano.syntax.tosca_resource \
+    import ToscaResource
+from rift.mano.yang_translator.rwmano.yang.yang_vld import YangVld
+
+TARGET_CLASS_NAME = 'YangNsd'
+
+
+class YangNsd(ToscaResource):
+    '''Class for RIFT.io YANG NS descriptor translation to TOSCA type.'''
+
+    yangtype = 'nsd'
+
+    OTHER_FIELDS = (SCALE_GRP, CONF_PRIM,
+                    USER_DEF_SCRIPT, SCALE_ACT,
+                    TRIGGER, NS_CONF_PRIM_REF,
+                    CONST_VNFD, VNFD_MEMBERS,
+                    MIN_INST_COUNT, MAX_INST_COUNT,
+                    INPUT_PARAM_XPATH, CONFIG_ACTIONS,
+                    INITIAL_CFG,) = \
+                   ('scaling_group_descriptor', 'service_primitive',
+                    'user_defined_script', 'scaling_config_action',
+                    'trigger', 'ns_config_primitive_name_ref',
+                    'constituent_vnfd', 'vnfd_member',
+                    'min_instance_count', 'max_instance_count',
+                    'input_parameter_xpath', 'config_actions',
+                    'initial_config_primitive',)
+
+    def __init__(self,
+                 log,
+                 name,
+                 type_,
+                 yang):
+        super(YangNsd, self).__init__(log,
+                                      name,
+                                      type_,
+                                      yang)
+        self.props = {}
+        self.inputs = []
+        self.vnfds = {}
+        self.vlds = []
+        self.conf_prims = []
+        self.scale_grps = []
+        self.initial_cfg = []
+
+    def handle_yang(self, vnfds):
+        self.log.debug(_("Process NSD desc {0}: {1}").
+                       format(self.name, self.yang))
+
+        def process_input_param(param):
+            if self.XPATH in param:
+                val = param.pop(self.XPATH)
+                # Strip namespace, catalog and nsd part
+                self.inputs.append({
+                    self.NAME:
+                    self.map_yang_name_to_tosca(
+                        val.replace('/nsd:nsd-catalog/nsd:nsd/nsd:', ''))})
+            if len(param):
+                self.log.warn(_("{0}, Did not process the following for "
+                                "input param {1}: {2}").
+                              format(self, self.inputs, param))
+            self.log.debug(_("{0}, inputs: {1}").format(self, self.inputs[-1]))
+
+        def process_const_vnfd(cvnfd):
+            # Get the matching VNFD
+            vnfd_id = cvnfd.pop(self.VNFD_ID_REF)
+            for vnfd in vnfds:
+                if vnfd.type == self.VNFD and vnfd.id == vnfd_id:
+                    self.vnfds[cvnfd.pop(self.MEM_VNF_INDEX)] = vnfd
+                    if self.START_BY_DFLT in cvnfd:
+                        vnfd.props[self.START_BY_DFLT] = \
+                                            cvnfd.pop(self.START_BY_DFLT)
+                    break
+
+            if len(cvnfd):
+                self.log.warn(_("{0}, Did not process the following for "
+                                "constituent vnfd {1}: {2}").
+                              format(self, vnfd_id, cvnfd))
+            self.log.debug(_("{0}, VNFD: {1}").format(self, self.vnfds))
+
+        def process_scale_grp(dic):
+            sg = {}
+            self.log.debug(_("{0}, scale group: {1}").format(self, dic))
+            fields = [self.NAME, self.MIN_INST_COUNT, self.MAX_INST_COUNT]
+            for key in fields:
+                if key in dic:
+                    sg[key] = dic.pop(key)
+
+            membs = {}
+            for vnfd_memb in dic.pop(self.VNFD_MEMBERS):
+                vnfd_idx = vnfd_memb[self.MEM_VNF_INDEX_REF]
+                if vnfd_idx in self.vnfds:
+                    membs[self.vnfds[vnfd_idx].name] = \
+                        vnfd_memb[self.COUNT]
+            sg['vnfd_members'] = membs
+
+            trigs = {}
+            if self.SCALE_ACT in dic:
+                for sg_act in dic.pop(self.SCALE_ACT):
+                    # Validate the primitive
+                    prim = sg_act.pop(self.NS_CONF_PRIM_REF)
+                    for cprim in self.conf_prims:
+                        if cprim[self.NAME] == prim:
+                            trigs[sg_act.pop(self.TRIGGER)] = prim
+                            break
+                    if len(sg_act):
+                        err_msg = (_("{0}, Did not find config-primitive {1}").
+                                   format(self, prim))
+                        self.log.error(err_msg)
+                        raise ValidationError(message=err_msg)
+            sg[self.CONFIG_ACTIONS] = trigs
+
+            if len(dic):
+                self.log.warn(_("{0}, Did not process all fields for {1}").
+                              format(self, dic))
+            self.log.debug(_("{0}, Scale group {1}").format(self, sg))
+            self.scale_grps.append(sg)
+
+        def process_initial_config(dic):
+            icp = {}
+            self.log.debug(_("{0}, initial config: {1}").format(self, dic))
+            for key in [self.NAME, self.SEQ, self.USER_DEF_SCRIPT]:
+                if key in dic:
+                    icp[key] = dic.pop(key)
+
+            params = {}
+            if self.PARAM in dic:
+                for p in dic.pop(self.PARAM):
+                    if (self.NAME in p and
+                        self.VALUE in p):
+                        params[p[self.NAME]] = p[self.VALUE]
+                    else:
+                        # TODO (pjoseph): Need to add support to read the
+                        # config file and get the value from that
+                        self.log.warn(_("{0}, Got parameter without value: {1}").
+                                      format(self, p))
+                if len(params):
+                    icp[self.PARAM] = params
+
+            if len(dic):
+                self.log.warn(_("{0}, Did not process all fields for {1}").
+                              format(self, dic))
+            self.log.debug(_("{0}, Initial config {1}").format(self, icp))
+            self.initial_cfg.append(icp)
+
+        dic = deepcopy(self.yang)
+        try:
+            for key in self.REQUIRED_FIELDS:
+                self.props[key] = dic.pop(key)
+
+            self.id = self.props[self.ID]
+
+            # Process constituent VNFDs
+            if self.CONST_VNFD in dic:
+                for cvnfd in dic.pop(self.CONST_VNFD):
+                    process_const_vnfd(cvnfd)
+
+            # Process VLDs
+            if self.VLD in dic:
+                for vld_dic in dic.pop(self.VLD):
+                    vld = YangVld(self.log, vld_dic.pop(self.NAME),
+                                  self.VLD, vld_dic)
+                    vld.process_vld(self.vnfds)
+                    self.vlds.append(vld)
+
+            # Process config primitives
+            if self.CONF_PRIM in dic:
+                for cprim in dic.pop(self.CONF_PRIM):
+                    conf_prim = {self.NAME: cprim.pop(self.NAME)}
+                    if self.USER_DEF_SCRIPT in cprim:
+                        conf_prim[self.USER_DEF_SCRIPT] = \
+                                        cprim.pop(self.USER_DEF_SCRIPT)
+                        self.conf_prims.append(conf_prim)
+                    else:
+                        err_msg = (_("{0}, Only user defined script supported "
+                                     "in config-primitive for now {}: {}").
+                                   format(self, conf_prim, cprim))
+                        self.log.error(err_msg)
+                        raise ValidationError(message=err_msg)
+
+            # Process scaling group
+            if self.SCALE_GRP in dic:
+                for sg_dic in dic.pop(self.SCALE_GRP):
+                    process_scale_grp(sg_dic)
+
+            # Process initial config primitives
+            if self.INITIAL_CFG in dic:
+                for icp_dic in dic.pop(self.INITIAL_CFG):
+                    process_initial_config(icp_dic)
+
+            # Process the input params
+            if self.INPUT_PARAM_XPATH in dic:
+                for param in dic.pop(self.INPUT_PARAM_XPATH):
+                    process_input_param(param)
+
+            self.remove_ignored_fields(dic)
+            if len(dic):
+                self.log.warn(_("{0}, Did not process the following for "
+                                "NSD {1}: {2}").
+                              format(self, self.props, dic))
+            self.log.debug(_("{0}, NSD: {1}").format(self, self.props))
+        except Exception as e:
+            err_msg = _("Exception processing NSD {0} : {1}"). \
+                      format(self.name, e)
+            self.log.error(err_msg)
+            self.log.exception(e)
+            raise ValidationError(message=err_msg)
+
+    def generate_tosca_type(self):
+        self.log.debug(_("{0} Generate tosa types").
+                       format(self))
+
+        tosca = {}
+        tosca[self.DATA_TYPES] = {}
+        tosca[self.NODE_TYPES] = {}
+
+        for idx, vnfd in self.vnfds.items():
+            tosca = vnfd.generate_tosca_type(tosca)
+
+        for vld in self.vlds:
+            tosca = vld.generate_tosca_type(tosca)
+
+        # Generate type for config primitives
+        if self.GROUP_TYPES not in tosca:
+            tosca[self.GROUP_TYPES] = {}
+        if self.T_CONF_PRIM not in tosca[self.GROUP_TYPES]:
+            tosca[self.GROUP_TYPES][self.T_CONF_PRIM] = {
+                self.DERIVED_FROM: 'tosca.policies.Root',
+                self.PROPERTIES: {
+                    'primitive': self.MAP
+                }}
+
+        # Generate type for scaling group
+        if self.POLICY_TYPES not in tosca:
+            tosca[self.POLICY_TYPES] = {}
+        if self.T_SCALE_GRP not in tosca[self.POLICY_TYPES]:
+            tosca[self.POLICY_TYPES][self.T_SCALE_GRP] = {
+                self.DERIVED_FROM: 'tosca.policies.Root',
+                self.PROPERTIES: {
+                    self.NAME:
+                    {self.TYPE: self.STRING},
+                    self.MAX_INST_COUNT:
+                    {self.TYPE: self.INTEGER},
+                    self.MIN_INST_COUNT:
+                    {self.TYPE: self.INTEGER},
+                    'vnfd_members':
+                    {self.TYPE: self.MAP},
+                    self.CONFIG_ACTIONS:
+                    {self.TYPE: self.MAP}
+                }}
+
+        if self.T_INITIAL_CFG not in tosca[self.POLICY_TYPES]:
+            tosca[self.POLICY_TYPES][self.T_INITIAL_CFG] = {
+                self.DERIVED_FROM: 'tosca.policies.Root',
+                self.PROPERTIES: {
+                    self.NAME:
+                    {self.TYPE: self.STRING},
+                    self.SEQ:
+                    {self.TYPE: self.INTEGER},
+                    self.USER_DEF_SCRIPT:
+                    {self.TYPE: self.STRING},
+                    self.PARAM:
+                    {self.TYPE: self.MAP},
+                }}
+
+        return tosca
+
+    def generate_tosca_template(self, tosca):
+        self.log.debug(_("{0}, Generate tosca template").
+                       format(self, tosca))
+
+        # Add the standard entries
+        tosca['tosca_definitions_version'] = \
+                                    'tosca_simple_profile_for_nfv_1_0_0'
+        tosca[self.DESC] = self.props[self.DESC]
+        tosca[self.METADATA] = {
+            'ID': self.name,
+            self.VENDOR: self.props[self.VENDOR],
+            self.VERSION: self.props[self.VERSION],
+        }
+
+        tosca[self.TOPOLOGY_TMPL] = {}
+
+        # Add input params
+        if len(self.inputs):
+            if self.INPUTS not in tosca[self.TOPOLOGY_TMPL]:
+                tosca[self.TOPOLOGY_TMPL][self.INPUTS] = {}
+            for inp in self.inputs:
+                entry = {inp[self.NAME]: {self.TYPE: self.STRING,
+                                          self.DESC:
+                                          'Translated from YANG'}}
+                tosca[self.TOPOLOGY_TMPL][self.INPUTS].update(entry)
+
+        tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL] = {}
+
+        # Add the VNFDs and VLDs
+        for idx, vnfd in self.vnfds.items():
+            vnfd.generate_vnf_template(tosca, idx)
+
+        for vld in self.vlds:
+            vld.generate_tosca_template(tosca)
+
+        # add the config primitives
+        if len(self.conf_prims):
+            if self.GROUPS not in tosca[self.TOPOLOGY_TMPL]:
+                tosca[self.TOPOLOGY_TMPL][self.GROUPS] = {}
+
+            conf_prims = {
+                self.TYPE: self.T_CONF_PRIM
+            }
+            conf_prims[self.MEMBERS] = [vnfd.name for vnfd in
+                                        self.vnfds.values()]
+            prims = {}
+            for confp in self.conf_prims:
+                prims[confp[self.NAME]] = {
+                    self.USER_DEF_SCRIPT: confp[self.USER_DEF_SCRIPT]
+                }
+            conf_prims[self.PROPERTIES] = {
+                self.PRIMITIVES: prims
+            }
+
+            tosca[self.TOPOLOGY_TMPL][self.GROUPS][self.CONF_PRIM] = conf_prims
+
+        # Add the scale group
+        if len(self.scale_grps):
+            if self.POLICIES not in tosca[self.TOPOLOGY_TMPL]:
+                tosca[self.TOPOLOGY_TMPL][self.POLICIES] = []
+
+            for sg in self.scale_grps:
+                sgt = {
+                    self.TYPE: self.T_SCALE_GRP,
+                }
+                sgt.update(sg)
+                tosca[self.TOPOLOGY_TMPL][self.POLICIES].append({
+                    self.SCALE_GRP: sgt
+                })
+
+        # Add initial configs
+        if len(self.initial_cfg):
+            if self.POLICIES not in tosca[self.TOPOLOGY_TMPL]:
+                tosca[self.TOPOLOGY_TMPL][self.POLICIES] = []
+
+            for icp in self.initial_cfg:
+                icpt = {
+                    self.TYPE: self.T_INITIAL_CFG,
+                }
+                icpt.update(icp)
+                tosca[self.TOPOLOGY_TMPL][self.POLICIES].append({
+                    self.INITIAL_CFG: icpt
+                })
+
+        return tosca
+
+    def get_supporting_files(self):
+        files = []
+
+        for vnfd in self.vnfds.values():
+            f = vnfd.get_supporting_files()
+            if f:
+                files.extend(f)
+
+        # Get the config files for initial config
+        for icp in self.initial_cfg:
+            if self.USER_DEF_SCRIPT in icp:
+                script = os.path.basename(icp[self.USER_DEF_SCRIPT])
+                files.append({
+                    self.TYPE: 'script',
+                    self.NAME: script,
+                    self.DEST: "{}/{}".format(self.SCRIPT_DIR, script),
+                })
+
+        # TODO (pjoseph): Add support for config scripts,
+        # charms, etc
+
+        self.log.debug(_("{0}, supporting files: {1}").format(self, files))
+        return files
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang/yang_vdu.py b/common/python/rift/mano/yang_translator/rwmano/yang/yang_vdu.py
new file mode 100644 (file)
index 0000000..7d095c1
--- /dev/null
@@ -0,0 +1,302 @@
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+
+from copy import deepcopy
+
+from rift.mano.yang_translator.common.exception import ValidationError
+from rift.mano.yang_translator.common.utils import _
+from rift.mano.yang_translator.rwmano.syntax.tosca_resource \
+    import ToscaResource
+
+import rift.package.image
+
+TARGET_CLASS_NAME = 'YangVdu'
+
+
+class YangVdu(ToscaResource):
+    '''Class for RIFT.io YANG VDU descriptor translation to TOSCA type.'''
+
+    yangtype = 'vdu'
+
+    OTHER_KEYS = (VM_FLAVOR, CLOUD_INIT, IMAGE, IMAGE_CHKSUM,
+                  VNFD_CP_REF, CP_TYPE, CLOUD_INIT_FILE,) = \
+                 ('vm_flavor', 'cloud_init', 'image', 'image_checksum',
+                  'vnfd_connection_point_ref', 'cp_type', 'cloud_init_file',)
+
+    TOSCA_MISC_KEYS = (VIRT_LINK, VIRT_BIND, VDU_INTF_NAME,
+                       VDU_INTF_TYPE) = \
+                      ('virtualLink', 'virtualBinding', 'vdu_intf_name',
+                       'vdu_intf_type')
+
+    VM_FLAVOR_MAP = {
+        'vcpu_count': 'num_cpus',
+        'memory_mb': 'mem_size',
+        'storage_gb': 'disk_size',
+    }
+
+    VM_SIZE_UNITS_MAP = {
+        'vcpu_count': '',
+        'memory_mb': ' MB',
+        'storage_gb': ' GB',
+    }
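+
+    # For illustration (hypothetical values): a YANG vm_flavor of
+    # {'vcpu_count': 2, 'memory_mb': 4096, 'storage_gb': 10} becomes the
+    # TOSCA host capability {'num_cpus': '2', 'mem_size': '4096 MB',
+    # 'disk_size': '10 GB'} via the two maps above.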
+
+    def __init__(self,
+                 log,
+                 name,
+                 type_,
+                 yang):
+        super(YangVdu, self).__init__(log,
+                                      name,
+                                      type_,
+                                      yang)
+        self.yang = yang
+        self.props = {}
+        self.ext_cp = []
+        self.int_cp = []
+        self.image = None
+        self.cloud_init_file = None
+
+    def process_vdu(self):
+        self.log.debug(_("Process VDU desc {0}: {1}").format(self.name,
+                                                             self.yang))
+
+        vdu_dic = deepcopy(self.yang)
+        vdu = {}
+
+        fields = [self.ID, self.COUNT, self.CLOUD_INIT,
+                  self.IMAGE, self.IMAGE_CHKSUM, self.CLOUD_INIT_FILE,]
+        for key in fields:
+            if key in vdu_dic:
+                vdu[key] = vdu_dic.pop(key)
+
+        self.id = vdu[self.ID]
+
+        if self.VM_FLAVOR in vdu_dic:
+            vdu[self.HOST] = {}
+            for key, value in vdu_dic.pop(self.VM_FLAVOR).items():
+                vdu[self.HOST][self.VM_FLAVOR_MAP[key]] = "{}{}". \
+                            format(value, self.VM_SIZE_UNITS_MAP[key])
+
+        if self.EXT_INTF in vdu_dic:
+            for ext_intf in vdu_dic.pop(self.EXT_INTF):
+                cp = {}
+                cp[self.NAME] = ext_intf.pop(self.VNFD_CP_REF)
+                cp[self.VDU_INTF_NAME] = ext_intf.pop(self.NAME)
+                cp[self.VDU_INTF_TYPE] = ext_intf[self.VIRT_INTF][self.TYPE_Y]
+                self.log.debug(_("{0}, External interface {1}: {2}").
+                               format(self, cp, ext_intf))
+                self.ext_cp.append(cp)
+
+        self.remove_ignored_fields(vdu_dic)
+        if len(vdu_dic):
+            self.log.warn(_("{0}, Did not process the following in "
+                            "VDU: {1}").
+                          format(self, vdu_dic))
+
+        self.log.debug(_("{0} VDU: {1}").format(self, vdu))
+        self.props = vdu
+
+    def get_cp(self, name):
+        for cp in self.ext_cp:
+            if cp[self.NAME] == name:
+                return cp
+        return None
+
+    def has_cp(self, name):
+        return self.get_cp(name) is not None
+
+    def set_cp_type(self, name, cp_type):
+        for idx, cp in enumerate(self.ext_cp):
+            if cp[self.NAME] == name:
+                cp[self.CP_TYPE] = cp_type
+                self.ext_cp[idx] = cp
+                self.log.debug(_("{0}, Updated CP: {1}").
+                               format(self, self.ext_cp[idx]))
+                return
+
+        err_msg = (_("{0}, Did not find connection point {1}").
+                   format(self, name))
+        self.log.error(err_msg)
+        raise ValidationError(message=err_msg)
+
+    def set_vld(self, name, vld_name):
+        cp = self.get_cp(name)
+        if cp:
+            cp[self.VLD] = vld_name
+        else:
+            err_msg = (_("{0}, Did not find connection point {1}").
+                       format(self, name))
+            self.log.error(err_msg)
+            raise ValidationError(message=err_msg)
+
+    def get_name(self, vnf_name):
+        # Create a unique name in case multiple VNFs use the same
+        # name for the VDU
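+        # e.g. VNF 'ping_vnfd' with a VDU named 'iovdu_0' (hypothetical
+        # names) yields 'ping_vnfd_iovdu_0'.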
+        return "{}_{}".format(vnf_name, self.name)
+
+    def generate_tosca_type(self, tosca):
+        self.log.debug(_("{0} Generate tosca types").
+                       format(self))
+
+        # Add custom artifact type
+        if self.ARTIFACT_TYPES not in tosca:
+            tosca[self.ARTIFACT_TYPES] = {}
+        if self.T_ARTF_QCOW2 not in tosca[self.ARTIFACT_TYPES]:
+            tosca[self.ARTIFACT_TYPES][self.T_ARTF_QCOW2] = {
+                self.DERIVED_FROM: 'tosca.artifacts.Deployment.Image.VM.QCOW2',
+                self.IMAGE_CHKSUM:
+                {self.TYPE: self.STRING,
+                 self.REQUIRED: self.NO},
+            }
+
+        if self.T_VDU1 not in tosca[self.NODE_TYPES]:
+            tosca[self.NODE_TYPES][self.T_VDU1] = {
+                self.DERIVED_FROM: 'tosca.nodes.nfv.VDU',
+                self.PROPERTIES: {
+                    self.COUNT:
+                    {self.TYPE: self.INTEGER,
+                     self.DEFAULT: 1},
+                    self.CLOUD_INIT:
+                    {self.TYPE: self.STRING,
+                     self.REQUIRED: self.NO,},
+                    self.CLOUD_INIT_FILE:
+                    {self.TYPE: self.STRING,
+                     self.REQUIRED: self.NO,},
+                },
+                self.CAPABILITIES: {
+                    self.VIRT_LINK: {
+                        self.TYPE: 'tosca.capabilities.nfv.VirtualLinkable'
+                    },
+                },
+            }
+
+        # Add CP type
+        if self.T_CP1 not in tosca[self.NODE_TYPES]:
+            tosca[self.NODE_TYPES][self.T_CP1] = {
+                self.DERIVED_FROM: 'tosca.nodes.nfv.CP',
+                self.PROPERTIES: {
+                    self.NAME:
+                    {self.TYPE: self.STRING,
+                     self.DESC: 'Name of the connection point'},
+                    self.CP_TYPE:
+                    {self.TYPE: self.STRING,
+                     self.DESC: 'Type of the connection point'},
+                    self.VDU_INTF_NAME:
+                    {self.TYPE: self.STRING,
+                     self.DESC: 'Name of the interface on VDU'},
+                    self.VDU_INTF_TYPE:
+                    {self.TYPE: self.STRING,
+                     self.DESC: 'Type of the interface on VDU'},
+                },
+             }
+
+        return tosca
+
+    def generate_vdu_template(self, tosca, vnf_name):
+        self.log.debug(_("{0} Generate tosca template for {1}").
+                       format(self, vnf_name))
+
+        name = self.get_name(vnf_name)
+
+        node = {}
+        node[self.TYPE] = self.T_VDU1
+
+        if self.HOST in self.props:
+            node[self.CAPABILITIES] = {
+                self.HOST: {self.PROPERTIES: self.props.pop(self.HOST)}
+            }
+        else:
+            self.log.warn(_("{0}, Does not have host requirements defined").
+                          format(self))
+
+        if self.IMAGE in self.props:
+            img_name = "{}_{}_vm_image".format(vnf_name, self.name)
+            image = "../{}/{}".format(self.IMAGE_DIR, self.props.pop(self.IMAGE))
+            self.image = image
+            node[self.ARTIFACTS] = {img_name: {
+                self.FILE: image,
+                self.TYPE: self.T_ARTF_QCOW2,
+            }}
+            if self.IMAGE_CHKSUM in self.props:
+                node[self.ARTIFACTS][img_name][self.IMAGE_CHKSUM] = \
+                                            self.props.pop(self.IMAGE_CHKSUM)
+            node[self.INTERFACES] = {'Standard': {
+                'create': img_name
+            }}
+
+        # Add cloud init script if available
+        if self.CLOUD_INIT_FILE in self.props:
+            self.cloud_init_file = self.props[self.CLOUD_INIT_FILE]
+
+        # Remove fields not required in TOSCA
+        self.props.pop(self.ID)
+        node[self.PROPERTIES] = self.props
+
+        self.log.debug(_("{0}, VDU node: {1}").format(self, node))
+        tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][name] = node
+
+        # Generate the connection point templates
+        for cp in self.ext_cp:
+            cpt = {self.TYPE: self.T_CP1}
+
+            cpt[self.REQUIREMENTS] = []
+            cpt[self.REQUIREMENTS].append({self.VIRT_BIND: {
+                self.NODE: self.get_name(vnf_name)
+            }})
+            if self.VLD in cp:
+                vld = cp.pop(self.VLD)
+                cpt[self.REQUIREMENTS].append({self.VIRT_LINK: {
+                    self.NODE: vld
+                }})
+
+            cpt[self.PROPERTIES] = cp
+            cp_name = cp[self.NAME].replace('/', '_')
+
+            self.log.debug(_("{0}, CP node {1}: {2}").
+                           format(self, cp_name, cpt))
+            tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][cp_name] = cpt
+
+        return tosca
+
+    def get_supporting_files(self):
+        files = []
+
+        if self.image is not None:
+            image_name = os.path.basename(self.image)
+
+            files.append({
+                self.TYPE: 'image',
+                self.NAME: image_name,
+                self.DEST: "{}/{}".format(self.IMAGE_DIR, image_name),
+            })
+
+        if self.cloud_init_file is not None:
+            files.append({
+                self.TYPE: 'cloud_init',
+                self.NAME: self.cloud_init_file,
+                self.DEST: "{}/{}".format(self.CLOUD_INIT, self.cloud_init_file)
+            })
+
+        self.log.debug(_("Supporting files for {} : {}").format(self, files))
+        if not len(files):
+            shutil.rmtree(out_dir)
+
+        return files
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang/yang_vld.py b/common/python/rift/mano/yang_translator/rwmano/yang/yang_vld.py
new file mode 100644 (file)
index 0000000..eb47daf
--- /dev/null
@@ -0,0 +1,127 @@
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from copy import deepcopy
+
+from rift.mano.yang_translator.common.exception import ValidationError
+from rift.mano.yang_translator.common.utils import _
+from rift.mano.yang_translator.rwmano.syntax.tosca_resource \
+    import ToscaResource
+
+TARGET_CLASS_NAME = 'YangVld'
+
+
+class YangVld(ToscaResource):
+    '''Class for RIFT.io YANG VLD descriptor translation to TOSCA type.'''
+
+    yangtype = 'vld'
+
+    OTHER_KEYS = (VNFD_CP_REF,) = \
+                 ('vnfd_connection_point_ref',)
+
+    VLD_TYPE_MAP = {
+        'ELAN': ToscaResource.T_VL1,
+    }
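+    # Only ELAN VLDs are supported; they map to the ToscaResource.T_VL1
+    # node type. Any other VLD type raises a ValidationError in
+    # process_vld().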
+
+    def __init__(self,
+                 log,
+                 name,
+                 type_,
+                 yang):
+        super(YangVld, self).__init__(log,
+                                      name,
+                                      type_,
+                                      yang)
+        self.yang = yang
+        self.props = {}
+
+    def process_vld(self, vnfds):
+        self.log.debug(_("Process VLD desc {0}").format(self.name))
+
+        dic = deepcopy(self.yang)
+
+        for key in self.REQUIRED_FIELDS:
+            self.props[key] = dic.pop(key)
+
+        self.id = self.props[self.ID]
+
+        if self.TYPE_Y in dic:
+            self.props[self.TYPE] = dic.pop(self.TYPE_Y)
+            if self.props[self.TYPE] not in self.VLD_TYPE_MAP:
+                err_msg = (_("{0}: VLD type {1} not supported").
+                           format(self, self.props[self.TYPE]))
+                self.log.error(err_msg)
+                raise ValidationError(message=err_msg)
+
+        if self.VNFD_CP_REF in dic:
+            for cp_ref in dic.pop(self.VNFD_CP_REF):
+                vnfd_idx = cp_ref.pop(self.MEM_VNF_INDEX_REF)
+                vnfd_id = cp_ref.pop(self.VNFD_ID_REF)
+                vnfd_cp = cp_ref.pop(self.VNFD_CP_REF)
+                if vnfd_idx in vnfds:
+                    vnfd = vnfds[vnfd_idx]
+                    if vnfd.id == vnfd_id:
+                        # Update the CP as linked to this VLD
+                        vnfd.update_cp_vld(vnfd_cp, self.name)
+                    else:
+                        err_msg = (_("{0}, The VNFD member index {1} and id "
+                                     "{2} did not match the VNFD {3} with "
+                                     "id {4}").format(self, vnfd_idx, vnfd_id,
+                                                      vnfd.name, vnfd.id))
+                        self.log.error(err_msg)
+                        raise ValidationError(message=err_msg)
+                else:
+                    err_msg = (_("{0}, Did not find VNFD member index {1}").
+                               format(self, vnfd_idx))
+                    self.log.error(err_msg)
+                    raise ValidationError(message=err_msg)
+
+        self.remove_ignored_fields(dic)
+        if len(dic):
+            self.log.warn(_("{0}, Did not process the following for "
+                            "VLD {1}: {2}").
+                          format(self, self.props, dic))
+        self.log.debug(_("{0}, VLD: {1}").format(self, self.props))
+
+    def generate_tosca_type(self, tosca):
+        self.log.debug(_("{0} Generate tosca types").
+                       format(self))
+
+        if self.T_VL1 not in tosca[self.NODE_TYPES]:
+            tosca[self.NODE_TYPES][self.T_VL1] = {
+                self.DERIVED_FROM: 'tosca.nodes.nfv.VL.ELAN',
+                self.PROPERTIES: {
+                    'description':
+                    {self.TYPE: self.STRING},
+                },
+            }
+
+        return tosca
+
+    def generate_tosca_template(self, tosca):
+        self.log.debug(_("{0} Generate tosca template").
+                       format(self))
+
+        node = {}
+        node[self.TYPE] = self.VLD_TYPE_MAP[self.props.pop(self.TYPE)]
+
+        # Remove fields not required in TOSCA
+        self.props.pop(self.ID)
+        self.props.pop(self.VERSION)
+        node[self.PROPERTIES] = self.props
+
+        tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][self.name] = node
+
+        return tosca
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang/yang_vnfd.py b/common/python/rift/mano/yang_translator/rwmano/yang/yang_vnfd.py
new file mode 100644 (file)
index 0000000..7449c5a
--- /dev/null
@@ -0,0 +1,393 @@
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from copy import deepcopy
+
+from rift.mano.yang_translator.common.exception import ValidationError
+from rift.mano.yang_translator.common.utils import _
+from rift.mano.yang_translator.rwmano.syntax.tosca_resource \
+    import ToscaResource
+from rift.mano.yang_translator.rwmano.yang.yang_vdu import YangVdu
+
+TARGET_CLASS_NAME = 'YangVnfd'
+
+
+class YangVnfd(ToscaResource):
+    '''Class for RIFT.io YANG VNF descriptor translation to TOSCA type.'''
+
+    yangtype = 'vnfd'
+
+    CONFIG_TYPES = ['script', 'netconf', 'rest', 'juju']
+
+    OTHER_KEYS = (MGMT_INTF, HTTP_EP, MON_PARAM) = \
+                 ('mgmt_interface', 'http_endpoint', 'monitoring_param')
+
+    def __init__(self,
+                 log,
+                 name,
+                 type_,
+                 yang):
+        super(YangVnfd, self).__init__(log,
+                                       name,
+                                       type_,
+                                       yang)
+        self.props = {}
+        self.vdus = []
+        self.mgmt_intf = {}
+        self.mon_param = []
+        self.http_ep = []
+
+    def handle_yang(self):
+        self.log.debug(_("Process VNFD desc {0}: {1}").format(self.name,
+                                                              self.yang))
+
+        def process_vnf_config(conf):
+            vnf_conf = {}
+            if self.CONFIG_ATTR in conf:
+                for key, value in conf.pop(self.CONFIG_ATTR).items():
+                    vnf_conf[key] = value
+
+            if self.CONFIG_TMPL in conf:
+                vnf_conf[self.CONFIG_TMPL] = conf.pop(self.CONFIG_TMPL)
+
+            def copy_config_details(conf_type, conf_details):
+                vnf_conf[self.CONFIG_TYPE] = conf_type
+                vnf_conf[self.CONFIG_DETAILS] = conf_details
+
+            for key in self.CONFIG_TYPES:
+                if key in conf:
+                    copy_config_details(key, conf.pop(key))
+                    break
+
+            if len(conf):
+                self.log.warn(_("{0}, Did not process all in VNF "
+                                "configuration {1}").
+                              format(self, conf))
+            self.log.debug(_("{0}, vnf config: {1}").format(self, vnf_conf))
+            self.props[self.VNF_CONFIG] = vnf_conf
+
+        def process_mgmt_intf(intf):
+            if len(self.mgmt_intf) > 0:
+                err_msg = (_("{0}, Already processed another mgmt intf {1}, "
+                             "got another {2}").
+                           format(self, self.mgmt_intf, intf))
+                self.log.error(err_msg)
+                raise ValidationError(message=err_msg)
+
+            self.mgmt_intf['protocol'] = 'tcp'
+
+            if self.PORT in intf:
+                self.mgmt_intf[self.PORT] = intf.pop(self.PORT)
+                self.props[self.PORT] = self.mgmt_intf[self.PORT]
+
+            if 'vdu_id' in intf:
+                for vdu in self.vdus:
+                    if intf['vdu_id'] == vdu.id:
+                        self.mgmt_intf[self.VDU] = vdu.get_name(self.name)
+                        intf.pop('vdu_id')
+                        break
+
+            if self.DASHBOARD_PARAMS in intf:
+                self.mgmt_intf[self.DASHBOARD_PARAMS] = \
+                                            intf.pop(self.DASHBOARD_PARAMS)
+
+            if len(intf):
+                self.log.warn(_("{0}, Did not process all in mgmt "
+                                "interface {1}").
+                              format(self, intf))
+            self.log.debug(_("{0}, Management interface: {1}").
+                           format(self, self.mgmt_intf))
+
+        def process_http_ep(eps):
+            self.log.debug("{}, HTTP EP: {}".format(self, eps))
+            for ep in eps:
+                http_ep = {'protocol': 'http'}  # Required for TOSCA
+                http_ep[self.PATH] = ep.pop(self.PATH)
+                http_ep[self.PORT] = ep.pop(self.PORT)
+                http_ep[self.POLL_INTVL] = ep.pop(self.POLL_INTVL_SECS)
+                if len(ep):
+                    self.log.warn(_("{0}, Did not process the following for "
+                                    "http ep {1}").format(self, ep))
+                self.log.debug(_("{0}, http endpoint: {1}").format(self, http_ep))
+                self.http_ep.append(http_ep)
+
+        def process_mon_param(params):
+            for param in params:
+                monp = {}
+                fields = [self.NAME, self.ID, 'value_type', 'units', 'group_tag',
+                          'json_query_method', 'http_endpoint_ref', 'widget_type',
+                          self.DESC]
+                for key in fields:
+                    if key in param:
+                        monp[key] = param.pop(key)
+
+                if len(param):
+                    self.log.warn(_("{0}, Did not process the following for "
+                                    "monitoring-param {1}").
+                                  format(self, param))
+                self.log.debug(_("{0}, Monitoring param: {1}").format(self, monp))
+                self.mon_param.append(monp)
+
+        def process_cp(cps):
+            for cp_dic in cps:
+                self.log.debug("{}, CP: {}".format(self, cp_dic))
+                name = cp_dic.pop(self.NAME)
+                for vdu in self.vdus:
+                    if vdu.has_cp(name):
+                        vdu.set_cp_type(name, cp_dic.pop(self.TYPE_Y))
+                        break
+                if len(cp_dic):
+                    self.log.warn(_("{0}, Did not process the following for "
+                                    "connection-point {1}: {2}").
+                                  format(self, name, cp_dic))
+
+        ENDPOINTS_MAP = {
+            self.MGMT_INTF: process_mgmt_intf,
+            self.HTTP_EP:  process_http_ep,
+            self.MON_PARAM: process_mon_param,
+            'connection_point': process_cp
+        }
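+        # Each key present in the descriptor is dispatched to its handler
+        # above; sections absent from the YANG are simply skipped.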
+
+        dic = deepcopy(self.yang)
+        try:
+            for key in self.REQUIRED_FIELDS:
+                self.props[key] = dic.pop(key)
+
+            self.id = self.props[self.ID]
+
+            # Process VDUs before CPs so as to update the CP struct in VDU
+            # when we process CP later
+            if self.VDU in dic:
+                for vdu_dic in dic.pop(self.VDU):
+                    vdu = YangVdu(self.log, vdu_dic.pop(self.NAME),
+                                  self.VDU, vdu_dic)
+                    vdu.process_vdu()
+                    self.vdus.append(vdu)
+
+            for key in ENDPOINTS_MAP.keys():
+                if key in dic:
+                    ENDPOINTS_MAP[key](dic.pop(key))
+
+            if self.VNF_CONFIG in dic:
+                process_vnf_config(dic.pop(self.VNF_CONFIG))
+
+            self.remove_ignored_fields(dic)
+            if len(dic):
+                self.log.warn(_("{0}, Did not process the following for "
+                                "VNFD: {1}").
+                              format(self, dic))
+            self.log.debug(_("{0}, VNFD: {1}").format(self, self.props))
+        except Exception as e:
+            err_msg = _("Exception processing VNFD {0} : {1}"). \
+                      format(self.name, e)
+            self.log.error(err_msg)
+            raise ValidationError(message=err_msg)
+
+    def update_cp_vld(self, cp_name, vld_name):
+        for vdu in self.vdus:
+            cp = vdu.get_cp(cp_name)
+            if cp:
+                vdu.set_vld(cp_name, vld_name)
+                break
+
+    def generate_tosca_type(self, tosca):
+        self.log.debug(_("{0} Generate tosca types").
+                       format(self))
+
+        for vdu in self.vdus:
+            tosca = vdu.generate_tosca_type(tosca)
+
+        # Add data_types
+        if self.T_VNF_CONFIG not in tosca[self.DATA_TYPES]:
+            tosca[self.DATA_TYPES][self.T_VNF_CONFIG] = {
+                self.PROPERTIES:
+                {self.CONFIG_TYPE:
+                 {self.TYPE: self.STRING},
+                 'config_delay':
+                 {self.TYPE: self.INTEGER,
+                  self.DEFAULT: 0,
+                  self.REQUIRED: self.NO,
+                  self.CONSTRAINTS:
+                  [{'greater_or_equal': 0}]},
+                 'config_priority':
+                 {self.TYPE: self.INTEGER,
+                  self.CONSTRAINTS:
+                  [{'greater_than': 0}]},
+                 self.CONFIG_DETAILS:
+                 {self.TYPE: self.MAP},
+                 self.CONFIG_TMPL:
+                 {self.TYPE: self.STRING,
+                  self.REQUIRED: self.NO},
+                }
+            }
+
+        # Add capability types
+        if self.CAPABILITY_TYPES not in tosca:
+            tosca[self.CAPABILITY_TYPES] = {}
+        if self.T_HTTP_EP not in tosca[self.CAPABILITY_TYPES]:
+            tosca[self.CAPABILITY_TYPES][self.T_HTTP_EP] = {
+                self.DERIVED_FROM: 'tosca.capabilities.Endpoint',
+                self.PROPERTIES: {
+                    'polling_interval':
+                    {self.TYPE: self.INTEGER},
+                    'path':
+                    {self.TYPE: self.STRING},
+                },
+            }
+
+        if self.T_MGMT_INTF not in tosca[self.CAPABILITY_TYPES]:
+            tosca[self.CAPABILITY_TYPES][self.T_MGMT_INTF] = {
+                self.DERIVED_FROM: 'tosca.capabilities.Endpoint',
+                self.PROPERTIES: {
+                    self.DASHBOARD_PARAMS:
+                    {self.TYPE: self.MAP},
+                    self.VDU:
+                    {self.TYPE: self.STRING},
+                },
+            }
+
+        if self.T_MON_PARAM not in tosca[self.CAPABILITY_TYPES]:
+            tosca[self.CAPABILITY_TYPES][self.T_MON_PARAM] = {
+                self.DERIVED_FROM: 'tosca.capabilities.nfv.Metric',
+                self.PROPERTIES: {
+                    'id':
+                    {self.TYPE: self.INTEGER},
+                    'name':
+                    {self.TYPE: self.STRING},
+                    'value_type':
+                    {self.TYPE: self.STRING,
+                     self.DEFAULT: 'INT'},
+                    'group_tag':
+                    {self.TYPE: self.STRING,
+                     self.DEFAULT: 'Group1'},
+                    'units':
+                    {self.TYPE: self.STRING},
+                    'description':
+                    {self.TYPE: self.STRING},
+                    'json_query_method':
+                    {self.TYPE: self.STRING,
+                     self.DEFAULT: 'NAMEKEY'},
+                    'http_endpoint_ref':
+                    {self.TYPE: self.STRING},
+                    'widget_type':
+                    {self.TYPE: self.STRING,
+                     self.DEFAULT: 'COUNTER'},
+                }
+            }
+
+        # Define the VNF type
+        if self.T_VNF1 not in tosca[self.NODE_TYPES]:
+            tosca[self.NODE_TYPES][self.T_VNF1] = {
+                self.DERIVED_FROM: 'tosca.nodes.nfv.VNF',
+                self.PROPERTIES: {
+                    'vnf_configuration':
+                    {self.TYPE: self.T_VNF_CONFIG},
+                    'port':
+                    {self.TYPE: self.INTEGER,
+                     self.CONSTRAINTS:
+                     [{'in_range': '[1, 65535]'}]},
+                    self.START_BY_DFLT:
+                    {self.TYPE: self.BOOL,
+                     self.DEFAULT: self.TRUE},
+                },
+                self.CAPABILITIES: {
+                    'mgmt_interface':
+                    {self.TYPE: self.T_MGMT_INTF},
+                    'http_endpoint':
+                    {self.TYPE: self.T_HTTP_EP},
+                    'monitoring_param_0':
+                    {self.TYPE: self.T_MON_PARAM},
+                    'monitoring_param_1':
+                    {self.TYPE: self.T_MON_PARAM},
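+                    # Note: the type declares two monitoring_param slots,
+                    # while generate_vnf_template() emits one capability
+                    # per monitoring param found in the descriptor.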
+                },
+                self.REQUIREMENTS: [
+                    {'vdus':
+                     {self.TYPE: 'tosca.capabilities.nfv.VirtualLinkable',
+                      self.RELATIONSHIP:
+                      'tosca.relationships.nfv.VirtualLinksTo',
+                      self.NODE: self.T_VDU1,
+                      self.OCCURENCES: '[1, UNBOUND]'}}
+                ],
+            }
+
+        return tosca
+
+    def generate_vnf_template(self, tosca, index):
+        self.log.debug(_("{0}, Generate tosca template for VNF {1}").
+                       format(self, index))
+
+        for vdu in self.vdus:
+            tosca = vdu.generate_vdu_template(tosca, self.name)
+
+        node = {}
+        node[self.TYPE] = self.T_VNF1
+
+        # Remove fields not required in TOSCA
+        self.props.pop(self.DESC)
+
+        # Update index to the member-vnf-index
+        self.props[self.ID] = index
+        node[self.PROPERTIES] = self.props
+
+        caps = {}
+        if len(self.mgmt_intf):
+            caps[self.MGMT_INTF] = {
+                self.PROPERTIES: self.mgmt_intf
+            }
+
+        if len(self.http_ep):
+            caps[self.HTTP_EP] = {
+                self.PROPERTIES: self.http_ep[0]
+            }
+            if len(self.http_ep) > 1:
+                self.log.warn(_("{0}: Currently only one HTTP endpoint "
+                                "supported: {1}").
+                              format(self, self.http_ep))
+
+        if len(self.mon_param):
+            count = 0
+            for monp in self.mon_param:
+                name = "{}_{}".format(self.MON_PARAM, count)
+                caps[name] = {self.PROPERTIES: monp}
+                count += 1
+
+        node[self.CAPABILITIES] = caps
+
+        if len(self.vdus):
+            reqs = []
+            for vdu in self.vdus:
+                reqs.append({'vdus': {self.NODE: vdu.get_name(self.name)}})
+
+            node[self.REQUIREMENTS] = reqs
+        else:
+            self.log.warn(_("{0}, Did not find any VDUS with this VNF").
+                          format(self))
+
+        self.log.debug(_("{0}, VNF node: {1}").format(self, node))
+
+        tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][self.name] = node
+
+        return tosca
+
+    def get_supporting_files(self):
+        files = []
+
+        for vdu in self.vdus:
+            f = vdu.get_supporting_files()
+            if f:
+                files.extend(f)
+
+        return files
diff --git a/common/python/rift/mano/yang_translator/rwmano/yang_translator.py b/common/python/rift/mano/yang_translator/rwmano/yang_translator.py
new file mode 100644 (file)
index 0000000..907a4a0
--- /dev/null
@@ -0,0 +1,220 @@
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import shutil
+import subprocess
+import tarfile
+
+from rift.mano.yang_translator.common.exception import ValidationError
+from rift.mano.yang_translator.common.utils import _
+from rift.mano.yang_translator.rwmano.syntax.tosca_resource \
+    import ToscaResource
+from rift.mano.yang_translator.rwmano.syntax.tosca_template \
+    import ToscaTemplate
+from rift.mano.yang_translator.rwmano.translate_descriptors \
+    import TranslateDescriptors
+
+import rift.package.image
+from rift.package.package import TarPackageArchive
+import rift.package.cloud_init
+import rift.package.script
+import rift.package.store
+
+
+class YangTranslator(object):
+    '''Invokes translation methods.'''
+
+    def __init__(self, log, yangs=None, files=None, packages=None):
+        super(YangTranslator, self).__init__()
+        self.log = log
+        self.yangs = {}
+        if yangs is not None:
+            self.yangs = yangs
+        self.files = files
+        self.archive = None
+        self.tosca_template = ToscaTemplate(log)
+        self.node_translator = None
+        self.pkgs = packages if packages is not None else []
+        log.info(_('Initialized parameters for translation.'))
+
+    def translate(self):
+        if self.files:
+            self.get_yangs()
+
+        self.node_translator = TranslateDescriptors(self.log,
+                                                    self.yangs,
+                                                    self.tosca_template)
+
+        self.tosca_template.resources = self.node_translator.translate()
+
+        return self.tosca_template.output_to_tosca()
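+
+    # Typical use (a sketch; the file name is hypothetical):
+    #   translator = YangTranslator(log, files=['ping_pong_nsd.tar.gz'])
+    #   output = translator.translate()
+    #   translator.write_output(output, output_dir='/tmp/tosca', archive=True)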
+
+    def get_yangs(self):
+        '''Get the descriptors and convert to yang instances'''
+        for filename in self.files:
+            self.log.debug(_("Load file {0}").format(filename))
+            # Only one descriptor per file
+            if tarfile.is_tarfile(filename):
+                tar = open(filename, "r+b")
+                archive = TarPackageArchive(self.log, tar)
+                pkg = archive.create_package()
+                self.pkgs.append(pkg)
+                desc_type = pkg.descriptor_type
+                if desc_type == TranslateDescriptors.NSD:
+                    if TranslateDescriptors.NSD not in self.yangs:
+                        self.yangs[TranslateDescriptors.NSD] = []
+                    self.yangs[TranslateDescriptors.NSD]. \
+                        append(pkg.descriptor_msg.as_dict())
+                elif desc_type == TranslateDescriptors.VNFD:
+                    if TranslateDescriptors.VNFD not in self.yangs:
+                        self.yangs[TranslateDescriptors.VNFD] = []
+                    self.yangs[TranslateDescriptors.VNFD]. \
+                        append(pkg.descriptor_msg.as_dict())
+                else:
+                    raise ValidationError("Unknown descriptor type: {}".
+                                          format(desc_type))
+
+    def _create_csar_files(self, output_dir, name, tmpl,
+                           archive=False):
+        if ToscaTemplate.TOSCA not in tmpl:
+            self.log.error(_("Did not find TOSCA template for {0}").
+                           format(name))
+            return
+
+        # Create a subdirectory for each NS template
+        subdir = os.path.join(output_dir, name)
+        if os.path.exists(subdir):
+            shutil.rmtree(subdir)
+        os.makedirs(subdir)
+
+        # Create the definitions dir
+        def_dir = os.path.join(subdir, 'Definitions')
+        os.makedirs(def_dir)
+        entry_file = os.path.join(def_dir, name+'.yaml')
+        self.log.debug(_("Writing file {0}").
+                       format(entry_file))
+        with open(entry_file, 'w+') as f:
+            f.write(tmpl[ToscaTemplate.TOSCA])
+
+        # Create the Tosca meta
+        meta_dir = os.path.join(subdir, 'TOSCA-Metadata')
+        os.makedirs(meta_dir)
+        meta = '''TOSCA-Meta-File-Version: 1.0
+CSAR-Version: 1.1
+Created-By: RIFT.io
+Entry-Definitions: Definitions/'''
+        meta_data = "{}{}".format(meta, name+'.yaml')
+        meta_file = os.path.join(meta_dir, 'TOSCA.meta')
+        self.log.debug(_("Writing file {0}:\n{1}").
+                       format(meta_file, meta_data))
+        with open(meta_file, 'w+') as f:
+            f.write(meta_data)
+
+        # Copy other supporting files
+        if ToscaTemplate.FILES in tmpl:
+            for f in tmpl[ToscaTemplate.FILES]:
+                self.log.debug(_("Copy supporting file {0}").format(f))
+
+                # Search in source packages
+                if len(self.pkgs):
+                    for pkg in self.pkgs:
+                        # TODO(pjoseph): Need to add support for other file types
+                        fname = f[ToscaResource.NAME]
+                        dest_path = os.path.join(subdir, f[ToscaResource.DEST])
+                        ftype = f[ToscaResource.TYPE]
+
+                        if ftype == 'image':
+                            image_file_map = rift.package.image.get_package_image_files(pkg)
+
+                            if fname in image_file_map:
+                                self.log.debug(_("Extracting image {0} to {1}").
+                                               format(fname, dest_path))
+                                pkg.extract_file(image_file_map[fname],
+                                                 dest_path)
+                                break
+
+                        elif ftype == 'script':
+                            script_file_map = \
+                                rift.package.script.PackageScriptExtractor.package_script_files(pkg)
+                            if fname in script_file_map:
+                                self.log.debug(_("Extracting script {0} to {1}").
+                                               format(fname, dest_path))
+                                pkg.extract_file(script_file_map[fname],
+                                                 dest_path)
+                                break
+
+                        elif ftype == 'cloud_init':
+                            script_file_map = \
+                                rift.package.cloud_init.PackageCloudInitExtractor.package_script_files(pkg)
+                            if fname in script_file_map:
+                                self.log.debug(_("Extracting script {0} to {1}").
+                                               format(fname, dest_path))
+                                pkg.extract_file(script_file_map[fname],
+                                                 dest_path)
+                                break
+
+                        else:
+                            self.log.warn(_("Unknown file type {0}: {1}").
+                                          format(ftype, f))
+
+                #TODO(pjoseph): Search in other locations
+
+        # Create the ZIP archive
+        if archive:
+            prev_dir = os.getcwd()
+            os.chdir(subdir)
+
+            try:
+                zip_file = name + '.zip'
+                zip_path = os.path.join(output_dir, zip_file)
+                self.log.debug(_("Creating zip file {0}").format(zip_path))
+                zip_cmd = "zip -r {}.partial ."
+                subprocess.check_call(zip_cmd.format(zip_path),
+                                      shell=True,
+                                      stdout=subprocess.DEVNULL)
+                mv_cmd = "mv {0}.partial {0}"
+                subprocess.check_call(mv_cmd.format(zip_path),
+                                      shell=True,
+                                      stdout=subprocess.DEVNULL)
+                shutil.rmtree(subdir)
+                return zip_path
+
+            except subprocess.CalledProcessError as e:
+                self.log.error(_("Creating CSAR archive failed: {0}").
+                               format(e))
+
+            except Exception as e:
+                self.log.exception(e)
+
+            finally:
+                os.chdir(prev_dir)
+
+    def write_output(self, output,
+                     output_dir=None,
+                     archive=False,):
+        if output:
+            zip_files = []
+            for key in output.keys():
+                if output_dir:
+                    zf = self._create_csar_files(output_dir,
+                                                 key,
+                                                 output[key],
+                                                 archive=archive,)
+                    if zf:
+                        zip_files.append(zf)
+                else:
+                    print(_("TOSCA Template {0}:\n{1}").
+                          format(key, output[key]))
+            return zip_files
diff --git a/common/python/rift/mano/yang_translator/shell.py b/common/python/rift/mano/yang_translator/shell.py
new file mode 100644 (file)
index 0000000..f353e92
--- /dev/null
@@ -0,0 +1,166 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Copyright 2016 RIFT.io Inc
+
+
+import argparse
+import logging
+import logging.config
+import os
+
+import magic
+
+from rift.mano.yang_translator.common.utils import _
+from rift.mano.yang_translator.rwmano.yang_translator import YangTranslator
+
+
+"""
+Test the yang translation from command line as:
+#translator
+  --template-file=<path to the JSON template or tar.gz>
+  --template-type=<type of template e.g. yang>
+  --parameters="purpose=test"
+  --output_dir=<output directory>
+  --validate_only
+Takes four user arguments,
+1. type of translation (e.g. yang) (required)
+2. Path to the file that needs to be translated (required)
+3. Input parameters (optional)
+4. Write to output files in a dir (optional), else print on screen
+
+In order to use translator to only validate template,
+without actual translation, pass --validate-only along with
+other required arguments.
+
+"""
+
+
+class TranslatorShell(object):
+
+    SUPPORTED_TYPES = ['yang']
+    COPY_DIRS = ['images']
+    SUPPORTED_INPUTS = (TAR, JSON, XML, YAML) = ('tar', 'json', 'xml', 'yaml')
+
+    def _parse_args(self, raw_args=None):
+        parser = argparse.ArgumentParser(
+            description='RIFT.io YANG translator for descriptors')
+        parser.add_argument(
+            "-f",
+            "--template-file",
+            nargs="+",
+            required=True,
+            action="append",
+            help="Template file to translate")
+        parser.add_argument(
+            "-o",
+            "--output-dir",
+            default=None,
+            help="Directory to output")
+        parser.add_argument(
+            "-p", "--parameters",
+            help="Input parameters")
+        parser.add_argument(
+            "--archive",
+            help="Create a ZIP archive",
+            action="store_true")
+        parser.add_argument(
+            "--debug",
+            help="Enable debug logging",
+            action="store_true")
+        if raw_args:
+            args = parser.parse_args(raw_args)
+        else:
+            args = parser.parse_args()
+        return args
+
+    def main(self, raw_args=None, log=None):
+        args = self._parse_args(raw_args)
+        if log is None:
+            if args.debug:
+                logging.basicConfig(level=logging.DEBUG)
+            else:
+                logging.basicConfig(level=logging.ERROR)
+            log = logging.getLogger("yang-translator")
+
+        log.debug(_("Args passed: {0}").format(args))
+        self.log = log
+        self.in_files = []
+        self.ftype = None
+        for f in args.template_file:
+            path = os.path.abspath(f[0])
+            if not os.path.isfile(path):
+                msg = _("The path %(path)s is not a valid file.") % {
+                    'path': path}
+                log.error(msg)
+                raise ValueError(msg)
+            # Get the file type
+            ftype = self._get_file_type(path)
+            if self.ftype is None:
+                self.ftype = ftype
+            elif self.ftype != ftype:
+                msg = (_("All input files hould be of same type"))
+                log.error(msg)
+                raise ValueError(msg)
+            self.in_files.append(path)
+
+        self.log.debug(_("Input files are of type {0}").
+                       format(self.ftype))
+
+        self.archive = None
+        self._translate(output_dir=args.output_dir,
+                        archive=args.archive)
+
+    def _translate(self, output_dir=None, archive=False):
+        output = None
+        self.log.debug(_('Loading the yang template for {0}.').
+                       format(self.in_files))
+        translator = YangTranslator(self.log, files=self.in_files)
+        self.log.debug(_('Translating the yang template for {0}.').
+                       format(self.in_files))
+        output = translator.translate()
+        if output:
+            if output_dir:
+                translator.write_output(output,
+                                        output_dir=output_dir,
+                                        archive=archive)
+            else:
+                for key in output.keys():
+                    print(_("TOSCA Template {0}:\n{1}").
+                          format(key, output[key]))
+        else:
+            self.log.error(_("Did not get any translated output!!"))
+
+    def _get_file_type(self, path):
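+        # Note: this uses the libmagic bindings shipped with the 'file'
+        # utility (magic.open / m.load / m.file), not the pip
+        # 'python-magic' package, whose API differs (magic.from_file).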
+        m = magic.open(magic.MAGIC_MIME)
+        m.load()
+        typ = m.file(path)
+        if typ.startswith('text/plain'):
+            # Assume to be yaml
+            return self.YAML
+        elif typ.startswith('application/x-gzip'):
+            return self.TAR
+        else:
+            msg = _("The file {0} is not a supported type: {1}"). \
+                  format(path, typ)
+            self.log.error(msg)
+            raise ValueError(msg)
+
+
+def main(args=None, log=None):
+    TranslatorShell().main(raw_args=args, log=log)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/common/python/rift/mano/yang_translator/test/data/ping_pong_tosca.yaml b/common/python/rift/mano/yang_translator/test/data/ping_pong_tosca.yaml
new file mode 100644 (file)
index 0000000..9a35a7e
--- /dev/null
@@ -0,0 +1,390 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+description: Toy NS
+metadata:
+  ID: ping_pong_nsd
+  vendor: RIFT.io
+  version: 1.0
+data_types:
+  tosca.datatypes.network.riftio.vnf_configuration:
+    properties:
+      config_delay:
+        constraints:
+        - greater_or_equal: 0
+        default: 0
+        required: no
+        type: integer
+      config_details:
+        type: map
+      config_priority:
+        constraints:
+        - greater_than: 0
+        type: integer
+      config_template:
+        required: no
+        type: string
+      config_type:
+        type: string
+capability_types:
+  tosca.capabilities.riftio.mgmt_interface_type:
+    derived_from: tosca.capabilities.Endpoint
+    properties:
+      dashboard_params:
+        type: map
+      vdu:
+        type: string
+  tosca.capabilities.riftio.http_endpoint_type:
+    derived_from: tosca.capabilities.Endpoint
+    properties:
+      path:
+        type: string
+      polling_interval:
+        type: integer
+  tosca.capabilities.riftio.monitoring_param:
+    derived_from: tosca.capabilities.nfv.Metric
+    properties:
+      description:
+        type: string
+      group_tag:
+        default: Group1
+        type: string
+      http_endpoint_ref:
+        type: string
+      id:
+        type: integer
+      json_query_method:
+        default: NAMEKEY
+        type: string
+      name:
+        type: string
+      units:
+        type: string
+      value_type:
+        default: INT
+        type: string
+      widget_type:
+        default: COUNTER
+        type: string
+node_types:
+  tosca.nodes.riftio.CP1:
+    derived_from: tosca.nodes.nfv.CP
+    properties:
+      cp_type:
+        description: Type of the connection point
+        type: string
+      name:
+        description: Name of the connection point
+        type: string
+      vdu_intf_name:
+        description: Name of the interface on VDU
+        type: string
+      vdu_intf_type:
+        description: Type of the interface on VDU
+        type: string
+  tosca.nodes.riftio.VL1:
+    derived_from: tosca.nodes.nfv.VL.ELAN
+    properties:
+      description:
+        type: string
+  tosca.nodes.riftio.VNF1:
+    derived_from: tosca.nodes.nfv.VNF
+    properties:
+      port:
+        constraints:
+        - in_range: [1, 65535]
+        type: integer
+      start_by_default:
+        type: boolean
+        default: true
+      vnf_configuration:
+        type: tosca.datatypes.network.riftio.vnf_configuration
+    capabilities:
+      http_endpoint:
+        type: tosca.capabilities.riftio.http_endpoint_type
+      mgmt_interface:
+        type: tosca.capabilities.riftio.mgmt_interface_type
+      monitoring_param_0:
+        type: tosca.capabilities.riftio.monitoring_param
+      monitoring_param_1:
+        type: tosca.capabilities.riftio.monitoring_param
+    requirements:
+    - vdus:
+        node: tosca.nodes.riftio.VDU1
+        occurences: [1, UNBOUND]
+        relationship: tosca.relationships.nfv.VirtualLinksTo
+        type: tosca.capabilities.nfv.VirtualLinkable
+  tosca.nodes.riftio.VDU1:
+    derived_from: tosca.nodes.nfv.VDU
+    properties:
+      cloud_init:
+        default: #cloud-config
+        type: string
+      count:
+        default: 1
+        type: integer
+    capabilities:
+      virtualLink:
+        type: tosca.capabilities.nfv.VirtualLinkable
+group_types:
+  tosca.groups.riftio.ConfigPrimitives:
+    derived_from: tosca.policies.Root
+    properties:
+      primitive: map
+policy_types:
+  tosca.policies.riftio.InitialConfigPrimitive:
+    derived_from: tosca.policies.Root
+    properties:
+      name:
+        type: string
+      parameter:
+        type: map
+      seq:
+        type: integer
+      user_defined_script:
+        type: string
+  tosca.policies.riftio.ScalingGroup:
+    derived_from: tosca.policies.Root
+    properties:
+      config_actions:
+        type: map
+      max_instance_count:
+        type: integer
+      min_instance_count:
+        type: integer
+      name:
+        type: string
+      vnfd_members:
+        type: map
+topology_template:
+  policies:
+  - scaling_group_descriptor:
+      config_actions:
+        post_scale_out: ping config
+      max_instance_count: 10
+      min_instance_count: 1
+      name: ping_group
+      type: tosca.policies.riftio.ScalingGroup
+      vnfd_members:
+        ping_vnfd: 1
+  - initial_config_primitive:
+      name: start traffic
+      seq: 1
+      type: tosca.policies.riftio.InitialConfigPrimitive
+      user_defined_script: start_traffic.py
+  groups:
+    config_primitive:
+      type: tosca.groups.riftio.ConfigPrimitives
+      members:
+      - ping_vnfd
+      - pong_vnfd
+      properties:
+        primitives:
+          ping config:
+            user_defined_script: ping_config.py
+  inputs:
+    vendor:
+      type: string
+      description: Translated from YANG
+  node_templates:
+    ping_vnfd:
+      type: tosca.nodes.riftio.VNF1
+      properties:
+        id: 1
+        port: 18888
+        start_by_default: false
+        vendor: RIFT.io
+        version: 1.0
+        vnf_configuration:
+          config_delay: 0
+          config_details:
+            script_type: bash
+          config_priority: 2
+          config_template: "\n#!/bin/bash\n\n# Rest API config\nping_mgmt_ip=<rw_mgmt_ip>\n\
+            ping_mgmt_port=18888\n\n# VNF specific configuration\npong_server_ip=<rw_connection_point_name\
+            \ pong_vnfd/cp0>\nping_rate=5\nserver_port=5555\n\n# Make rest API calls\
+            \ to configure VNF\ncurl -D /dev/stdout \\\n    -H \"Accept: application/vnd.yang.data+xml\"\
+            \ \\\n    -H \"Content-Type: application/vnd.yang.data+json\" \\\n   \
+            \ -X POST \\\n    -d \"{\\\"ip\\\":\\\"$pong_server_ip\\\", \\\"port\\\
+            \":$server_port}\" \\\n    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/server\n\
+            rc=$?\nif [ $rc -ne 0 ]\nthen\n    echo \"Failed to set server info for\
+            \ ping!\"\n    exit $rc\nfi\n\ncurl -D /dev/stdout \\\n    -H \"Accept:\
+            \ application/vnd.yang.data+xml\" \\\n    -H \"Content-Type: application/vnd.yang.data+json\"\
+            \ \\\n    -X POST \\\n    -d \"{\\\"rate\\\":$ping_rate}\" \\\n    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/rate\n\
+            rc=$?\nif [ $rc -ne 0 ]\nthen\n    echo \"Failed to set ping rate!\"\n\
+            \    exit $rc\nfi\n\nexit 0\n"
+          config_type: script
+      capabilities:
+        http_endpoint:
+          properties:
+            path: api/v1/ping/stats
+            polling_interval: 2
+            port: 18888
+            protocol: http
+        mgmt_interface:
+          properties:
+            dashboard_params:
+              path: api/v1/ping/stats
+              port: 18888
+            port: 18888
+            protocol: tcp
+            vdu: ping_vnfd_iovdu_0
+        monitoring_param_0:
+          properties:
+            description: no of ping requests
+            group_tag: Group1
+            http_endpoint_ref: api/v1/ping/stats
+            id: 1
+            json_query_method: NAMEKEY
+            name: ping-request-tx-count
+            units: packets
+            value_type: INT
+            widget_type: COUNTER
+        monitoring_param_1:
+          properties:
+            description: no of ping responses
+            group_tag: Group1
+            http_endpoint_ref: api/v1/ping/stats
+            id: 2
+            json_query_method: NAMEKEY
+            name: ping-response-rx-count
+            units: packets
+            value_type: INT
+            widget_type: COUNTER
+      requirements:
+      - vdus:
+          node: ping_vnfd_iovdu_0
+    pong_vnfd_iovdu_0:
+      type: tosca.nodes.riftio.VDU1
+      properties:
+        cloud_init: "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\n\
+          ssh_pwauth: True\nruncmd:\n  - [ systemctl, daemon-reload ]\n  - [ systemctl,\
+          \ enable, pong.service ]\n  - [ systemctl, start, --no-block, pong.service\
+          \ ]\n  - [ ifup, eth1 ]\n"
+        count: 1
+      capabilities:
+        host:
+          properties:
+            disk_size: 4 GB
+            mem_size: 512 MB
+            num_cpus: 1
+      artifacts:
+        pong_vnfd_iovdu_0_vm_image:
+          file: ../images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2
+          image_checksum: 1234567890abcdefg
+          type: tosca.artifacts.Deployment.Image.riftio.QCOW2
+      interfaces:
+        Standard:
+          create: pong_vnfd_iovdu_0_vm_image
+    pong_vnfd_cp0:
+      type: tosca.nodes.riftio.CP1
+      properties:
+        cp_type: VPORT
+        name: pong_vnfd/cp0
+        vdu_intf_name: eth0
+        vdu_intf_type: VIRTIO
+      requirements:
+      - virtualBinding:
+          node: pong_vnfd_iovdu_0
+      - virtualLink:
+          node: ping_pong_vld
+    ping_pong_vld:
+      type: tosca.nodes.riftio.VL1
+      properties:
+        description: Toy VL
+        vendor: RIFT.io
+    ping_vnfd_cp0:
+      type: tosca.nodes.riftio.CP1
+      properties:
+        cp_type: VPORT
+        name: ping_vnfd/cp0
+        vdu_intf_name: eth0
+        vdu_intf_type: VIRTIO
+      requirements:
+      - virtualBinding:
+          node: ping_vnfd_iovdu_0
+      - virtualLink:
+          node: ping_pong_vld
+    pong_vnfd:
+      type: tosca.nodes.riftio.VNF1
+      properties:
+        id: 2
+        port: 18889
+        vendor: RIFT.io
+        version: 1.0
+        vnf_configuration:
+          config_delay: 60
+          config_details:
+            script_type: bash
+          config_priority: 1
+          config_template: "\n#!/bin/bash\n\n# Rest API configuration\npong_mgmt_ip=<rw_mgmt_ip>\n\
+            pong_mgmt_port=18889\n# username=<rw_username>\n# password=<rw_password>\n\
+            \n# VNF specific configuration\npong_server_ip=<rw_connection_point_name\
+            \ pong_vnfd/cp0>\nserver_port=5555\n\n# Make Rest API calls to configure\
+            \ VNF\ncurl -D /dev/stdout \\\n    -H \"Accept: application/vnd.yang.data+xml\"\
+            \ \\\n    -H \"Content-Type: application/vnd.yang.data+json\" \\\n   \
+            \ -X POST \\\n    -d \"{\\\"ip\\\":\\\"$pong_server_ip\\\", \\\"port\\\
+            \":$server_port}\" \\\n    http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/server\n\
+            rc=$?\nif [ $rc -ne 0 ]\nthen\n    echo \"Failed to set server(own) info\
+            \ for pong!\"\n    exit $rc\nfi\n\nexit 0\n"
+          config_type: script
+      capabilities:
+        http_endpoint:
+          properties:
+            path: api/v1/pong/stats
+            polling_interval: 2
+            port: 18889
+            protocol: http
+        mgmt_interface:
+          properties:
+            dashboard_params:
+              path: api/v1/pong/stats
+              port: 18889
+            port: 18889
+            protocol: tcp
+            vdu: pong_vnfd_iovdu_0
+        monitoring_param_0:
+          properties:
+            description: no of ping requests
+            group_tag: Group1
+            http_endpoint_ref: api/v1/pong/stats
+            id: 1
+            json_query_method: NAMEKEY
+            name: ping-request-rx-count
+            units: packets
+            value_type: INT
+            widget_type: COUNTER
+        monitoring_param_1:
+          properties:
+            description: no of ping responses
+            group_tag: Group1
+            http_endpoint_ref: api/v1/pong/stats
+            id: 2
+            json_query_method: NAMEKEY
+            name: ping-response-tx-count
+            units: packets
+            value_type: INT
+            widget_type: COUNTER
+      requirements:
+      - vdus:
+          node: pong_vnfd_iovdu_0
+    ping_vnfd_iovdu_0:
+      type: tosca.nodes.riftio.VDU1
+      properties:
+        cloud_init: "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\n\
+          ssh_pwauth: True\nruncmd:\n  - [ systemctl, daemon-reload ]\n  - [ systemctl,\
+          \ enable, ping.service ]\n  - [ systemctl, start, --no-block, ping.service\
+          \ ]\n  - [ ifup, eth1 ]\n"
+        count: 1
+      capabilities:
+        host:
+          properties:
+            disk_size: 4 GB
+            mem_size: 512 MB
+            num_cpus: 1
+      artifacts:
+        ping_vnfd_iovdu_0_vm_image:
+          file: ../images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2
+          image_checksum: 1234567890abcdefg
+          type: tosca.artifacts.Deployment.Image.riftio.QCOW2
+      interfaces:
+        Standard:
+          create: ping_vnfd_iovdu_0_vm_image
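
Note: the expected TOSCA output above is plain YAML, so it can be sanity-checked
with PyYAML. A minimal sketch (assuming the template is saved as
ping_pong_tosca.yaml, the name used by the unit test below); note also that the
VDU1 cloud_init default must be quoted, since an unquoted '#cloud-config' would
be read as a YAML comment and yield a null default:

    import yaml

    with open('ping_pong_tosca.yaml') as f:
        tmpl = yaml.safe_load(f)

    # Render the escaped config_template back into a readable bash script
    ping = tmpl['topology_template']['node_templates']['ping_vnfd']
    print(ping['properties']['vnf_configuration']['config_template'])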
diff --git a/common/python/rift/mano/yang_translator/test/data/yang_helloworld.json b/common/python/rift/mano/yang_translator/test/data/yang_helloworld.json
new file mode 100644 (file)
index 0000000..e5ff679
--- /dev/null
@@ -0,0 +1,25 @@
+module helloworld {
+
+    namespace "http://helloworld.com/ns/helloworld";
+
+    prefix "helloworld";
+
+    organization  "helloworld organization";
+
+    description
+      "helloworld module";
+
+    revision 2013-04-02 {
+        description
+            "Initial revision";
+    }
+
+    container helloworld {
+        description
+           "Helloworld example for creating YANG-netconfd SIL modules";
+        leaf message {
+            config false;
+            type string;
+        }
+    }
+}
diff --git a/common/python/rift/mano/yang_translator/test/data/yang_helloworld_invalid.json b/common/python/rift/mano/yang_translator/test/data/yang_helloworld_invalid.json
new file mode 100644 (file)
index 0000000..1db0555
--- /dev/null
@@ -0,0 +1,28 @@
+module helloworld {
+
+    namespace "http://helloworld.com/ns/helloworld";
+
+    prefix "helloworld";
+
+    organization  "helloworld organization";
+
+    description
+      "helloworld module";
+
+    revision 2013-04-02 {
+        description
+            "Initial revision";
+    }
+
+    container helloworld {
+        description
+           "Helloworld example for creating YANG-netconfd SIL modules";
+        leaf message {
+            config false;
+            type string;
+        }
+        leaf invalid {
+          type invalid;
+        }
+    }
+}
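
Note: the two modules above (one clean, one carrying a bogus 'type invalid;'
leaf) are smoke-test inputs for the YANG translator. A minimal sketch of
driving the translator shell against them directly, using the flag spellings
from the unit test below (paths are illustrative):

    import rift.mano.yang_translator.shell as shell

    # Validation only; the clean module should pass, while the module with
    # 'type invalid;' is expected to be rejected.
    shell.main(['--template-file=data/yang_helloworld.json',
                '--validate-only'])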
diff --git a/common/python/rift/mano/yang_translator/test/yang_translator_ut.py b/common/python/rift/mano/yang_translator/test/yang_translator_ut.py
new file mode 100755 (executable)
index 0000000..100aeb5
--- /dev/null
@@ -0,0 +1,217 @@
+#!/usr/bin/env python3
+
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import argparse
+import logging
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+import xmlrunner
+
+import unittest
+
+import rift.mano.examples.ping_pong_nsd as ping_pong_nsd
+
+import rift.mano.tosca_translator.shell as tshell
+
+import rift.mano.utils.compare_desc as cmpdesc
+
+from rift.mano.tosca_translator.common.utils import ChecksumUtils
+
+from rift.mano.yang_translator.common.utils import _
+import rift.mano.yang_translator.shell as shell
+
+
+_TRUE_VALUES = ('True', 'true', '1', 'yes')
+
+
+class PingPongDescriptors(object):
+
+    def __init__(self, output_dir, log):
+        ping_vnfd, pong_vnfd, nsd = \
+                ping_pong_nsd.generate_ping_pong_descriptors(
+                    fmt='yaml',
+                    write_to_file=True,
+                    out_dir=output_dir,
+                    pingcount=1,
+                    external_vlr_count=1,
+                    internal_vlr_count=0,
+                    num_vnf_vms=1,
+                    ping_md5sum='1234567890abcdefg',
+                    pong_md5sum='1234567890abcdefg',
+                    mano_ut=False,
+                    use_scale_group=True,
+                    use_mon_params=True,
+                    use_placement_group = False,
+                )
+
+        # Create the tar files in output dir
+        def create_archive(desc):
+            # Create checksum file
+            cur_dir = os.path.join(output_dir, desc)
+            flist = {}
+            for root, dirs, files in os.walk(cur_dir):
+                rel_dir = root.replace(cur_dir+'/', '')
+                for f in files:
+                    fpath = os.path.join(root, f)
+                    flist[os.path.join(rel_dir, f)] = \
+                                        ChecksumUtils.get_md5(fpath)
+            log.debug(_("Files in {}: {}").format(cur_dir, flist))
+
+            chksumfile = os.path.join(cur_dir, 'checksums.txt')
+            with open(chksumfile, 'w') as c:
+                for key in sorted(flist.keys()):
+                    c.write("{}  {}\n".format(flist[key], key))
+
+            # Create the tar archive
+            tar_cmd = "tar zcvf {0}.tar.gz {0}"
+            subprocess.check_call(tar_cmd.format(desc),
+                                  shell=True,
+                                  stdout=subprocess.DEVNULL)
+
+        prevdir = os.getcwd()
+        os.chdir(output_dir)
+        for d in os.listdir(output_dir):
+            create_archive(d)
+        os.chdir(prevdir)
+
+class TestYangTranslator(unittest.TestCase):
+
+    yang_helloworld = os.path.join(
+        os.path.dirname(os.path.abspath(__file__)),
+        "data/yang_helloworld.json")
+    template_file = '--template-file=' + yang_helloworld
+    template_validation = "--validate-only"
+    debug="--debug"
+    failure_msg = _('The program raised an exception unexpectedly.')
+
+    default_timeout = 0
+    log_level = logging.WARN
+    log = None
+
+    @classmethod
+    def setUpClass(cls):
+        fmt = logging.Formatter(
+            '%(asctime)-23s %(levelname)-5s  (%(name)s@%(process)d:%(filename)s:%(lineno)d) - %(message)s')
+        stderr_handler = logging.StreamHandler(stream=sys.stderr)
+        stderr_handler.setFormatter(fmt)
+        logging.basicConfig(level=cls.log_level)
+        cls.log = logging.getLogger('yang-translator-ut')
+        cls.log.addHandler(stderr_handler)
+
+        cls.desc_dir = tempfile.mkdtemp()
+        PingPongDescriptors(cls.desc_dir, cls.log)
+        cls.log.debug("Yang comaprison descs in {}".format(cls.desc_dir))
+
+    @classmethod
+    def tearDownClass(cls):
+        '''Clean up temporary directory'''
+        # Remove directory if not debug level
+        if cls.log_level != logging.DEBUG:
+            shutil.rmtree(cls.desc_dir)
+        else:
+            cls.log.warning("Descriptor directory: {}".format(cls.desc_dir))
+
+    def test_missing_arg(self):
+        self.assertRaises(SystemExit, shell.main, '')
+
+    def test_invalid_file_arg(self):
+        self.assertRaises(SystemExit, shell.main, 'translate me')
+
+    def test_invalid_file_value(self):
+        self.assertRaises(SystemExit,
+                          shell.main,
+                          ('--template-file=template.txt'))
+
+    def test_invalid_type_value(self):
+        self.assertRaises(SystemExit,
+                          shell.main,
+                          (self.template_file,
+                           '--template-type=xyz'))
+
+    def compare_tosca(self, gen_desc, exp_desc):
+        gen = "--generated="+gen_desc
+        exp = "--expected="+exp_desc
+        cmpdesc.main([gen, exp])
+
+    def test_output(self):
+        test_base_dir = os.path.join(os.path.dirname(
+            os.path.abspath(__file__)), 'data')
+        temp_dir = tempfile.mkdtemp()
+        args = []
+        for f in os.listdir(self.desc_dir):
+            fpath = os.path.join(self.desc_dir, f)
+            if os.path.isfile(fpath):
+                template = '--template-file='+fpath
+                args.append(template)
+        output_dir = "--output-dir=" + temp_dir
+        args.append(output_dir)
+        self.log.debug("Args passed: {}".format(args))
+
+        try:
+            shell.main(args, log=self.log)
+
+            # Check the dirs are present
+            out_dir = os.path.join(temp_dir, 'ping_pong_nsd')
+            self.assertTrue(os.path.isdir(out_dir))
+            dirs = os.listdir(out_dir)
+            expected_dirs = ['TOSCA-Metadata', 'Definitions']
+            self.assertTrue(set(expected_dirs) <= set(dirs))
+
+            # Compare the descriptors
+            gen_desc = os.path.join(out_dir, 'Definitions', 'ping_pong_nsd.yaml')
+            exp_desc = os.path.join(test_base_dir,
+                                    'ping_pong_tosca.yaml')
+            self.compare_tosca(gen_desc, exp_desc)
+
+            # Convert back to yang and compare
+            template = '--template-file='+gen_desc
+            yang_out_dir = os.path.join(temp_dir, 'ping_pong_yang')
+            output_dir = "--output-dir=" + yang_out_dir
+            tshell.main([template, output_dir], log=self.log)
+
+            # Check the dirs are present
+            dirs = os.listdir(yang_out_dir)
+            self.assertTrue(len(dirs) >= 3)
+
+        except Exception as e:
+            self.log.exception(e)
+            self.fail(_("Exception {}").format(e))
+
+        finally:
+            if temp_dir:
+                shutil.rmtree(temp_dir)
+
+
+def main():
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unittest_args = parser.parse_known_args()
+    if args.no_runner:
+        runner = None
+
+    TestYangTranslator.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
+
+if __name__ == '__main__':
+    main()
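
Note: outside of CI, the same round trip that test_output exercises can be
driven by hand. A rough sketch (directories are illustrative):

    import rift.mano.yang_translator.shell as yshell
    import rift.mano.tosca_translator.shell as tshell

    # Forward: YANG descriptor archive -> TOSCA
    yshell.main(['--template-file=/tmp/descs/ping_pong_nsd.tar.gz',
                 '--output-dir=/tmp/tosca_out'])

    # Reverse: generated TOSCA -> YANG again, for comparison
    tshell.main(['--template-file=/tmp/tosca_out/ping_pong_nsd/Definitions/ping_pong_nsd.yaml',
                 '--output-dir=/tmp/yang_out'])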
diff --git a/common/python/rift/mano/yang_translator/translator_logging.conf b/common/python/rift/mano/yang_translator/translator_logging.conf
new file mode 100644 (file)
index 0000000..968ebc9
--- /dev/null
@@ -0,0 +1,43 @@
+
+[loggers]
+keys=root,yang-translator
+
+[handlers]
+keys=RotatingFileHandler,SysLogHandler,NullHandler
+
+[formatters]
+keys=form01
+
+[logger_root]
+level=DEBUG
+handlers=NullHandler
+
+[logger_yang-translator]
+level=INFO
+# one of these handlers can be removed based on requirements
+handlers=SysLogHandler, RotatingFileHandler
+qualname=yang-translator
+propagate=1
+
+[handler_RotatingFileHandler]
+class=handlers.RotatingFileHandler
+level=INFO
+formatter=form01
+#rotation happens after 100MB
+args=('/var/log/yang-translator.log', 'a', 100000000, 5, 'utf8')
+
+[handler_SysLogHandler]
+class=handlers.SysLogHandler
+formatter=form01
+level=INFO
+args=('/dev/log', handlers.SysLogHandler.LOG_SYSLOG)
+
+[handler_NullHandler]
+class=NullHandler
+formatter=form01
+level=DEBUG
+args=()
+
+[formatter_form01]
+format = %(asctime)s - %(name)s - %(levelname)s - %(filename)s : %(message)s
+datefmt =
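
Note: this file follows the stdlib logging.config format, so a consumer can
load it with fileConfig. A minimal sketch (assumes /var/log is writable and
/dev/log exists, per the handler args above):

    import logging
    import logging.config

    logging.config.fileConfig('translator_logging.conf',
                              disable_existing_loggers=False)
    log = logging.getLogger('yang-translator')
    log.info('yang translator logging configured')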
diff --git a/common/python/rift/mano/yang_translator/yang-translator b/common/python/rift/mano/yang_translator/yang-translator
new file mode 100755 (executable)
index 0000000..a4c0ee6
--- /dev/null
@@ -0,0 +1,21 @@
+#!/usr/bin/env python3
+
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from rift.mano.yang_translator import shell as translator_shell
+
+if __name__ == '__main__':
+    translator_shell.main()
diff --git a/common/python/test/CMakeLists.txt b/common/python/test/CMakeLists.txt
new file mode 100644 (file)
index 0000000..1abb50d
--- /dev/null
@@ -0,0 +1,9 @@
+# Creation Date: 2016/1/12
+# RIFT_IO_STANDARD_CMAKE_COPYRIGHT_HEADER(END)
+
+cmake_minimum_required(VERSION 2.8)
+
+rift_py3test(utest_juju_api
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_juju_api.py
+  )
diff --git a/common/python/test/utest_config_data.py b/common/python/test/utest_config_data.py
new file mode 100644 (file)
index 0000000..8287c11
--- /dev/null
@@ -0,0 +1,106 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import logging
+import io
+import os
+import sys
+import tempfile
+import unittest
+import xmlrunner
+import yaml
+
+
+from rift.mano.config_data import config
+
+import gi
+gi.require_version('VnfdYang', '1.0')
+gi.require_version('RwYang', '1.0')
+
+from gi.repository import (
+        VnfdYang,
+        RwYang,
+        )
+
+class InitialPrimitiveReaderTest(unittest.TestCase):
+    def test_read_valid_config(self):
+        input_prim_data = [
+                {
+                    "name": "prim_1",
+                    "parameter": {
+                        "hostname": "pe1",
+                        #"pass": "6windos"
+                        # Hard to compare with multiple elements because the
+                        # ordering of list elements is not deterministic.
+                    }
+                },
+                {
+                    "name": "prim_2",
+                    # No parameters (use default values)
+                },
+            ]
+
+        with io.StringIO() as yaml_hdl:
+            yaml_hdl.write(yaml.safe_dump(input_prim_data))
+            yaml_hdl.seek(0)
+            reader = config.VnfInitialConfigPrimitiveReader.from_yaml_file_hdl(yaml_hdl)
+
+        expected_primitives = [
+                VnfdYang.InitialConfigPrimitive.from_dict({
+                        "name": "prim_1", "seq": 0, "parameter": [
+                            {
+                                "name": "hostname",
+                                "value": "pe1",
+                            },
+                        ]
+                    }),
+                VnfdYang.InitialConfigPrimitive.from_dict({
+                        "name": "prim_2", "seq": 1
+                    }),
+                ]
+
+        for i, prim in enumerate(reader.primitives):
+            logging.debug("Expected: %s", str(expected_primitives[i]))
+            logging.debug("Got: %s", str(prim))
+            self.assertEqual(expected_primitives[i], prim)
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='TEST %(message)s')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+
+    args, unknown = parser.parse_known_args(argv)
+    if args.no_runner:
+        runner = None
+
+    # Set the global logging level
+    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
+
+if __name__ == '__main__':
+    main()
diff --git a/common/python/test/utest_juju_api.py b/common/python/test/utest_juju_api.py
new file mode 100755 (executable)
index 0000000..7da9e81
--- /dev/null
@@ -0,0 +1,124 @@
+#!/usr/bin/env python3
+
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
+
+
+import argparse
+import asyncio
+import logging
+from unittest import mock
+import os
+import sys
+import unittest
+import xmlrunner
+
+import rift.mano.utils.juju_api as juju_api
+
+
+class JujuClientTest(unittest.TestCase):
+
+    log = None
+
+    @classmethod
+    def set_logger(cls, log):
+        cls.log = log
+
+    @asyncio.coroutine
+    def juju_client_test(self, mock_jujuclient, loop):
+        api = juju_api.JujuApi(secret='test', loop=loop, version=1)
+
+        env = yield from api.get_env()
+
+        self.assertTrue(env.login.called,
+                        "Login to Juju not called")
+        env.login.assert_called_with('test', user='user-admin')
+
+        charm = 'test-charm'
+        service = 'test-service'
+        yield from api.deploy_service(charm, service)
+        # self.assertTrue(env.deploy.called,
+        #                "Deploy failed")
+
+        config = {
+            'test_param': 'test_value',
+        }
+        yield from api.apply_config(config, env=env)
+        self.assertTrue(env.set_config.called,
+                        "Config failed")
+
+        try:
+            yield from api.resolve_error(env=env)
+        except KeyError as e:
+            # The mocked status does not carry real values, so this raises
+            pass
+        # The resolved method will not be called due to the error above
+        self.assertFalse(env.resolved.called,
+                        "Resolve error failed")
+
+        action = 'test-action'
+        params = {}
+        api.units = ['test-service-0']
+        # yield from api.execute_action(action, params, service=service, env=env)
+
+        action_tag = 'test-123434352'
+        # yield from api.get_action_status(action_tag)
+
+        api.destroy_retries = 2
+        api.retry_delay = 0.1
+        try:
+            yield from api.destroy_service()
+
+        except Exception as e:
+            JujuClientTest.log.debug("Expected exception on destroy service: {}".
+                                     format(e))
+
+        self.assertTrue(env.destroy_service.called,
+                        "Destroy failed")
+
+    @mock.patch('rift.mano.utils.juju_api.Env1', autospec=True)
+    def test_client(self, mock_jujuclient):
+        loop = asyncio.get_event_loop()
+
+        loop.run_until_complete(self.juju_client_test(mock_jujuclient,
+                                                      loop))
+
+        loop.close()
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='TEST %(message)s')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+
+    args, unknown = parser.parse_known_args(argv)
+    if args.no_runner:
+        runner = None
+
+    # Set the global logging level
+    log = logging.getLogger()
+    log.setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+    JujuClientTest.set_logger(log)
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
+
+if __name__ == '__main__':
+    main()
diff --git a/common/rw_gen_package.py b/common/rw_gen_package.py
new file mode 100755 (executable)
index 0000000..427e717
--- /dev/null
@@ -0,0 +1,102 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import sys
+import os
+import subprocess
+import argparse
+import shutil
+import xml.etree.ElementTree as etree
+
+from gi.repository import (
+    RwYang,
+    NsdYang,
+    RwNsdYang,
+    VnfdYang,
+    RwVnfdYang,
+    VldYang,
+    RwVldYang
+)
+
+def read_from_file(module_list, infile, input_format, descr_type):
+      model = RwYang.Model.create_libncx()
+      for module in module_list:
+          model.load_module(module)
+
+      descr = None
+      if descr_type == "nsd":
+        descr = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd()
+      else:
+        descr = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+
+      if input_format == 'json':
+          json_str = open(infile).read()
+          descr.from_json(model, json_str)
+
+      elif input_format.strip() == 'xml':
+          tree = etree.parse(infile)
+          root = tree.getroot()
+          xmlstr = etree.tostring(root, encoding="unicode")
+          descr.from_xml_v2(model, xmlstr)
+      else:
+          raise("Invalid input format for the descriptor")
+
+      return descr
+
+def write_to_file(name, outdir, infile, descr_type):
+      dirpath = os.path.join(outdir, name, descr_type)
+      if not os.path.exists(dirpath):
+          os.makedirs(dirpath)
+      shutil.copy(infile, dirpath)
+
+def main(argv=sys.argv[1:]):
+      global outdir, output_format
+      parser = argparse.ArgumentParser()
+      parser.add_argument('-i', '--infile', required=True,
+                          type=lambda x: os.path.isfile(x) and x or parser.error("INFILE does not exist"))
+      parser.add_argument('-o', '--outdir', default=".",
+                          type=lambda x: os.path.isdir(x) and x or parser.error("OUTDIR does not exist"))
+      parser.add_argument('-f', '--format', choices=['json', 'xml'], required=True)
+      parser.add_argument('-t', '--descriptor-type', choices=['nsd', 'vnfd'], required=True )
+
+      args = parser.parse_args(argv)
+      infile = args.infile
+      input_format = args.format
+      outdir = args.outdir
+      dtype = args.descriptor_type
+
+      print('Reading file {} in format {}'.format(infile, input_format))
+      module_list = ['vld', 'rw-vld']
+      if dtype == 'nsd':
+          module_list.extend(['nsd', 'rw-nsd'])
+      else:
+          module_list.extend(['vnfd', 'rw-vnfd'])
+
+      descr = read_from_file(module_list, args.infile, args.format, dtype)
+
+      print("Creating %s descriptor for {}".format(dtype.upper(), descr.name))
+      write_to_file(descr.name, outdir, infile, dtype)
+      # Note: the second path must be relative, or os.path.join discards RIFT_INSTALL
+      status = subprocess.call(os.path.join(os.environ["RIFT_INSTALL"],
+              "usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh %s %s" % (outdir, descr.name)), shell=True)
+      print("Status of %s descriptor package creation is: %s" % (dtype.upper(), status))
+
+
+if __name__ == "__main__":
+      main()
+
+
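
Note: a minimal sketch of the intended invocation, calling main() directly
(assumes the script's directory is on sys.path and RIFT_INSTALL is set for the
packaging step; file names are illustrative):

    import rw_gen_package

    rw_gen_package.main(['-i', 'ping_vnfd.xml', '-f', 'xml',
                         '-t', 'vnfd', '-o', '/tmp'])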
diff --git a/confd_client/CMakeLists.txt b/confd_client/CMakeLists.txt
new file mode 100644 (file)
index 0000000..d80cdb6
--- /dev/null
@@ -0,0 +1,28 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 2014/04/30
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+# confd_client executable
+add_executable(confd_client confd_client.c)
+
+target_link_libraries(confd_client
+  ${CMAKE_INSTALL_PREFIX}/usr/local/confd/lib/libconfd.so
+  pthread
+  )
diff --git a/confd_client/Makefile b/confd_client/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/confd_client/README b/confd_client/README
new file mode 100644 (file)
index 0000000..aa711c0
--- /dev/null
@@ -0,0 +1,8 @@
+This is a barebones confd client test program, useful for confd module testing. To use it, follow these steps:
+
+1. Reserve and log in to a VM as root
+2. cd ${RIFT_ROOT}
+3. ./rift-shell -e
+4. cd modules/core/mc/confd_client
+5. ./confd_client_opdata.sh (measures the rate of fetching operational data)
+6. ./confd_client_config.sh (measures the rate of config writes and reads)
diff --git a/confd_client/confd_client.c b/confd_client/confd_client.c
new file mode 100644 (file)
index 0000000..455ac1e
--- /dev/null
@@ -0,0 +1,434 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <poll.h> 
+#include <unistd.h> 
+#include <string.h>
+
+#include <confd_lib.h>
+#include "confd_cdb.h"
+#include "confd_dp.h"
+
+static struct confd_daemon_ctx *dctx;
+static int ctlsock;
+static int workersock;
+
+typedef struct _foodata {
+  char *name;
+  struct _foodata *next;
+} foodata_t;
+
+typedef struct _opdata {
+  foodata_t *foo;
+} opdata_t;
+
+opdata_t *g_opdata = NULL;
+
+int process_confd_subscription(int subsock)
+{
+  int confd_result, flags, length, *subscription_points, i, j, nvalues;
+  enum cdb_sub_notification type;
+  confd_tag_value_t *values;
+
+  confd_result = cdb_read_subscription_socket2(subsock,
+                                               &type,
+                                               &flags,
+                                               &subscription_points,
+                                               &length);
+
+  if (confd_result != CONFD_OK) {
+    confd_fatal("Failed to read subscription data \n");
+  }
+
+  switch (type) {
+    case CDB_SUB_PREPARE:
+      for (i = 0; i < length; i++) {
+        printf("i = %d, point = %d\n", i, subscription_points[i]);
+        if (cdb_get_modifications(subsock, subscription_points[i], flags, &values, &nvalues,
+                              "/") == CONFD_OK) {
+          for (j = 0; j < nvalues; j++) {
+            printf("j = %d\n", j);
+            confd_free_value(CONFD_GET_TAG_VALUE(&values[j]));
+          }
+        }
+      }
+      cdb_sync_subscription_socket(subsock, CDB_DONE_PRIORITY);
+      fprintf(stdout, "CBD_SUB_PREPARE\n");
+      break;
+
+    case CDB_SUB_COMMIT:
+      cdb_sync_subscription_socket(subsock, CDB_DONE_PRIORITY);
+      fprintf(stdout, "CDB_SUB_COMMIT\n");
+      break;
+
+    case CDB_SUB_ABORT:
+      fprintf(stdout, "CDB_SUB_ABORT\n");
+      break;
+
+    default:
+      confd_fatal("Invalid type %d in cdb_read_subscription_socket2\n", type);
+  }
+
+  return 0;
+}
+
+static int do_init_action(struct confd_user_info *uinfo)
+{
+  int ret = CONFD_OK;
+  // fprintf(stdout, "init_action called\n");
+  confd_action_set_fd(uinfo, workersock);
+  return ret;
+}
+
+static int do_rw_action(struct confd_user_info *uinfo,
+                        struct xml_tag *name,
+                        confd_hkeypath_t *kp,
+                        confd_tag_value_t *params,
+                        int nparams)
+{
+  // confd_tag_value_t reply[2];
+  // int status;
+  // char *ret_status;
+  int i;
+  char buf[BUFSIZ];
+
+  /* Just print the parameters and return */
+
+  //
+  for (i = 0; i < nparams; i++) {
+    confd_pp_value(buf, sizeof(buf), CONFD_GET_TAG_VALUE(&params[i]));
+    printf("param %2d: %9u:%-9u, %s\n", i, CONFD_GET_TAG_NS(&params[i]),
+           CONFD_GET_TAG_TAG(&params[i]), buf);
+  }
+
+  i = 0;
+  // CONFD_SET_TAG_INT32(&reply[i], NULL, 0); i++;
+  // CONFD_SET_TAG_STR(&reply[i], NULL, "success"); i++;
+  confd_action_reply_values(uinfo, NULL, i);
+
+  return CONFD_OK;
+
+}
+
+static int get_next(struct confd_trans_ctx *tctx,
+                    confd_hkeypath_t *keypath,
+                    long next) 
+{
+  opdata_t *opdata = tctx->t_opaque;
+  foodata_t *curr;
+  confd_value_t v[2];
+
+  if (next == -1) { /* first call */
+    curr = opdata->foo;
+  } else {
+    curr = (foodata_t *)next;
+  }
+
+  if (curr == NULL) {
+    confd_data_reply_next_key(tctx, NULL, -1, -1);
+    return CONFD_OK;
+  }
+
+  CONFD_SET_STR(&v[0], curr->name);
+  confd_data_reply_next_key(tctx, &v[0], 1, (long)curr->next);
+  return CONFD_OK;
+}
+
+static foodata_t *find_foo(confd_hkeypath_t *keypath, opdata_t *dp)
+{
+  char *name = (char*)CONFD_GET_BUFPTR(&keypath->v[1][0]);
+  foodata_t *foo = dp->foo;
+  while (foo != NULL) {
+    if (strcmp(foo->name, name) == 0) {
+      return foo;
+    }
+    foo = foo->next;
+  }
+  return NULL;
+}
+
+/* Keypath example */
+/* /arpentries/arpe{192.168.1.1 eth0}/hwaddr */
+/* 3 2 1 0 */
+static int get_elem(struct confd_trans_ctx *tctx,
+                    confd_hkeypath_t *keypath)
+{
+  confd_value_t v;
+  foodata_t *foo = find_foo(keypath, tctx->t_opaque);
+  if (foo == NULL) {
+    confd_data_reply_not_found(tctx);
+    return CONFD_OK;
+  }
+  
+  CONFD_SET_STR(&v, foo->name);
+  confd_data_reply_value(tctx, &v);
+  
+  return CONFD_OK;
+}
+
+static foodata_t *create_dummy_foodata_list(int count)
+{
+  foodata_t *head, *curr, *prev;
+  int i;
+  char buf[64];
+
+  head = prev = curr = NULL;
+  for (i = 0; i < count; ++i) {
+    curr = malloc(sizeof(foodata_t));
+    memset(curr, 0, sizeof(foodata_t));
+    snprintf(buf, 64, "foo%d", i);
+    curr->name = strdup(buf);
+    if (prev) {
+      prev->next = curr;
+    } else {
+      head = curr;
+    }
+    prev = curr;
+  }
+
+  return head;
+}
+
+static void free_foodata_list(foodata_t *foo)
+{
+  foodata_t *curr, *next;
+  curr = foo;
+  while (curr) {
+    next = curr->next;
+    if (curr->name) {
+      free(curr->name);
+    }
+    free(curr);
+    curr = next;
+  }
+}
+
+static void print_foodata_list(foodata_t *foo) 
+{
+  foodata_t *curr = foo;
+  while (curr) {
+    // fprintf(stdout, "%s\n", curr->name);
+    curr = curr->next;
+  }
+}
+
+static int s_init(struct confd_trans_ctx *tctx)
+{
+  opdata_t *opdata;
+  if ((opdata = malloc(sizeof(opdata_t))) == NULL) {
+    return CONFD_ERR;
+  }
+
+  memset(opdata, 0, sizeof(opdata_t));
+  opdata->foo = create_dummy_foodata_list(10);
+  print_foodata_list(opdata->foo);
+  tctx->t_opaque = opdata;
+  confd_trans_set_fd(tctx, workersock);
+  return CONFD_OK;
+}
+
+static int s_finish(struct confd_trans_ctx *tctx)
+{
+  opdata_t *opdata = tctx->t_opaque;
+  if (opdata != NULL) {
+    free_foodata_list(opdata->foo);
+    free(opdata);
+  }
+
+  return CONFD_OK;
+}
+
+int main(int argc, char **argv)
+{
+  struct sockaddr_in addr;
+  int debuglevel = CONFD_TRACE;
+  struct confd_trans_cbs trans;
+  struct confd_data_cbs data;
+  struct confd_action_cbs action;
+  int i;
+
+  int subsock, datasock;
+  int status;
+  int spoint;
+
+  addr.sin_addr.s_addr = inet_addr("127.0.0.1");
+  addr.sin_family = AF_INET;
+  addr.sin_port = htons(CONFD_PORT);
+
+  /**
+   * Setup CDB subscription socket
+   */
+  confd_init(argv[0], stderr, CONFD_DEBUG);
+  if ((subsock = socket(PF_INET, SOCK_STREAM, 0)) < 0) {
+    confd_fatal("Failed to open subscription socket\n");
+  }
+
+  printf("Subscription socket: %d\n", subsock);
+
+  for (i = 1; i < 10; ++i) {
+    if (cdb_connect(subsock, CDB_SUBSCRIPTION_SOCKET,
+                    (struct sockaddr*)&addr,
+                    sizeof (struct sockaddr_in)) < 0) {
+      sleep(2);
+      fprintf(stdout, "Failed in confd_connect() {attempt: %d}\n", i);
+    } else {
+      fprintf(stdout, "confd_connect succeeded\n");
+      break;
+    }
+  }
+
+  if ((status = cdb_subscribe2(subsock, CDB_SUB_RUNNING_TWOPHASE, 0, 0, &spoint, 0, "/"))
+      != CONFD_OK) {
+    fprintf(stderr, "Terminate: subscribe %d\n", status);
+    exit(1);
+  }
+
+  if (cdb_subscribe_done(subsock) != CONFD_OK) {
+    confd_fatal("cdb_subscribe_done() failed");
+  }
+
+  /**
+   * Setup CDB data socket
+   */
+
+  if ((datasock = socket(PF_INET, SOCK_STREAM, 0)) < 0) {
+    confd_fatal("Failed to open data socket\n");
+  }
+
+  if (cdb_connect(datasock, CDB_DATA_SOCKET,
+                  (struct sockaddr*)&addr,
+                  sizeof (struct sockaddr_in)) < 0) {
+    confd_fatal("Failed to confd_connect() to confd \n");
+  }
+
+  memset(&trans, 0, sizeof (struct confd_trans_cbs));
+  trans.init = s_init;
+  trans.finish = s_finish;
+
+  memset(&data, 0, sizeof (struct confd_data_cbs));
+  data.get_elem = get_elem;
+  data.get_next = get_next;
+  strcpy(data.callpoint, "base_show");
+
+  memset(&action, 0, sizeof (action));
+  strcpy(action.actionpoint, "rw_action");
+  action.init = do_init_action;
+  action.action = do_rw_action;
+
+
+  /* initialize confd library */
+  confd_init("confd_client_op_data_daemon", stderr, debuglevel);
+
+
+  for (i = 1; i < 10; ++i) {
+    if (confd_load_schemas((struct sockaddr*)&addr,
+                           sizeof(struct sockaddr_in)) != CONFD_OK) {
+      fprintf(stdout, "Failed to load schemas from confd {attempt: %d}\n", i);
+      sleep(2);
+    } else {
+      fprintf(stdout, "confd_load_schemas succeeded\n");
+      break;
+    }
+  }
+
+  if ((dctx = confd_init_daemon("confd_client_op_data_daemon")) == NULL) {
+    confd_fatal("Failed to initialize confdlib\n");
+  }
+
+  /* Create the first control socket, all requests to */
+  /* create new transactions arrive here */
+  if ((ctlsock = socket(PF_INET, SOCK_STREAM, 0)) < 0) {
+    confd_fatal("Failed to open ctlsocket\n");
+  }
+
+  if (confd_connect(dctx, ctlsock, CONTROL_SOCKET, (struct sockaddr*)&addr,
+                    sizeof (struct sockaddr_in)) < 0) {
+    confd_fatal("Failed to confd_connect() to confd \n");
+  }
+
+  /* Also establish a workersocket, this is the most simple */
+  /* case where we have just one ctlsock and one workersock */
+  if ((workersock = socket(PF_INET, SOCK_STREAM, 0)) < 0) {
+    confd_fatal("Failed to open workersocket\n");
+  }
+
+  if (confd_connect(dctx, workersock, WORKER_SOCKET,(struct sockaddr*)&addr,
+                    sizeof (struct sockaddr_in)) < 0) {
+    confd_fatal("Failed to confd_connect() to confd \n");
+  }
+
+  if (confd_register_trans_cb(dctx, &trans) == CONFD_ERR) {
+    confd_fatal("Failed to register trans cb \n");
+  }
+
+  if (confd_register_data_cb(dctx, &data) == CONFD_ERR) {
+    confd_fatal("Failed to register data cb \n"); 
+  }
+
+  if (confd_register_action_cbs(dctx, &action) == CONFD_ERR) {
+    confd_fatal("Failed to register action cb \n"); 
+  }
+
+  if (confd_register_done(dctx) != CONFD_OK) {
+    confd_fatal("Failed to complete registration \n");
+  }
+
+  while(1) {
+    struct pollfd set[3];
+    int ret;
+    set[0].fd = ctlsock;
+    set[0].events = POLLIN;
+    set[0].revents = 0;
+    set[1].fd = workersock;
+    set[1].events = POLLIN;
+    set[1].revents = 0;
+    set[2].fd = subsock;
+    set[2].events = POLLIN;
+    set[2].revents = 0;
+    if (poll(set, sizeof(set)/sizeof(*set), -1) < 0) {
+      perror("Poll failed:");
+      continue;
+    }
+    /* Check for I/O */
+    if (set[0].revents & POLLIN) {
+      if ((ret = confd_fd_ready(dctx, ctlsock)) == CONFD_EOF) {
+        confd_fatal("Control socket closed\n");
+      } else if (ret == CONFD_ERR && confd_errno != CONFD_ERR_EXTERNAL) {
+        confd_fatal("Error on control socket request: %s (%d): %s\n",
+                    confd_strerror(confd_errno), confd_errno, confd_lasterr());
+      }
+    }
+    if (set[1].revents & POLLIN) {
+      if ((ret = confd_fd_ready(dctx, workersock)) == CONFD_EOF) {
+        confd_fatal("Worker socket closed\n");
+      } else if (ret == CONFD_ERR && confd_errno != CONFD_ERR_EXTERNAL) {
+        confd_fatal("Error on worker socket request: %s (%d): %s\n",
+                    confd_strerror(confd_errno), confd_errno, confd_lasterr());
+      }
+    }
+    if (set[2].revents & POLLIN) {
+      process_confd_subscription(set[2].fd);
+    }
+  }
+
+  return 0;
+}
diff --git a/confd_client/test.sh b/confd_client/test.sh
new file mode 100755 (executable)
index 0000000..644451d
--- /dev/null
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# This script tests the throughput of netconf get operations.
+# Adjust the iter and loop variables below to change the load.
+
+NETCONF_CONSOLE_DIR=${RIFT_ROOT}/.install/usr/local/confd/bin
+
+iter=100
+loop=30
+
+for i in `seq 1 $loop`;
+do
+    echo "Background script $i"
+    ${NETCONF_CONSOLE_DIR}/netconf-console-tcp -s all --iter=$iter --get -x /opdata&
+done
+
+wait
+
+total=$(($iter * $loop))
+echo "Total number of netconf operations=$total"
+
+
+
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
new file mode 100644 (file)
index 0000000..ff329dd
--- /dev/null
@@ -0,0 +1,35 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 03/26/2014
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(PKG_NAME rwmano_examples)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+
+##
+# Include the subdirs
+##
+set(subdirs
+  ping_pong_ns
+  )
+
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/examples/Makefile b/examples/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/examples/ping_pong_ns/CMakeLists.txt b/examples/ping_pong_ns/CMakeLists.txt
new file mode 100644 (file)
index 0000000..9667465
--- /dev/null
@@ -0,0 +1,80 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 03/26/2014
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+configure_file(
+  ${CMAKE_CURRENT_SOURCE_DIR}/generate_packages.sh.in
+  ${CMAKE_CURRENT_BINARY_DIR}/generate_packages.sh
+  ESCAPE_QUOTES @ONLY
+  )
+
+set(PACKAGE_OUTPUT
+  ${CMAKE_CURRENT_BINARY_DIR}/ping_pong_nsd.tar.gz
+  ${CMAKE_CURRENT_BINARY_DIR}/ping_vnfd.tar.gz
+  ${CMAKE_CURRENT_BINARY_DIR}/pong_vnfd.tar.gz
+  ${CMAKE_CURRENT_BINARY_DIR}/ping_pong_nsd_aws.tar.gz
+  ${CMAKE_CURRENT_BINARY_DIR}/ping_vnfd_aws.tar.gz
+  ${CMAKE_CURRENT_BINARY_DIR}/pong_vnfd_aws.tar.gz
+  ${CMAKE_CURRENT_BINARY_DIR}/ping_pong_nsd_with_epa.tar.gz
+  ${CMAKE_CURRENT_BINARY_DIR}/ping_vnfd_with_epa.tar.gz
+  ${CMAKE_CURRENT_BINARY_DIR}/pong_vnfd_with_epa.tar.gz)
+
+add_custom_command(
+    OUTPUT ${PACKAGE_OUTPUT}
+    COMMAND ${CMAKE_CURRENT_BINARY_DIR}/generate_packages.sh
+    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/ping_pong_nsd.py
+  )
+
+add_custom_target(ping_pong_pkg_gen ALL
+    DEPENDS mano_yang ${PACKAGE_OUTPUT}
+  )
+
+install(
+    FILES ${PACKAGE_OUTPUT}
+    DESTINATION
+      usr/rift/mano/examples/ping_pong_ns
+    COMPONENT ${PKG_LONG_NAME}
+    )
+
+install(
+    FILES
+      ${CMAKE_CURRENT_BINARY_DIR}/ping_vnfd_with_image.tar.gz
+      ${CMAKE_CURRENT_BINARY_DIR}/pong_vnfd_with_image.tar.gz
+    DESTINATION
+      usr/rift/mano/examples/ping_pong_ns
+    COMPONENT ${PKG_LONG_NAME}
+    OPTIONAL
+    )
+
+rift_python_install_tree(
+  COMPONENT ${PKG_LONG_NAME}
+  FILES
+    rift/mano/examples/ping_pong_nsd.py
+    rift/mano/examples/start_traffic.py
+  )
+
+install(
+  PROGRAMS
+    rift/mano/examples/ping_config.py
+    stand_up_ping_pong
+  DESTINATION usr/bin
+  COMPONENT ${PKG_LONG_NAME}
+  )
+
diff --git a/examples/ping_pong_ns/Makefile b/examples/ping_pong_ns/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/examples/ping_pong_ns/config_desc.py b/examples/ping_pong_ns/config_desc.py
new file mode 100755 (executable)
index 0000000..fcd1400
--- /dev/null
@@ -0,0 +1,146 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+import logging
+import rift.auto.proxy
+import rift.vcs
+import sys
+
+import gi
+gi.require_version('RwYang', '1.0')
+
+from gi.repository import NsdYang, VldYang, VnfdYang, RwYang
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
+model = RwYang.Model.create_libncx()
+model.load_schema_ypbc(VldYang.get_schema())
+model.load_schema_ypbc(NsdYang.get_schema())
+model.load_schema_ypbc(VnfdYang.get_schema())
+
+
+def configure_vld(proxy, vld_xml_hdl):
+    vld_xml = vld_xml_hdl.read()
+    logger.debug("Attempting to deserialize XML into VLD protobuf: %s", vld_xml)
+    vld = VldYang.YangData_Vld_VldCatalog_Vld()
+    vld.from_xml_v2(model, vld_xml)
+
+    logger.debug("Sending VLD to netconf: %s", vld)
+    proxy.merge_config(vld.to_xml_v2(model))
+
+
+def configure_vnfd(proxy, vnfd_xml_hdl):
+    vnfd_xml = vnfd_xml_hdl.read()
+    logger.debug("Attempting to deserialize XML into VNFD protobuf: %s", vnfd_xml)
+    vnfd = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+    vnfd.from_xml_v2(model, vnfd_xml)
+
+    logger.debug("Sending VNFD to netconf: %s", vnfd)
+    proxy.merge_config(vnfd.to_xml_v2(model))
+
+
+def configure_nsd(proxy, nsd_xml_hdl):
+    nsd_xml = nsd_xml_hdl.read()
+    logger.debug("Attempting to deserialize XML into NSD protobuf: %s", nsd_xml)
+    nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+    nsd.from_xml_v2(model, nsd_xml)
+
+    logger.debug("Sending NSD to netconf: %s", nsd)
+    proxy.merge_config(nsd.to_xml_v2(model))
+
+
+def parse_args(argv=sys.argv[1:]):
+    """Create a parser which includes all generic demo arguments and parse args
+
+    Arguments:
+        argv - arguments to be parsed
+
+    Returns: List of parsed arguments
+    """
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+            '--confd-host',
+            default="127.0.0.1",
+            help="Hostname or IP where the confd netconf server is running.")
+
+    parser.add_argument(
+            '--vld-xml-file',
+            action="append",
+            default=[],
+            type=argparse.FileType(),
+            help="VLD XML File Path",
+            )
+
+    parser.add_argument(
+            '--vnfd-xml-file',
+            action="append",
+            default=[],
+            type=argparse.FileType(),
+            help="VNFD XML File Path",
+            )
+
+    parser.add_argument(
+            '--nsd-xml-file',
+            action="append",
+            default=[],
+            type=argparse.FileType(),
+            help="VNFD XML File Path",
+            )
+
+    parser.add_argument(
+            '-v', '--verbose',
+            action='store_true',
+            help="Logging is normally set to an INFO level. When this flag "
+                 "is used logging is set to DEBUG. ")
+
+    args = parser.parse_args(argv)
+
+    return args
+
+
+def connect(args):
+    # Initialize Netconf Management Proxy
+    mgmt_proxy = rift.auto.proxy.NetconfProxy(args.confd_host)
+    mgmt_proxy.connect()
+
+    # Ensure system started
+    vcs_component_info = rift.vcs.mgmt.VcsComponentInfo(mgmt_proxy)
+    vcs_component_info.wait_until_system_started()
+
+    return mgmt_proxy
+
+
+def main():
+    args = parse_args()
+    proxy = connect(args)
+    for xml_file in args.vnfd_xml_file:
+        configure_vnfd(proxy, xml_file)
+
+    for xml_file in args.vld_xml_file:
+        configure_vld(proxy, xml_file)
+
+    for xml_file in args.nsd_xml_file:
+        configure_nsd(proxy, xml_file)
+
+
+if __name__ == "__main__":
+    main()
+
diff --git a/examples/ping_pong_ns/generate_packages.sh.in b/examples/ping_pong_ns/generate_packages.sh.in
new file mode 100755 (executable)
index 0000000..ae54052
--- /dev/null
@@ -0,0 +1,140 @@
+#! /bin/bash
+
+set -e
+set -x
+
+SOURCE_DIR=@CMAKE_CURRENT_SOURCE_DIR@
+BINARY_DIR=@CMAKE_CURRENT_BINARY_DIR@
+PROJECT_TOP_DIR=@PROJECT_TOP_DIR@
+QCOW_IMAGE=${RIFT_ROOT}/images/Fedora-x86_64-20-20131211.1-sda.qcow2
+RIFT_QCOW_IMAGE=${RIFT_ROOT}/images/Fedora-x86_64-20-20131211.1-sda.qcow2
+PONG_QCOW_IMAGE=${RIFT_ROOT}/images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2
+PING_QCOW_IMAGE=${RIFT_ROOT}/images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2
+
+# These paths are needed for finding the overrides and so files
+PYTHONPATH=${PYTHONPATH}:@RIFT_SUBMODULE_SOURCE_ROOT@/rwvcs/ra:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang
+PYTHON3PATH=${PYTHON3PATH}:@RIFT_SUBMODULE_SOURCE_ROOT@/rwvcs/ra:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang
+LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang
+
+#Rift Logos
+PING_VNFD_LOGO=${SOURCE_DIR}/rift_logo.png
+PONG_VNFD_LOGO=${SOURCE_DIR}/rift_logo.png
+PING_PONG_NSD_LOGO=${SOURCE_DIR}/rift_logo.png
+
+# Remove any old directories
+rm -rf ${BINARY_DIR}/ping_vnfd
+rm -rf ${BINARY_DIR}/pong_vnfd
+rm -rf ${BINARY_DIR}/ping_pong_nsd
+
+rm -rf ${BINARY_DIR}/ping_vnfd_with_image
+rm -rf ${BINARY_DIR}/pong_vnfd_with_image
+
+
+rm -rf ${BINARY_DIR}/ping_vnfd_aws
+rm -rf ${BINARY_DIR}/pong_vnfd_aws
+rm -rf ${BINARY_DIR}/ping_pong_nsd_aws
+
+rm -rf ${BINARY_DIR}/ping_vnfd_with_epa
+rm -rf ${BINARY_DIR}/pong_vnfd_with_epa
+rm -rf ${BINARY_DIR}/ping_pong_nsd_with_epa
+
+
+# Generate image md5sum
+ping_md5sum="$(md5sum ${PING_QCOW_IMAGE} | cut -f1 -d" ")"
+pong_md5sum="$(md5sum ${PONG_QCOW_IMAGE} | cut -f1 -d" ")"
+
+# Generate the descriptors (in various formats)
+${SOURCE_DIR}/ping_pong_nsd.py --outdir=${BINARY_DIR} --format=yaml --ping-image-md5=${ping_md5sum} --pong-image-md5=${pong_md5sum} --pong-cloud-init=pong_cloud_init.cfg --ping-cloud-init=ping_cloud_init.cfg
+
+
+# create directories for packages with images
+cp -r ${BINARY_DIR}/ping_vnfd ${BINARY_DIR}/ping_vnfd_with_image
+cp -r ${BINARY_DIR}/pong_vnfd ${BINARY_DIR}/pong_vnfd_with_image
+mkdir -p ${BINARY_DIR}/ping_vnfd_with_image/images
+mkdir -p ${BINARY_DIR}/pong_vnfd_with_image/images
+
+### Generate descriptors for AWS
+${SOURCE_DIR}/ping_pong_nsd.py --outdir=${BINARY_DIR}/aws --format=json --aws
+
+### Move the generated artifacts to appropriate directories
+mv ${BINARY_DIR}/aws/ping_vnfd ${BINARY_DIR}/ping_vnfd_aws
+mv ${BINARY_DIR}/aws/pong_vnfd ${BINARY_DIR}/pong_vnfd_aws
+mv ${BINARY_DIR}/aws/ping_pong_nsd ${BINARY_DIR}/ping_pong_nsd_aws
+
+### Remove the original directories
+rm -rf ${BINARY_DIR}/aws
+
+### Generate descriptors with EPA
+${SOURCE_DIR}/ping_pong_nsd.py --outdir=${BINARY_DIR}/with_epa --format=json --epa --ping-image-md5=${ping_md5sum} --pong-image-md5=${pong_md5sum}
+
+### Move the generated artifacts to appropriate directories
+mv ${BINARY_DIR}/with_epa/ping_vnfd ${BINARY_DIR}/ping_vnfd_with_epa
+mv ${BINARY_DIR}/with_epa/pong_vnfd ${BINARY_DIR}/pong_vnfd_with_epa
+mv ${BINARY_DIR}/with_epa/ping_pong_nsd ${BINARY_DIR}/ping_pong_nsd_with_epa
+
+### Remove the original directories
+rm -rf ${BINARY_DIR}/with_epa
+
+# copy a dummy image for now
+if [ -e ${PING_QCOW_IMAGE} ]; then
+# Add RIFT Logos
+    mkdir -p ${BINARY_DIR}/ping_vnfd_with_image/icons
+    cp ${PING_VNFD_LOGO} ${BINARY_DIR}/ping_vnfd_with_image/icons/
+
+    cp ${PING_QCOW_IMAGE} ${BINARY_DIR}/ping_vnfd_with_image/images/
+    ${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_vnfd_with_image
+else
+    echo >&2 "Warn: Skipped creating ping_vnfd_with_image due to missing image: ${PING_QCOW_IMAGE}"
+fi
+
+if [ -e ${PONG_QCOW_IMAGE} ]; then
+# Add RIFT Logos
+    mkdir -p ${BINARY_DIR}/pong_vnfd_with_image/icons
+    cp ${PONG_VNFD_LOGO} ${BINARY_DIR}/pong_vnfd_with_image/icons/
+
+    cp ${PONG_QCOW_IMAGE} ${BINARY_DIR}/pong_vnfd_with_image/images/
+    ${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} pong_vnfd_with_image
+else
+    echo >&2 "Warn: Skipped creating pong_vnfd_with_image due to missing image: ${PONG_QCOW_IMAGE}"
+fi
+
+# Add RIFT Logos
+mkdir -p ${BINARY_DIR}/ping_vnfd/icons
+mkdir -p ${BINARY_DIR}/pong_vnfd/icons
+mkdir -p ${BINARY_DIR}/ping_pong_nsd/icons
+
+cp ${PING_VNFD_LOGO}      ${BINARY_DIR}/ping_vnfd/icons/
+cp ${PONG_VNFD_LOGO}      ${BINARY_DIR}/pong_vnfd/icons/
+cp ${PING_PONG_NSD_LOGO}  ${BINARY_DIR}/ping_pong_nsd/icons/
+
+# Generate the tar files
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_vnfd
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} pong_vnfd
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_pong_nsd
+
+
+# Add RIFT Logos
+mkdir -p ${BINARY_DIR}/ping_vnfd_aws/icons
+mkdir -p ${BINARY_DIR}/pong_vnfd_aws/icons
+mkdir -p ${BINARY_DIR}/ping_pong_nsd_aws/icons
+
+cp ${PING_VNFD_LOGO}      ${BINARY_DIR}/ping_vnfd_aws/icons/
+cp ${PONG_VNFD_LOGO}      ${BINARY_DIR}/pong_vnfd_aws/icons/
+cp ${PING_PONG_NSD_LOGO}  ${BINARY_DIR}/ping_pong_nsd_aws/icons/
+
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_vnfd_aws
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} pong_vnfd_aws
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_pong_nsd_aws
+
+# Add RIFT Logos
+mkdir -p ${BINARY_DIR}/ping_vnfd_with_epa/icons
+mkdir -p ${BINARY_DIR}/pong_vnfd_with_epa/icons
+mkdir -p ${BINARY_DIR}/ping_pong_nsd_with_epa/icons
+
+cp ${PING_VNFD_LOGO}      ${BINARY_DIR}/ping_vnfd_with_epa/icons/
+cp ${PONG_VNFD_LOGO}      ${BINARY_DIR}/pong_vnfd_with_epa/icons/
+cp ${PING_PONG_NSD_LOGO}  ${BINARY_DIR}/ping_pong_nsd_with_epa/icons/
+
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_vnfd_with_epa
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} pong_vnfd_with_epa
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_pong_nsd_with_epa
diff --git a/examples/ping_pong_ns/ping_pong_ns/__init__.py b/examples/ping_pong_ns/ping_pong_ns/__init__.py
new file mode 100644 (file)
index 0000000..966870a
--- /dev/null
@@ -0,0 +1,15 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
diff --git a/examples/ping_pong_ns/ping_pong_ns/ping.py b/examples/ping_pong_ns/ping_pong_ns/ping.py
new file mode 100644 (file)
index 0000000..00da96c
--- /dev/null
@@ -0,0 +1,312 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from datetime import date
+import logging
+import json
+import socket
+import threading
+import time
+
+import tornado.web
+
+from util.util import get_url_target
+
+class Ping(object):
+    def __init__(self):
+        self._log = logging.getLogger("ping")
+        self._log.setLevel(logging.DEBUG)
+
+        self._ping_count = 0
+        self._request_count = 0
+        self._response_count = 0
+
+        self._pong_ip = None
+        self._pong_port = None
+
+        self._send_rate = 1 # per second
+
+        self._close_lock = threading.Lock()
+
+        self._enabled = False
+        self._socket = None
+
+    @property
+    def rate(self):
+        return self._send_rate
+
+    @rate.setter
+    def rate(self, value):
+        self._log.debug("new rate: %s" % value)
+        self._send_rate = value
+
+    @property
+    def pong_port(self):
+        return self._pong_port
+
+    @pong_port.setter
+    def pong_port(self, value):
+        self._log.debug("new pong port: %s" % value)
+        self._pong_port = value
+
+    @property
+    def pong_ip(self):
+        return self._pong_ip
+
+    @pong_ip.setter
+    def pong_ip(self, value):
+
+        self._log.debug("new pong ip: %s" % value)
+        self._pong_ip = value
+
+    @property
+    def enabled(self):
+        return self._enabled
+
+    @property
+    def request_count(self):
+        return self._request_count
+
+    @property
+    def response_count(self):
+        return self._response_count
+
+    def start(self):
+        self._log.debug("starting")
+        self._enabled = True
+        # self.open_socket()
+        self.send_thread = threading.Thread(target=self.send_ping)
+        self.recv_thread = threading.Thread(target=self.recv_resp)
+        self.send_thread.start()
+        self.recv_thread.start()
+
+    def stop(self):
+        self._log.debug("stopping")
+        self._enabled = False
+        self.close_socket("stopping")
+
+    def close_socket(self, msg):
+        self._close_lock.acquire()
+        if self._socket is not None:
+            self._socket.close()
+            self._socket = None
+            self._log.info("Closed socket with msg={}".format(msg))
+        self._close_lock.release()
+
+    def open_socket(self):
+        try:
+            self._log.debug("construct socket")
+            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
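+            # 1s timeout keeps the send/recv loops responsive so stop() is not blocked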
+            self._socket.settimeout(1)
+        except socket.error as msg:
+            self._log.error("error constructing socket %s" % msg)
+            self._socket = None
+
+        while self._enabled:
+            try:
+                self._log.info("Trying to connect....")
+                self._socket.connect((self.pong_ip, self.pong_port))
+                self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+                self._log.info("Socket connected")
+                break
+            except socket.error as msg:
+                time.sleep(1)
+                
+
+    def send_ping(self):
+        self.open_socket()
+
+        while self._enabled:
+            if self._socket is not None:
+                req = "rwpingpong-{}".format(self._ping_count)
+                try:
+                    self._log.info("sending: %s" % req)
+                    self._socket.sendall(req)
+                    self._ping_count += 1
+                    self._request_count += 1
+                except socket.error as msg:
+                    self._log.error("Error({}) sending data".format(msg))
+                    self.close_socket(msg)
+                    return
+        
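+            # Pace transmissions at the configured requests-per-second rate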
+            time.sleep(1.0/self._send_rate)
+
+        self._log.info("Stopping send_ping")
+
+    def recv_resp(self):
+        while self._enabled:
+            respb = None
+            if self._socket is not None:
+                try:
+                    respb = self._socket.recv(1024)
+                except socket.timeout:
+                    continue
+                except socket.error as msg:
+                    self._log.error("Error({}) receiving data".format(msg))
+                    time.sleep(1)
+                    continue
+                    # self.close_socket(msg)
+                    # return
+
+            if not respb:
+                continue
+
+            resp = respb.decode('UTF-8')
+            self._response_count += 1
+            self._log.info("receive: %s" % resp)
+
+        self._log.info("Stopping recv_resp")
+
+class PingServerHandler(tornado.web.RequestHandler):
+    def initialize(self, ping_instance):
+        self._ping_instance = ping_instance
+
+    def get(self, args):
+        response = {'ip': self._ping_instance.pong_ip,
+                    'port': self._ping_instance.pong_port}
+
+        self.write(response)
+
+    def post(self, args):
+        target = get_url_target(self.request.uri)
+        body = self.request.body.decode("utf-8")
+        body_header = self.request.headers.get("Content-Type")
+
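+        # Expected JSON body, e.g.: {"ip": "14.0.0.2", "port": 5555}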
+        if "json" not in body_header:
+            self.write("Content-Type must be some kind of json 2")
+            self.set_status(405)
+            return
+
+        try:
+            json_dicts = json.loads(body)
+        except:
+            self.write("Content-Type must be some kind of json 1")
+            self.set_status(405)
+            return
+
+        if target == "server":
+            if type(json_dicts['port']) is not int:
+                self.set_status(405)
+                return
+
+            if type(json_dicts['ip']) not in (str, unicode):
+                self.set_status(405)
+                return
+
+            self._ping_instance.pong_ip = json_dicts['ip']
+            self._ping_instance.pong_port = json_dicts['port']
+
+        else:
+            self.set_status(404)
+            return
+
+        self.set_status(200)
+
+class PingAdminStatusHandler(tornado.web.RequestHandler):
+    def initialize(self, ping_instance):
+        self._ping_instance = ping_instance
+
+    def get(self, args):
+        target = get_url_target(self.request.uri)
+        if target == "state":
+            value = "enabled" if self._ping_instance.enabled else "disabled"
+
+            response = { 'adminstatus': value }
+        else:
+            self.set_status(404)
+            return
+
+        self.write(response)
+
+    def post(self, args):
+        target = get_url_target(self.request.uri)
+        body = self.request.body.decode("utf-8")
+        body_header = self.request.headers.get("Content-Type")
+
+        if "json" not in body_header:
+            self.write("Content-Type must be some kind of json 2")
+            self.set_status(405)            
+            return
+            
+        try:
+            json_dicts = json.loads(body)
+        except:
+            self.write("Content-Type must be some kind of json 1")
+            self.set_status(405)            
+            return
+
+        if target == "state":
+            if type(json_dicts['enable']) is not bool:
+                self.set_status(405)
+                return
+
+            if json_dicts['enable']:
+                if not self._ping_instance.enabled:
+                    self._ping_instance.start()
+            else:
+                self._ping_instance.stop()
+
+        else:
+            self.set_status(404)
+            return
+
+        self.set_status(200)
+
+class PingStatsHandler(tornado.web.RequestHandler):
+    def initialize(self, ping_instance):
+        self._ping_instance = ping_instance
+
+    def get(self):
+        response = {'ping-request-tx-count': self._ping_instance.request_count,
+                    'ping-response-rx-count': self._ping_instance.response_count}
+
+        self.write(response)
+
+class PingRateHandler(tornado.web.RequestHandler):
+    def initialize(self, ping_instance):
+        self._ping_instance = ping_instance
+
+    def get(self, args):
+        response = { 'rate': self._ping_instance.rate }
+
+        self.write(response)
+
+    def post(self, args):
+        target = get_url_target(self.request.uri)
+        body = self.request.body.decode("utf-8")
+        body_header = self.request.headers.get("Content-Type")
+
+        if "json" not in body_header:
+            self.set_status(405)
+            return
+
+        try:
+            json_dicts = json.loads(body)
+        except ValueError:
+            self.set_status(405)
+            return
+
+        if target == "rate":
+            if type(json_dicts['rate']) is not int:
+                self.set_status(405)
+                return
+
+            self._ping_instance.rate = json_dicts['rate']
+        else:
+            self.set_status(404)
+            return
+
+        self.set_status(200)
diff --git a/examples/ping_pong_ns/ping_pong_ns/ping.service b/examples/ping_pong_ns/ping_pong_ns/ping.service
new file mode 100644 (file)
index 0000000..cd0ac65
--- /dev/null
@@ -0,0 +1,12 @@
+[Unit]
+Description=Ping Client
+After=syslog.target network.target
+
+[Service]
+Type=simple
+ExecStart=/opt/rift/ping_pong_ns/start_ping
+
+[Install]
+WantedBy=multi-user.target
+
+
diff --git a/examples/ping_pong_ns/ping_pong_ns/pong.py b/examples/ping_pong_ns/ping_pong_ns/pong.py
new file mode 100644 (file)
index 0000000..ee5c2d2
--- /dev/null
@@ -0,0 +1,334 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from datetime import date
+from Queue import Queue, Empty
+import logging
+import json
+import socket
+import threading
+import time
+
+import tornado.web
+
+from util.util import get_url_target
+
+class Stats(object):
+    def __init__(self):
+        self._request_count = 0
+        self._response_count = 0
+
+        self._lock = threading.Lock()
+
+    @property
+    def request_count(self):
+        with self._lock:
+            return self._request_count
+
+    @request_count.setter
+    def request_count(self, value):
+        with self._lock:
+            self._request_count = value
+
+    @property
+    def response_count(self):
+        with self._lock:
+            return self._response_count
+
+    @response_count.setter
+    def response_count(self, value):
+        with self._lock:
+            self._response_count = value
+        
+class Worker(threading.Thread):
+    def __init__(self, log, connections, stats):
+        super(Worker, self).__init__()
+        self._log = log
+        self._connections = connections
+        self._stats = stats
+
+        self._running = True
+
+        self._lock = threading.Lock()
+        
+    @property
+    def running(self):
+        return self._running
+
+    @running.setter
+    def running(self, value):
+        self._running = value
+
+    def run(self):
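+        # Round-robin service loop: take a connection off the shared queue,
+        # handle one request/response on it, then put it back for the next worker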
+        while self.running:
+            try:
+                connection = self._connections.get_nowait()
+            except Empty:
+                continue
+            
+            try:
+                req = connection.recv(1024)
+            except socket.error as msg:
+                self._log.error("error with connection read: " % msg)
+                self._connections.put(connection)
+                continue
+
+            if not req:
+                self._connections.put(connection)
+                continue
+
+            resp = req.decode('UTF-8')
+            self._log.debug("got: %s", resp)
+
+            self._stats.request_count += 1
+
+            try:
+                connection.sendall(resp)
+                self._stats.response_count += 1
+            except socket.error as msg:
+                self._log.error("error with connection read: " % msg)
+                self._connections.put(connection)
+                continue
+
+            self._connections.put(connection)        
+
+class Pong(object):
+    def __init__(self, worker_count=5):
+        self._log = logging.getLogger("pong")
+        self._log.setLevel(logging.DEBUG)
+
+        self.listen_ip = None
+        self.listen_port = None
+
+        self._lock = threading.Lock()
+
+        self._connections = Queue()
+        
+        self._stats = Stats()
+
+        self._workers = list()
+
+        self._enabled = False
+
+        for _ in range(worker_count):
+            self._workers.append(Worker(self._log, self._connections, self._stats))
+
+    @property
+    def listen_port(self):
+        return self._listen_port
+
+    @listen_port.setter
+    def listen_port(self, value):
+        self._log.debug("new listen port: %s" % value)
+        self._listen_port = value
+
+    @property
+    def listen_ip(self):
+        return self._listen_ip
+
+    @listen_ip.setter
+    def listen_ip(self, value):
+        self._log.debug("listen pong ip: %s" % value)
+        self._listen_ip = value
+
+
+    @property
+    def enabled(self):
+        with self._lock:
+            return self._enabled
+
+    @property
+    def request_count(self):
+        return self._stats.request_count
+
+    @property
+    def response_count(self):
+        return self._stats.response_count
+
+    def start(self):
+        self._log.debug("starting")
+        self._enabled = True
+        self.listener_thread = threading.Thread(target=self._listen)
+        self.listener_thread.start()
+        for worker in self._workers:
+            worker.start()
+
+    def stop(self):
+        with self._lock:
+            self._enabled = False
+
+            self._log.debug("stopping workers")
+            for worker in self._workers:
+                worker.running = False
+
+            self._log.debug("joining on workers")
+            for worker in self._workers:
+                if worker.is_alive():
+                    worker.join()
+
+            while not self._connections.empty():
+                try:
+                    connection = self._connections.get_nowait()
+                    connection.close()
+                except Empty:
+                    break
+
+    def close_socket(self, msg):
+        with self._lock:
+            if self._socket is not None:
+                self._socket.shutdown(socket.SHUT_RD)
+                self._socket.close()
+                self._socket = None
+                self._log.info("Closed socket with msg={}".format(msg))
+
+    def _listen(self):
+        if self._listen_ip is None or self.listen_port is None:
+            self._log.error("address not properly configured to listen")
+            return
+
+        self._log.info("listen for incomming connections")
+        try:
+            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+            self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+            # self._socket.bind((self.listen_ip, self.listen_port))
+            self._socket.bind(("0.0.0.0", self.listen_port))
+            self._socket.settimeout(1)
+
+            while self.enabled:
+                
+                try:
+                    self._socket.listen(1)
+                    connection, address = self._socket.accept()
+                except socket.timeout:
+                    continue
+                self._log.info("Accepted connection from {}".format(address))
+
+                self._connections.put(connection)
+            else:
+                self.stop()
+        except socket.error as msg:
+            self.close_socket(msg)
+
+class PongStatsHandler(tornado.web.RequestHandler):
+    def initialize(self, pong_instance):
+        self._pong_instance = pong_instance
+
+    def get(self):
+        response = {'ping-request-rx-count': self._pong_instance.request_count,
+                    'ping-response-tx-count': self._pong_instance.response_count}
+
+        self.write(response)
+
+
+class PongServerHandler(tornado.web.RequestHandler):
+    def initialize(self, pong_instance):
+        self._pong_instance = pong_instance
+
+    def get(self, args):
+        response = {'ip': self._pong_instance.listen_ip,
+                    'port': self._pong_instance.listen_port}
+
+        self.write(response)
+
+    def post(self, args):
+        target = get_url_target(self.request.uri)
+        body = self.request.body.decode("utf-8")
+        body_header = self.request.headers.get("Content-Type")
+
+        if "json" not in body_header:
+            self.write("Content-Type must be some kind of json")
+            self.set_status(405)
+            return
+
+        try:
+            json_dicts = json.loads(body)
+        except:
+            self.write("Content-Type must be some kind of json")
+            self.set_status(405)
+            return
+
+        if target == "server":
+
+            if type(json_dicts['port']) is not int:
+                self.set_status(405)
+                return
+
+            if type(json_dicts['ip']) not in (str, unicode):
+                self.set_status(405)
+                return
+
+            self._pong_instance.listen_ip = json_dicts['ip']
+            self._pong_instance.listen_port = json_dicts['port']
+
+        else:
+            self.set_status(404)
+            return
+
+        self.set_status(200)
+
+class PongAdminStatusHandler(tornado.web.RequestHandler):
+    def initialize(self, pong_instance):
+        self._pong_instance = pong_instance
+
+    def get(self, args):
+        target = get_url_target(self.request.uri)
+        
+        if target == "state":
+            value = "enabled" if self._pong_instance.enabled else "disabled"
+
+            response = { 'adminstatus': value }
+        else:
+            self.set_status(404)
+            return
+
+        self.write(response)
+
+    def post(self, args):
+        target = get_url_target(self.request.uri)
+        body = self.request.body.decode("utf-8")
+        body_header = self.request.headers.get("Content-Type")
+
+        if "json" not in body_header:
+            self.write("Content-Type must be some kind of json")
+            self.set_status(405)            
+            return
+            
+        try:
+            json_dicts = json.loads(body)
+        except:
+            self.write("Content-Type must be some kind of json")
+            self.set_status(405)            
+            return
+
+        if target == "state":
+            if type(json_dicts['enable']) is not bool:
+                self.set_status(405)
+                return
+
+            if json_dicts['enable']:
+                if not self._pong_instance.enabled:
+                    self._pong_instance.start()
+            else:
+                self._pong_instance.stop()
+
+        else:
+            self.set_status(404)
+            return
+
+        self.set_status(200)
+
+
diff --git a/examples/ping_pong_ns/ping_pong_ns/pong.service b/examples/ping_pong_ns/ping_pong_ns/pong.service
new file mode 100644 (file)
index 0000000..7d94836
--- /dev/null
@@ -0,0 +1,12 @@
+[Unit]
+Description=Pong Server
+After=syslog.target network.target
+
+[Service]
+Type=simple
+ExecStart=/opt/rift/ping_pong_ns/start_pong
+
+[Install]
+WantedBy=multi-user.target
+
+
diff --git a/examples/ping_pong_ns/ping_pong_ns/prepare_ping_pong_qcow.sh b/examples/ping_pong_ns/ping_pong_ns/prepare_ping_pong_qcow.sh
new file mode 100755 (executable)
index 0000000..e73144a
--- /dev/null
@@ -0,0 +1,137 @@
+#! /bin/bash
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 07/24/2014
+# 
+
+##
+# This script is used to copy the riftware software into the qcow image
+# This script must be run on the grunt machine as root
+##
+
+set -x
+set -e
+
+if ! [ $# -eq 1 ]; then
+    echo "Usage: $0 <ping-pong-ns-dir>"
+    echo "       Example:"
+    echo "       $0 /net/boson/home1/agunturu/lepton/atg/modules/core/mc/examples/ping_pong_ns"
+    exit 1
+fi
+
+# Currently returning 0 on error as this script fails in Bangalore
+# systems and causes the jenkins spot_debug to fail
+function cleanup {
+  if [ "$(ls -A $MOUNT_PT)" ]; then
+    guestunmount $MOUNT_PT
+  fi
+  exit 0
+}
+trap cleanup EXIT
+
+MOUNT_PT=ping_pong/mnt$$
+
+if  [ -d $MOUNT_PT ]; then
+  echo "ping_pong_mnt directory exists - deleting..!!"
+  guestunmount $MOUNT_PT || true
+  rm -rf ping_pong
+fi
+
+mkdir -p $MOUNT_PT
+FC20QCOW=Fedora-x86_64-20-20131211.1-sda.qcow2
+PINGQCOW=Fedora-x86_64-20-20131211.1-sda-ping.qcow2
+PONGQCOW=Fedora-x86_64-20-20131211.1-sda-pong.qcow2
+
+if [ ! -e ${RIFT_ROOT}/images/${FC20QCOW} ]; then
+    echo >&2 "Warn: Cannot prepare ping_pong qcow due to missing FC20 image: ${RIFT_ROOT}/images/${FC20QCOW}"
+    exit 0
+fi
+
+echo "Copying $FC20QCOW"
+cp ${RIFT_ROOT}/images/${FC20QCOW} ping_pong/${PINGQCOW}
+chmod +w ping_pong/${PINGQCOW}
+cp ${RIFT_ROOT}/images/${FC20QCOW} ping_pong/${PONGQCOW}
+chmod +w ping_pong/${PONGQCOW}
+
+CURRENT_DIR=$PWD
+echo "Mounting guestfs for $PINGQCOW"
+guestmount -a ping_pong/$PINGQCOW -m /dev/sda1 $MOUNT_PT
+
+echo "Setting up resolv.conf"
+# removed RIFT.io lab-centric setup in RIFT-11991
+#echo "search lab.riftio.com eng.riftio.com riftio.com" >  $MOUNT_PT/etc/resolv.conf
+#echo "nameserver 10.64.1.3" >>  $MOUNT_PT/etc/resolv.conf
+#echo "PEERDNS=no" >> $MOUNT_PT/etc/sysconfig/network-scripts/ifcfg-eth0
+
+# add a valid DNS server just in case
+echo "nameserver 8.8.8.8" >  $MOUNT_PT/etc/resolv.conf
+echo "DEFROUTE=yes" >> $MOUNT_PT/etc/sysconfig/network-scripts/ifcfg-eth0
+
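+# Pre-stage DHCP configs for the data-plane interfaces (eth1, eth2); they stay
+# down at boot (ONBOOT=no) and are brought up later, e.g. by cloud-init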
+for i in 1 2
+do
+    cat <<EOF >> $MOUNT_PT/etc/sysconfig/network-scripts/ifcfg-eth$i
+DEVICE="eth$i"
+BOOTPROTO="dhcp"
+ONBOOT="no"
+TYPE="Ethernet"
+DEFROUTE=no
+PEERDNS=no
+EOF
+done
+
+
+echo "Copying ping/pong ns..."
+cd $MOUNT_PT/opt
+mkdir rift
+cd rift
+cp -r $1 .
+cd $CURRENT_DIR
+mv $MOUNT_PT/opt/rift/ping_pong_ns/ping.service $MOUNT_PT/etc/systemd/system
+cp -ar /usr/lib/python2.7/site-packages/tornado $MOUNT_PT/usr/lib/python2.7/site-packages/
+guestunmount $MOUNT_PT
+
+echo "Mounting guestfs for $PINGQCOW"
+guestmount -a ping_pong/$PONGQCOW -m /dev/sda1 $MOUNT_PT
+
+echo "Setting up resolv.conf"
+echo "search lab.riftio.com eng.riftio.com riftio.com" >  $MOUNT_PT/etc/resolv.conf
+echo "nameserver 10.64.1.3" >>  $MOUNT_PT/etc/resolv.conf
+echo "PEERDNS=no" >> $MOUNT_PT/etc/sysconfig/network-scripts/ifcfg-eth0
+echo "DEFROUTE=yes" >> $MOUNT_PT/etc/sysconfig/network-scripts/ifcfg-eth0
+
+for i in 1 2
+do
+    cat <<EOF >> $MOUNT_PT/etc/sysconfig/network-scripts/ifcfg-eth$i
+DEVICE="eth$i"
+BOOTPROTO="dhcp"
+ONBOOT="no"
+DEFROUTE=no
+TYPE="Ethernet"
+PEERDNS=no
+EOF
+done
+
+echo "Copying ping/pong ns..."
+cd $MOUNT_PT/opt
+mkdir rift
+cd rift
+cp -r $1 .
+cd $CURRENT_DIR
+cp -ar /usr/lib/python2.7/site-packages/tornado $MOUNT_PT/usr/lib/python2.7/site-packages/
+mv $MOUNT_PT/opt/rift/ping_pong_ns/pong.service $MOUNT_PT/etc/systemd/system
+guestunmount $MOUNT_PT
diff --git a/examples/ping_pong_ns/ping_pong_ns/start_ping b/examples/ping_pong_ns/ping_pong_ns/start_ping
new file mode 100755 (executable)
index 0000000..fb29422
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/bash
+ulimit -c 0 
+#yum install -y python-tornado
+python /opt/rift/ping_pong_ns/start_ping.py 2>&1 | logger
+
diff --git a/examples/ping_pong_ns/ping_pong_ns/start_ping.py b/examples/ping_pong_ns/ping_pong_ns/start_ping.py
new file mode 100644 (file)
index 0000000..ace5981
--- /dev/null
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+import signal
+import logging
+
+import tornado
+import tornado.httpserver
+
+from ping import (
+    Ping,
+    PingAdminStatusHandler,
+    PingServerHandler,
+    PingRateHandler,
+    PingStatsHandler,
+)
+from util.util import (
+    VersionHandler,    
+)
+
+logging.basicConfig(level=logging.DEBUG,
+                    format='(%(threadName)-10s) %(name)-8s :: %(message)s',
+)
+
+def main():
+    log = logging.getLogger("main")
+
+    # parse arguments
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--ping-manager-port",
+        required=False,
+        default="18888",
+        help="port number for ping")
+
+    arguments = parser.parse_args()
+
+    # setup application
+    log.debug("setup application")
+    ping_instance = Ping()
+    ping_application_arguments = {'ping_instance': ping_instance}
+    ping_application = tornado.web.Application([
+        (r"/api/v1/ping/stats", PingStatsHandler, ping_application_arguments),
+        (r"/api/v1/ping/adminstatus/([a-z]+)", PingAdminStatusHandler, ping_application_arguments),
+        (r"/api/v1/ping/server/?([0-9a-z\.]*)", PingServerHandler, ping_application_arguments),
+        (r"/api/v1/ping/rate/?([0-9]*)", PingRateHandler, ping_application_arguments),
+        (r"/version", VersionHandler, ping_application_arguments)
+    ])
+    ping_server = tornado.httpserver.HTTPServer(
+        ping_application)
+
+    # setup SIGINT handler
+    log.debug("setup SIGINT handler")
+    def signal_handler(signal, frame):
+        print("") # print newline to clear user input
+        log.info("Exiting")
+        ping_instance.stop()
+        ping_server.stop()
+        log.info("Sayonara!")
+        quit()
+
+    signal.signal(signal.SIGINT, signal_handler)
+    
+    # start
+    log.debug("start")
+    try:
+        ping_server.listen(arguments.ping_manager_port)
+    except EnvironmentError:
+        print("port %s is already in use, exiting" % arguments.ping_manager_port)
+        return
+
+    tornado.ioloop.IOLoop.instance().start()
+    
+if __name__ == "__main__":
+    main()
+
+
diff --git a/examples/ping_pong_ns/ping_pong_ns/start_pong b/examples/ping_pong_ns/ping_pong_ns/start_pong
new file mode 100755 (executable)
index 0000000..af46646
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/bash
+ulimit -c 0 
+#yum install -y python-tornado
+python /opt/rift/ping_pong_ns/start_pong.py 2>&1 | logger
+
diff --git a/examples/ping_pong_ns/ping_pong_ns/start_pong.py b/examples/ping_pong_ns/ping_pong_ns/start_pong.py
new file mode 100644 (file)
index 0000000..235efb2
--- /dev/null
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+import signal
+import logging
+
+import tornado
+import tornado.httpserver
+
+from pong import (
+    Pong,
+    PongAdminStatusHandler,
+    PongServerHandler,
+    PongStatsHandler,
+)
+from util.util import (
+    VersionHandler,    
+)
+
+logging.basicConfig(level=logging.DEBUG,
+                    format='(%(threadName)-10s) %(name)-8s :: %(message)s',
+)
+
+def main():
+    log = logging.getLogger("main")
+
+    # parse arguments
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--pong-manager-port",
+        required=False,
+        default="18889",
+        help="port number for pong")
+    parser.add_argument(
+        "--worker-count",
+        required=False,
+        type=int,
+        default=5,
+        help="number of pong worker threads")
+
+    arguments = parser.parse_args()
+
+    # setup application
+    log.debug("setup application")
+    pong_instance = Pong(arguments.worker_count)
+    pong_application_arguments = {'pong_instance': pong_instance}
+    pong_application = tornado.web.Application([
+        (r"/version", VersionHandler, pong_application_arguments),
+        (r"/api/v1/pong/stats", PongStatsHandler, pong_application_arguments),
+        (r"/api/v1/pong/server/?([0-9a-z\.]*)", PongServerHandler, pong_application_arguments),
+        (r"/api/v1/pong/adminstatus/([a-z]+)", PongAdminStatusHandler, pong_application_arguments)
+    ])
+    pong_server = tornado.httpserver.HTTPServer(
+        pong_application)
+
+    # setup SIGINT handler
+    log.debug("setup SIGINT handler")
+    def signal_handler(signal, frame):
+        print("") # print newline to clear user input
+        log.info("Exiting")
+        pong_instance.stop()
+        pong_server.stop()
+        log.info("Sayonara!")
+        quit()
+
+    signal.signal(signal.SIGINT, signal_handler)
+    
+    # start
+    log.debug("pong application listening on %s" % arguments.pong_manager_port)
+    try:
+        pong_server.listen(arguments.pong_manager_port)
+    except EnvironmentError:
+        print("port %s is already in use, exiting" % arguments.pong_manager_port)
+        return
+    tornado.ioloop.IOLoop.instance().start()
+    
+if __name__ == "__main__":
+    main()
+
+
diff --git a/examples/ping_pong_ns/ping_pong_ns/test/test.sh b/examples/ping_pong_ns/ping_pong_ns/test/test.sh
new file mode 100644 (file)
index 0000000..c05a7d5
--- /dev/null
@@ -0,0 +1,150 @@
+#!/bin/bash
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
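+# Exercise the ping/pong REST APIs from a host that can reach the VNFs, e.g.:
+#   ./test.sh ping enable
+#   ./test.sh ping rate 10
+#   ./test.sh pong server 14.0.0.2 5555
+#   ./test.sh stats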
+
+pong_ip='14.0.0.2'
+pong_port=18889
+
+ping_ip='14.0.0.3'
+
+ping_port=18888
+
+if [ "$1" == "pong" ];
+then
+    if [ "$2" == "enable" ];
+    then
+       echo "enable pong"
+
+       curl -D /dev/stdout \
+            -H "Accept: application/vnd.yang.data+xml" \
+            -H "Content-Type: application/vnd.yang.data+json" \
+            -X POST \
+            -d "{\"enable\":true}" \
+            http://${pong_ip}:${pong_port}/api/v1/pong/adminstatus/state
+    fi
+    if [ "$2" == "disable" ];
+    then
+       echo "disable pong"
+       
+       curl -D /dev/stdout \
+            -H "Accept: application/vnd.yang.data+xml" \
+            -H "Content-Type: application/vnd.yang.data+json" \
+            -X POST \
+            -d "{\"enable\":false}" \
+            http://${pong_ip}:${pong_port}/api/v1/pong/adminstatus/state
+    fi
+
+    if [ "$2" == "server" ];
+    then
+       echo "set server"
+       
+       curl -D /dev/stdout \
+            -H "Accept: application/vnd.yang.data+xml" \
+            -H "Content-Type: application/vnd.yang.data+json" \
+            -X POST \
+            -d "{\"ip\":\"$3\", \"port\":$4}" \
+            http://${pong_ip}:${pong_port}/api/v1/pong/server
+    fi
+
+    echo ""
+fi
+
+if [ "$1" == "ping" ];
+then
+    if [ "$2" == "enable" ];
+    then
+       echo "enable ping"
+
+       curl -D /dev/stdout \
+            -H "Accept: application/vnd.yang.data+xml" \
+            -H "Content-Type: application/vnd.yang.data+json" \
+            -X POST \
+            -d "{\"enable\":true}" \
+            http://${ping_ip}:${ping_port}/api/v1/ping/adminstatus/state
+    fi
+    if [ "$2" == "disable" ];
+    then
+       echo "disable ping"
+       
+       curl -D /dev/stdout \
+            -H "Accept: application/vnd.yang.data+xml" \
+            -H "Content-Type: application/vnd.yang.data+json" \
+            -X POST \
+            -d "{\"enable\":false}" \
+            http://${ping_ip}:${ping_port}/api/v1/ping/adminstatus/state
+    fi
+    echo ""
+
+    if [ "$2" == "rate" ];
+    then
+       echo "disable ping"
+       
+       curl -D /dev/stdout \
+            -H "Accept: application/vnd.yang.data+xml" \
+            -H "Content-Type: application/vnd.yang.data+json" \
+            -X POST \
+            -d "{\"rate\":$3}" \
+            http://${ping_ip}:${ping_port}/api/v1/ping/rate
+    fi
+    echo ""
+
+    if [ "$2" == "server" ];
+    then
+       echo "set server"
+       
+       curl -D /dev/stdout \
+            -H "Accept: application/vnd.yang.data+xml" \
+            -H "Content-Type: application/vnd.yang.data+json" \
+            -X POST \
+            -d "{\"ip\":\"$3\", \"port\":$4}" \
+            http://${ping_ip}:${ping_port}/api/v1/ping/server
+    fi
+    echo ""
+
+    
+fi
+
+if [ "$1" == "stats" ];
+then
+    echo "ping stats:"
+    curl http://${ping_ip}:${ping_port}/api/v1/ping/stats
+    echo ""
+
+    echo "pong stats:"
+    curl http://${pong_ip}:${pong_port}/api/v1/pong/stats
+    echo ""
+fi
+
+if [ "$1" == "config" ];
+then
+    echo "ping server:"
+    curl http://${ping_ip}:${ping_port}/api/v1/ping/server
+    echo ""
+    echo "ping rate:"
+    curl http://${ping_ip}:${ping_port}/api/v1/ping/rate
+    echo ""
+    echo "ping admin status:"
+    curl http://${ping_ip}:${ping_port}/api/v1/ping/adminstatus/state
+    echo ""
+    echo "pong server:"
+    curl http://${pong_ip}:${pong_port}/api/v1/pong/server
+    echo ""
+    echo "pong admin status:"
+    curl http://${pong_ip}:${pong_port}/api/v1/pong/adminstatus/state
+    echo ""
+fi
diff --git a/examples/ping_pong_ns/ping_pong_ns/user-data b/examples/ping_pong_ns/ping_pong_ns/user-data
new file mode 100644 (file)
index 0000000..9bf1d5b
--- /dev/null
@@ -0,0 +1,8 @@
+#cloud-config
+password: fedora
+chpasswd: { expire: False }
+ssh_pwauth: True
+runcmd:
+  - [ systemctl, daemon-reload ]
+  - [ systemctl, enable, ping.service ]
+  - [ systemctl, start, --no-block, ping.service ]
diff --git a/examples/ping_pong_ns/ping_pong_ns/util/__init__.py b/examples/ping_pong_ns/ping_pong_ns/util/__init__.py
new file mode 100644 (file)
index 0000000..966870a
--- /dev/null
@@ -0,0 +1,15 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
diff --git a/examples/ping_pong_ns/ping_pong_ns/util/util.py b/examples/ping_pong_ns/ping_pong_ns/util/util.py
new file mode 100644 (file)
index 0000000..66c11fc
--- /dev/null
@@ -0,0 +1,38 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from datetime import date
+import urlparse
+
+import tornado.web
+
+class VersionHandler(tornado.web.RequestHandler):
+    def initialize(self, **kwargs):
+        # the ping/pong apps pass their instance under different kwarg names
+        self._instance = kwargs.get('ping_instance') or kwargs.get('pong_instance')
+
+    def get(self):
+        response = { 'version': '3.5.1',
+                     'last_build':  date.today().isoformat() }
+        self.write(response)
+
+def get_url_target(url):
+    url_parts = urlparse.urlsplit(url)
+    whole_url = url_parts[2]
+
+    url_pieces = whole_url.split("/")
+
+    return url_pieces[-1]
diff --git a/examples/ping_pong_ns/ping_pong_nsd.py b/examples/ping_pong_ns/ping_pong_nsd.py
new file mode 120000 (symlink)
index 0000000..3147ac8
--- /dev/null
@@ -0,0 +1 @@
+rift/mano/examples/ping_pong_nsd.py
\ No newline at end of file
diff --git a/examples/ping_pong_ns/rift/mano/__init__.py b/examples/ping_pong_ns/rift/mano/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/examples/ping_pong_ns/rift/mano/examples/__init__.py b/examples/ping_pong_ns/rift/mano/examples/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/examples/ping_pong_ns/rift/mano/examples/ping_config.py b/examples/ping_pong_ns/rift/mano/examples/ping_config.py
new file mode 100755 (executable)
index 0000000..4e5fd35
--- /dev/null
@@ -0,0 +1,195 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+import logging
+import os
+import stat
+import subprocess
+import sys
+import time
+import yaml
+
+def ping_config(run_dir, mgmt_ip, mgmt_port, pong_cp, logger, dry_run):
+    sh_file = "{}/ping_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
+    logger.debug("Creating script file %s" % sh_file)
+    f = open(sh_file, "w")
+    f.write(r'''#!/bin/bash
+
+# Rest API config
+ping_mgmt_ip='{}'
+ping_mgmt_port={}
+
+# VNF specific configuration
+pong_server_ip='{}'
+ping_rate=5
+server_port=5555
+'''.format(mgmt_ip, mgmt_port, pong_cp))
+
+    f.write(r'''
+# Check if the port is open
+DELAY=1
+MAX_TRIES=60
+COUNT=0
+while true; do
+    COUNT=$(expr $COUNT + 1)
+    timeout 1 bash -c "cat < /dev/null > /dev/tcp/${ping_mgmt_ip}/${ping_mgmt_port}"
+    rc=$?
+    if [ $rc -ne 0 ]
+    then
+        echo "Failed to connect to server ${ping_mgmt_ip}:${ping_mgmt_port} for ping with $rc!"
+        if [ ${COUNT} -gt ${MAX_TRIES} ]; then
+            exit $rc
+        fi
+        sleep ${DELAY}
+    else
+        break
+    fi
+done
+
+# Make rest API calls to configure VNF
+curl -D /dev/stdout \
+    -H "Accept: application/vnd.yang.data+xml" \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d "{\"ip\":\"$pong_server_ip\", \"port\":$server_port}" \
+    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/server
+rc=$?
+if [ $rc -ne 0 ]
+then
+    echo "Failed to set server info for ping!"
+    exit $rc
+fi
+
+curl -D /dev/stdout \
+    -H "Accept: application/vnd.yang.data+xml" \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d "{\"rate\":$ping_rate}" \
+    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/rate
+rc=$?
+if [ $rc -ne 0 ]
+then
+    echo "Failed to set ping rate!"
+    exit $rc
+fi
+
+output=$(curl -D /dev/stdout \
+    -H "Accept: application/vnd.yang.data+xml" \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d "{\"enable\":true}" \
+    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/adminstatus/state)
+if [[ $output == *"Internal Server Error"* ]]
+then
+    echo $output
+    exit 3
+else
+    echo $output
+fi
+
+exit 0
+''')
+    f.close()
+    os.chmod(sh_file, stat.S_IRWXU)
+    if not dry_run:
+        rc = subprocess.call(sh_file, shell=True)
+        if rc:
+            logger.error("Config failed: {}".format(rc))
+            return False
+    return True
+
+
+
+def main(argv=sys.argv[1:]):
+    try:
+        parser = argparse.ArgumentParser()
+        parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
+        parser.add_argument("--dry-run", action="store_true")
+        parser.add_argument("--quiet", "-q", dest="verbose", action="store_false")
+        args = parser.parse_args(argv)
+
+        run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
+        if not os.path.exists(run_dir):
+            os.makedirs(run_dir)
+        log_file = "{}/rift_ping_config-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
+        logging.basicConfig(filename=log_file, level=logging.DEBUG)
+        logger = logging.getLogger()
+
+        ch = logging.StreamHandler()
+        if args.verbose:
+            ch.setLevel(logging.DEBUG)
+        else:
+            ch.setLevel(logging.INFO)
+
+        # create formatter and add it to the handlers
+        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+        ch.setFormatter(formatter)
+        logger.addHandler(ch)
+
+    except Exception as e:
+        print("Got exception:{}".format(e))
+        raise e
+
+    try:
+        dry_run = args.dry_run
+
+        yaml_str = args.yaml_cfg_file.read()
+        logger.debug("Input YAML file: {}".format(yaml_str))
+        yaml_cfg = yaml.safe_load(yaml_str)
+        logger.debug("Input YAML: {}".format(yaml_cfg))
+
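+        # The scaling-trigger YAML is expected to carry 'trigger', 'vnfrs_others'
+        # (e.g. the pong VNFRs) and 'vnfrs_in_group' (the scaled-out ping VNFRs)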
+        # Check if this is post scale out trigger
+        if yaml_cfg['trigger'] != 'post_scale_out':
+            logger.error("Unexpected trigger {}".
+                         format(yaml_cfg['trigger']))
+            raise ValueError("Unexpected trigger {}".format(yaml_cfg['trigger']))
+
+        pong_cp = ""
+        for vnfr in yaml_cfg['vnfrs_others']:
+            # Find the pong VNFR, assuming vnfr name will
+            # have pong_vnfd as a substring
+            if 'pong_vnfd' in vnfr['name']:
+                for cp in vnfr['connection_points']:
+                    logger.debug("Connection point {}".format(cp))
+                    if 'cp0' in cp['name']:
+                        pong_cp = cp['ip_address']
+                        break
+        if not pong_cp:
+            logger.error("Did not get Pong cp0 IP")
+            raise ValueError("Did not get Pong cp0 IP")
+
+        for vnfr in yaml_cfg['vnfrs_in_group']:
+            mgmt_ip = vnfr['rw_mgmt_ip']
+            mgmt_port = vnfr['rw_mgmt_port']
+            if ping_config(run_dir, mgmt_ip, mgmt_port, pong_cp, logger, dry_run):
+                logger.info("Successfully configured Ping {} at {}".
+                            format(vnfr['name'], mgmt_ip))
+            else:
+                logger.error("Config of ping {} with {} failed".
+                             format(vnfr['name'], mgmt_ip))
+                raise RuntimeError("Config of ping {} with {} failed".format(
+                    vnfr['name'], mgmt_ip))
+
+    except Exception as e:
+        logger.error("Got exception {}".format(e))
+        logger.exception(e)
+        raise e
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/ping_pong_ns/rift/mano/examples/ping_config_ut.sh b/examples/ping_pong_ns/rift/mano/examples/ping_config_ut.sh
new file mode 100755 (executable)
index 0000000..67f3f19
--- /dev/null
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+echo "Executed ping config!"
diff --git a/examples/ping_pong_ns/rift/mano/examples/ping_pong_nsd.py b/examples/ping_pong_ns/rift/mano/examples/ping_pong_nsd.py
new file mode 100755 (executable)
index 0000000..bb834ed
--- /dev/null
@@ -0,0 +1,1075 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import os
+import shutil
+import sys
+import uuid
+
+import gi
+gi.require_version('RwYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('VnfdYang', '1.0')
+gi.require_version('RwNsdYang', '1.0')
+gi.require_version('NsdYang', '1.0')
+
+
+from gi.repository import (
+    RwNsdYang,
+    NsdYang,
+    RwVnfdYang,
+    VnfdYang,
+    RwYang,
+    )
+
+
+try:
+    import rift.mano.config_data.config as config_data
+except ImportError:
+    # Load modules from common which are not yet installed
+    path = os.path.abspath(os.path.join(
+        os.path.dirname(os.path.abspath(__file__)),
+        "../../../common/python/rift/mano"))
+    sys.path.append(path)
+
+    import config_data.config as config_data
+
+
+NUM_PING_INSTANCES = 1
+MAX_VNF_INSTANCES_PER_NS = 10
+use_epa = False
+aws = False
+pingcount = NUM_PING_INSTANCES
+use_ping_cloud_init_file = ""
+use_pong_cloud_init_file = ""
+
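+# cloud-init user-data injected into the ping VM: sets a login password and
+# enables/starts ping.service at boot (eth1 is brought up for the data link)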
+PING_USERDATA_FILE = '''#cloud-config
+password: fedora
+chpasswd: { expire: False }
+ssh_pwauth: True
+runcmd:
+  - [ systemctl, daemon-reload ]
+  - [ systemctl, enable, ping.service ]
+  - [ systemctl, start, --no-block, ping.service ]
+  - [ ifup, eth1 ]
+'''
+
+PONG_USERDATA_FILE = '''#cloud-config
+password: fedora
+chpasswd: { expire: False }
+ssh_pwauth: True
+runcmd:
+  - [ systemctl, daemon-reload ]
+  - [ systemctl, enable, pong.service ]
+  - [ systemctl, start, --no-block, pong.service ]
+  - [ ifup, eth1 ]
+'''
+
+
+class UnknownVNFError(Exception):
+    pass
+
+
+class ManoDescriptor(object):
+    def __init__(self, name):
+        self.name = name
+        self.descriptor = None
+
+    def write_to_file(self, module_list, outdir, output_format):
+        model = RwYang.Model.create_libncx()
+        for module in module_list:
+            model.load_module(module)
+
+        if output_format == 'json':
+            with open('%s/%s.json' % (outdir, self.name), "w") as fh:
+                fh.write(self.descriptor.to_json(model))
+        elif output_format.strip() == 'xml':
+            with open('%s/%s.xml' % (outdir, self.name), "w") as fh:
+                fh.write(self.descriptor.to_xml_v2(model))
+        elif output_format.strip() == 'yaml':
+            with open('%s/%s.yaml' % (outdir, self.name), "w") as fh:
+                fh.write(self.descriptor.to_yaml(model))
+        else:
+            raise Exception("Invalid output format for the descriptor")
+
+    def get_json(self, module_list):
+        model = RwYang.Model.create_libncx()
+        for module in module_list:
+            model.load_module(module)
+        print(self.descriptor.to_json(model))
+
+
+class VirtualNetworkFunction(ManoDescriptor):
+    def __init__(self, name, instance_count=1):
+        self.vnfd_catalog = None
+        self.vnfd = None
+        self.instance_count = instance_count
+        self._placement_groups = []
+        super(VirtualNetworkFunction, self).__init__(name)
+
+    def add_placement_group(self, group):
+        self._placement_groups.append(group)
+
+    def compose(self, image_name, cloud_init="", cloud_init_file="", endpoint=None, mon_params=[],
+                mon_port=8888, mgmt_port=8888, num_vlr_count=1, num_ivlr_count=1,
+                num_vms=1, image_md5sum=None, mano_ut=False):
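+        """Compose the VNFD for this VNF.
+
+        Builds num_vms VDUs, each with num_vlr_count external connection
+        points and, when mano_ut is set, num_ivlr_count internal virtual
+        links used by the MANO unit tests.
+        """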
+        self.descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog()
+        self.id = str(uuid.uuid1())
+        vnfd = self.descriptor.vnfd.add()
+        vnfd.id = self.id
+        vnfd.name = self.name
+        vnfd.short_name = self.name
+        vnfd.vendor = 'RIFT.io'
+        vnfd.logo = 'rift_logo.png'
+        vnfd.description = 'This is an example RIFT.ware VNF'
+        vnfd.version = '1.0'
+
+        self.vnfd = vnfd
+
+        if mano_ut is True:
+            internal_vlds = []
+            for i in range(num_ivlr_count):
+                internal_vld = vnfd.internal_vld.add()
+                internal_vld.id = 'ivld%s' % i
+                internal_vld.name = 'fabric%s' % i
+                internal_vld.short_name = 'fabric%s' % i
+                internal_vld.description = 'Virtual link for internal fabric%s' % i
+                internal_vld.type_yang = 'ELAN'
+                internal_vlds.append(internal_vld)
+
+        for i in range(num_vlr_count):
+            cp = vnfd.connection_point.add()
+            cp.type_yang = 'VPORT'
+            cp.name = '%s/cp%d' % (self.name, i)
+
+        if endpoint is not None:
+            endp = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_HttpEndpoint(
+                    path=endpoint, port=mon_port, polling_interval_secs=2
+                    )
+            vnfd.http_endpoint.append(endp)
+
+        # Monitoring params
+        for monp_dict in mon_params:
+            monp = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_MonitoringParam.from_dict(monp_dict)
+            monp.http_endpoint_ref = endpoint
+            vnfd.monitoring_param.append(monp)
+
+
+        for i in range(num_vms):
+            # VDU Specification
+            vdu = vnfd.vdu.add()
+            vdu.id = 'iovdu_%s' % i
+            vdu.name = 'iovdu_%s' % i
+            vdu.count = 1
+            # vdu.mgmt_vpci = '0000:00:20.0'
+
+            # specify the VM flavor
+            if use_epa:
+                vdu.vm_flavor.vcpu_count = 4
+                vdu.vm_flavor.memory_mb = 1024
+                vdu.vm_flavor.storage_gb = 4
+            else:
+                vdu.vm_flavor.vcpu_count = 1
+                vdu.vm_flavor.memory_mb = 512
+                vdu.vm_flavor.storage_gb = 4
+
+            # Management interface
+            mgmt_intf = vnfd.mgmt_interface
+            mgmt_intf.vdu_id = vdu.id
+            mgmt_intf.port = mgmt_port
+            mgmt_intf.dashboard_params.path = endpoint
+            mgmt_intf.dashboard_params.port = mgmt_port
+
+            if cloud_init_file:
+                vdu.cloud_init_file = cloud_init_file
+            else:
+                vdu.cloud_init = cloud_init
+                if aws:
+                    vdu.cloud_init += "  - [ systemctl, restart, --no-block, elastic-network-interfaces.service ]\n"
+
+            # specify the guest EPA
+            if use_epa:
+                vdu.guest_epa.trusted_execution = False
+                vdu.guest_epa.mempage_size = 'LARGE'
+                vdu.guest_epa.cpu_pinning_policy = 'DEDICATED'
+                vdu.guest_epa.cpu_thread_pinning_policy = 'PREFER'
+                vdu.guest_epa.numa_node_policy.node_cnt = 2
+                vdu.guest_epa.numa_node_policy.mem_policy = 'STRICT'
+
+                node = vdu.guest_epa.numa_node_policy.node.add()
+                node.id = 0
+                node.memory_mb = 512
+                node.vcpu = [0, 1]
+
+                node = vdu.guest_epa.numa_node_policy.node.add()
+                node.id = 1
+                node.memory_mb = 512
+                node.vcpu = [2, 3]
+
+                # specify the vswitch EPA
+                vdu.vswitch_epa.ovs_acceleration = 'DISABLED'
+                vdu.vswitch_epa.ovs_offload = 'DISABLED'
+
+                # Specify the hypervisor EPA
+                vdu.hypervisor_epa.type_yang = 'PREFER_KVM'
+
+                # Specify the host EPA
+                # vdu.host_epa.cpu_model = 'PREFER_SANDYBRIDGE'
+                # vdu.host_epa.cpu_arch = 'PREFER_X86_64'
+                # vdu.host_epa.cpu_vendor = 'PREFER_INTEL'
+                # vdu.host_epa.cpu_socket_count = 2
+                # vdu.host_epa.cpu_core_count = 8
+                # vdu.host_epa.cpu_core_thread_count = 2
+                # vdu.host_epa.cpu_feature = ['PREFER_AES', 'REQUIRE_VME', 'PREFER_MMX','REQUIRE_SSE2']
+
+            if aws:
+                vdu.image = 'rift-ping-pong'
+            else:
+                vdu.image = image_name
+                if image_md5sum is not None:
+                    vdu.image_checksum = image_md5sum
+
+            if mano_ut is True:
+                for i in range(num_ivlr_count):
+                    internal_cp = vdu.internal_connection_point.add()
+                    if "ping" in vnfd.name:
+                        cp_name = "ping"
+                    else:
+                        cp_name = "pong"
+                    internal_cp.name = cp_name + "/icp{}".format(i)
+                    internal_cp.id = cp_name + "/icp{}".format(i)
+                    internal_cp.type_yang = 'VPORT'
+                    internal_vlds[i].internal_connection_point_ref.append(internal_cp.id)
+
+                    internal_interface = vdu.internal_interface.add()
+                    internal_interface.name = 'fab%d' % i
+                    internal_interface.vdu_internal_connection_point_ref = internal_cp.id
+                    internal_interface.virtual_interface.type_yang = 'VIRTIO'
+
+                    # internal_interface.virtual_interface.vpci = '0000:00:1%d.0'%i
+
+            for i in range(num_vlr_count):
+                external_interface = vdu.external_interface.add()
+                external_interface.name = 'eth%d' % i
+                external_interface.vnfd_connection_point_ref = '%s/cp%d' % (self.name, i)
+                # both EPA and non-EPA builds currently use VIRTIO for the
+                # external virtual interface
+                external_interface.virtual_interface.type_yang = 'VIRTIO'
+                # external_interface.virtual_interface.vpci = '0000:00:2%d.0'%i
+
+        for group in self._placement_groups:
+            placement_group = vnfd.placement_groups.add()
+            placement_group.name = group.name
+            placement_group.requirement = group.requirement
+            placement_group.strategy = group.strategy
+            if group.vdu_list:
+                ### Add specific VDUs to placement group
+                for vdu in group.vdu_list:
+                    member_vdu = placement_group.member_vdus.add()
+                    member_vdu.member_vdu_ref = vdu.id
+            else:
+                ### Add all VDUs to placement group
+                for vdu in vnfd.vdu:
+                    member_vdu = placement_group.member_vdus.add()
+                    member_vdu.member_vdu_ref = vdu.id
+
+
+    def write_to_file(self, outdir, output_format):
+        dirpath = "%s/%s" % (outdir, self.name)
+        if not os.path.exists(dirpath):
+            os.makedirs(dirpath)
+        super(VirtualNetworkFunction, self).write_to_file(['vnfd', 'rw-vnfd'],
+                                                          dirpath,
+                                                          output_format)
+        self.add_scripts(outdir)
+
+    def add_scripts(self, outdir):
+        script_dir = os.path.join(outdir, self.name, 'cloud_init')
+        try:
+            os.makedirs(script_dir)
+        except OSError:
+            if not os.path.isdir(script_dir):
+                raise
+
+        if 'ping' in self.name:
+            script_file = os.path.join(script_dir, 'ping_cloud_init.cfg')
+            cfg = PING_USERDATA_FILE
+        else:
+            script_file = os.path.join(script_dir, 'pong_cloud_init.cfg')
+            cfg = PONG_USERDATA_FILE
+
+        with open(script_file, "w") as f:
+            f.write("{}".format(cfg))
+
+
+class NetworkService(ManoDescriptor):
+    def __init__(self, name):
+        super(NetworkService, self).__init__(name)
+        self._scale_groups = []
+        self.vnfd_config = {}
+        self._placement_groups = []
+
+    def ping_config(self, mano_ut, use_ns_init_conf):
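+        # Build the bash config template for the ping VNF. The
+        # <rw_mgmt_ip> and <rw_connection_point_name ...> tokens are
+        # placeholders substituted by the config agent at run time.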
+        suffix = ''
+        if mano_ut:
+            ping_cfg = r'''
+#!/bin/bash
+
+echo "!!!!!!!! Executed ping Configuration !!!!!!!!!"
+            '''
+        else:
+            ping_cfg = r'''
+#!/bin/bash
+
+# Rest API config
+ping_mgmt_ip='<rw_mgmt_ip>'
+ping_mgmt_port=18888
+
+# VNF specific configuration
+pong_server_ip='<rw_connection_point_name pong_vnfd%s/cp0>'
+ping_rate=5
+server_port=5555
+
+# Make rest API calls to configure VNF
+curl -D /dev/stdout \
+    -H "Accept: application/vnd.yang.data+xml" \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d "{\"ip\":\"$pong_server_ip\", \"port\":$server_port}" \
+    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/server
+rc=$?
+if [ $rc -ne 0 ]
+then
+    echo "Failed to set server info for ping!"
+    exit $rc
+fi
+
+curl -D /dev/stdout \
+    -H "Accept: application/vnd.yang.data+xml" \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d "{\"rate\":$ping_rate}" \
+    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/rate
+rc=$?
+if [ $rc -ne 0 ]
+then
+    echo "Failed to set ping rate!"
+    exit $rc
+fi
+
+''' % suffix
+            if use_ns_init_conf:
+                ping_cfg += "exit 0\n"
+            else:
+                ping_cfg += '''
+output=$(curl -D /dev/stdout \
+    -H "Accept: application/vnd.yang.data+xml" \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d "{\"enable\":true}" \
+    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/adminstatus/state)
+if [[ $output == *"Internal Server Error"* ]]
+then
+    echo $output
+    exit 3
+else
+    echo $output
+fi
+
+exit 0
+'''
+        return ping_cfg
+
+    def pong_config(self, mano_ut, use_ns_init_conf):
+        suffix = ''
+        if mano_ut:
+            pong_cfg = r'''
+#!/bin/bash
+
+echo "!!!!!!!! Executed pong Configuration !!!!!!!!!"
+            '''
+        else:
+            pong_cfg = r'''
+#!/bin/bash
+
+# Rest API configuration
+pong_mgmt_ip='<rw_mgmt_ip>'
+pong_mgmt_port=18889
+# username=<rw_username>
+# password=<rw_password>
+
+# VNF specific configuration
+pong_server_ip='<rw_connection_point_name pong_vnfd%s/cp0>'
+server_port=5555
+
+# Make Rest API calls to configure VNF
+curl -D /dev/stdout \
+    -H "Accept: application/vnd.yang.data+xml" \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d "{\"ip\":\"$pong_server_ip\", \"port\":$server_port}" \
+    http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/server
+rc=$?
+if [ $rc -ne 0 ]
+then
+    echo "Failed to set server(own) info for pong!"
+    exit $rc
+fi
+
+''' % suffix
+
+            if use_ns_init_conf:
+                pong_cfg += "exit 0\n"
+            else:
+                pong_cfg += '''
+curl -D /dev/stdout \
+    -H "Accept: application/vnd.yang.data+xml" \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d "{\"enable\":true}" \
+    http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/adminstatus/state
+rc=$?
+if [ $rc -ne 0 ]
+then
+    echo "Failed to enable pong service!"
+    exit $rc
+fi
+
+exit 0
+'''
+        return pong_cfg
+
+    def pong_fake_juju_config(self, vnf_config):
+
+        if vnf_config:
+            # Select "script" configuration
+            vnf_config.juju.charm = 'clearwater-aio-proxy'
+
+            # Set the initial config
+            vnf_config.create_initial_config_primitive()
+            init_config = VnfdYang.InitialConfigPrimitive.from_dict({
+                "seq": 1,
+                "name": "config",
+                "parameter": [
+                    {"name": "proxied_ip", "value": "<rw_mgmt_ip>"},
+                ]
+            })
+            vnf_config.initial_config_primitive.append(init_config)
+
+            init_config_action = VnfdYang.InitialConfigPrimitive.from_dict({
+                "seq": 2,
+                "name": "action1",
+                "parameter": [
+                    {"name": "Pong Connection Point", "value": "pong_vnfd/cp0"},
+                ]
+            })
+            vnf_config.initial_config_primitive.append(init_config_action)
+            init_config_action = VnfdYang.InitialConfigPrimitive.from_dict({
+                "seq": 3,
+                "name": "action2",
+                "parameter": [
+                    {"name": "Ping Connection Point", "value": "ping_vnfd/cp0"},
+                ]
+            })
+            vnf_config.initial_config_primitive.append(init_config_action)
+
+            # Config parameters can be taken from config.yaml and
+            # actions from actions.yaml in the charm
+            # Config to set the home domain
+            vnf_config.create_service_primitive()
+            config = VnfdYang.ServicePrimitive.from_dict({
+                "name": "config",
+                "parameter": [
+                    {"name": "home_domain", "data_type": "STRING"},
+                    {"name": "base_number", "data_type": "STRING"},
+                    {"name": "number_count", "data_type": "INTEGER"},
+                    {"name": "password", "data_type": "STRING"},
+                ]
+            })
+            vnf_config.service_primitive.append(config)
+
+            config = VnfdYang.ServicePrimitive.from_dict({
+                "name": "create-update-user",
+                # "user-defined-script":"/tmp/test.py",
+                "parameter": [
+                    {"name": "number", "data_type": "STRING", "mandatory": True},
+                    {"name": "password", "data_type": "STRING", "mandatory": True},
+                ]
+            })
+            vnf_config.service_primitive.append(config)
+
+            config = VnfdYang.ServicePrimitive.from_dict({
+                "name": "delete-user",
+                "parameter": [
+                    {"name": "number", "data_type": "STRING", "mandatory": True},
+                ]
+            })
+            vnf_config.service_primitive.append(config)
+
+    def default_config(self, const_vnfd, vnfd, mano_ut, use_ns_init_conf):
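+        # Pong gets config priority 1 and ping priority 2 so that the pong
+        # server is configured (and given config_delay to come up) before
+        # ping is pointed at it.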
+        vnf_config = vnfd.vnfd.vnf_configuration
+
+        vnf_config.config_attributes.config_priority = 0
+        vnf_config.config_attributes.config_delay = 0
+
+        # Select "script" configuration
+        vnf_config.script.script_type = 'bash'
+
+        if vnfd.name in ('pong_vnfd', 'pong_vnfd_with_epa', 'pong_vnfd_aws'):
+            vnf_config.config_attributes.config_priority = 1
+            vnf_config.config_template = self.pong_config(mano_ut, use_ns_init_conf)
+            # the config delay on the first-priority VNF delays the entire
+            # NS configuration
+            if mano_ut is False:
+                vnf_config.config_attributes.config_delay = 60
+            else:
+                # PONG inside a mano_ut run: use a short, test-only delay
+                vnf_config.config_attributes.config_delay = 10
+                # vnf_config.config_template = self.pong_config(vnf_config, use_ns_init_conf)
+
+        if vnfd.name in ('ping_vnfd', 'ping_vnfd_with_epa', 'ping_vnfd_aws'):
+            vnf_config.config_attributes.config_priority = 2
+            vnf_config.config_template = self.ping_config(mano_ut, use_ns_init_conf)
+
+    def ns_config(self, nsd, vnfd_list, mano_ut):
+        # Used by scale group
+        if mano_ut:
+            nsd.service_primitive.add().from_dict(
+                {
+                    "name": "ping config",
+                    "user_defined_script": "{}".format(os.path.join(
+                        os.environ['RIFT_ROOT'],
+                        'modules/core/mano',
+                        'examples/ping_pong_ns/rift/mano/examples',
+                        'ping_config_ut.sh'))
+                })
+        else:
+            nsd.service_primitive.add().from_dict(
+                {
+                    "name": "ping config",
+                    "user_defined_script": "ping_config.py"
+                })
+
+    def ns_initial_config(self, nsd):
+        nsd.initial_config_primitive.add().from_dict(
+            {
+                "seq": 1,
+                "name": "start traffic",
+                "user_defined_script": "start_traffic.py",
+                "parameter": [
+                    {
+                        'name': 'userid',
+                        'value': 'rift',
+                    },
+                ],
+            }
+        )
+
+    def add_scale_group(self, scale_group):
+        self._scale_groups.append(scale_group)
+
+    def add_placement_group(self, placement_group):
+        self._placement_groups.append(placement_group)
+
+    def create_mon_params(self, vnfds):
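+        # Roll each VNFD monitoring param up into an NSD-level monitoring
+        # param with AVERAGE aggregation. Currently unused; see the
+        # commented-out call at the end of compose().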
+        NsdMonParam = NsdYang.YangData_Nsd_NsdCatalog_Nsd_MonitoringParam
+        param_id = 1
+        for vnfd_obj in vnfds:
+            for mon_param in vnfd_obj.vnfd.monitoring_param:
+                nsd_monp = NsdMonParam.from_dict({
+                        'id': str(param_id),
+                        'name': mon_param.name,
+                        'aggregation_type': "AVERAGE",
+                        'value_type': mon_param.value_type,
+                        'vnfd_monitoring_param': [
+                                {'vnfd_id_ref': vnfd_obj.vnfd.id,
+                                'vnfd_monitoring_param_ref': mon_param.id}]
+                        })
+
+                self.nsd.monitoring_param.append(nsd_monp)
+                param_id += 1
+
+    def compose(self, vnfd_list, cpgroup_list, mano_ut, use_ns_init_conf=True):
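+        # Compose the NSD: one VLD per connection-point group, one
+        # constituent-vnfd entry per VNF instance, plus any scaling and
+        # placement groups registered on this NetworkService.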
+
+        if mano_ut:
+            # Disable NS initial config primitive
+            use_ns_init_conf = False
+
+        self.descriptor = RwNsdYang.YangData_Nsd_NsdCatalog()
+        self.id = str(uuid.uuid1())
+        nsd = self.descriptor.nsd.add()
+        self.nsd = nsd
+        nsd.id = self.id
+        nsd.name = self.name
+        nsd.short_name = self.name
+        nsd.vendor = 'RIFT.io'
+        nsd.logo = 'rift_logo.png'
+        nsd.description = 'Toy NS'
+        nsd.version = '1.0'
+        nsd.input_parameter_xpath.append(
+                NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+                    xpath="/nsd:nsd-catalog/nsd:nsd/nsd:vendor",
+                    )
+                )
+
+        ip_profile = nsd.ip_profiles.add()
+        ip_profile.name = "InterVNFLink"
+        ip_profile.description  = "Inter VNF Link"
+        ip_profile.ip_profile_params.ip_version = "ipv4"
+        ip_profile.ip_profile_params.subnet_address = "31.31.31.0/24"
+        ip_profile.ip_profile_params.gateway_address = "31.31.31.210"
+
+        vld_id = 1
+        for cpgroup in cpgroup_list:
+            vld = nsd.vld.add()
+            vld.id = 'ping_pong_vld%s' % vld_id
+            vld_id += 1
+            vld.name = 'ping_pong_vld'  # hard coded
+            vld.short_name = vld.name
+            vld.vendor = 'RIFT.io'
+            vld.description = 'Toy VL'
+            vld.version = '1.0'
+            vld.type_yang = 'ELAN'
+            vld.ip_profile_ref = 'InterVNFLink'
+            for cp in cpgroup:
+                cpref = vld.vnfd_connection_point_ref.add()
+                cpref.member_vnf_index_ref = cp[0]
+                cpref.vnfd_id_ref = cp[1]
+                cpref.vnfd_connection_point_ref = cp[2]
+
+        vnfd_index_map = {}
+        member_vnf_index = 1
+        for vnfd in vnfd_list:
+            for i in range(vnfd.instance_count):
+                constituent_vnfd = nsd.constituent_vnfd.add()
+                constituent_vnfd.member_vnf_index = member_vnf_index
+                vnfd_index_map[vnfd] = member_vnf_index
+
+                # If scaling is enabled, set start-by-default to false
+                # for the ping VNFD
+                if (len(self._scale_groups) and
+                    vnfd.descriptor.vnfd[0].name == 'ping_vnfd'):
+                    constituent_vnfd.start_by_default = False
+
+                constituent_vnfd.vnfd_id_ref = vnfd.descriptor.vnfd[0].id
+                self.default_config(constituent_vnfd, vnfd, mano_ut,
+                                    use_ns_init_conf,)
+                member_vnf_index += 1
+
+        # Enable config primitives if either mano_ut or
+        # scale groups are enabled
+        if mano_ut or len(self._scale_groups):
+            self.ns_config(nsd, vnfd_list, mano_ut)
+
+        # Add NS initial config to start traffic
+        if use_ns_init_conf:
+            self.ns_initial_config(nsd)
+
+        for scale_group in self._scale_groups:
+            group_desc = nsd.scaling_group_descriptor.add()
+            group_desc.name = scale_group.name
+            group_desc.max_instance_count = scale_group.max_count
+            group_desc.min_instance_count = scale_group.min_count
+            for vnfd, count in scale_group.vnfd_count_map.items():
+                member = group_desc.vnfd_member.add()
+                member.member_vnf_index_ref = vnfd_index_map[vnfd]
+                member.count = count
+
+            for trigger in scale_group.config_action:
+                config_action = group_desc.scaling_config_action.add()
+                config_action.trigger = trigger
+                config = scale_group.config_action[trigger]
+                config_action.ns_config_primitive_name_ref = config['ns-config-primitive-name-ref']
+
+        for placement_group in self._placement_groups:
+            group = nsd.placement_groups.add()
+            group.name = placement_group.name
+            group.strategy = placement_group.strategy
+            group.requirement = placement_group.requirement
+            for member_vnfd in placement_group.vnfd_list:
+                member = group.member_vnfd.add()
+                member.vnfd_id_ref = member_vnfd.descriptor.vnfd[0].id
+                member.member_vnf_index_ref = vnfd_index_map[member_vnfd]
+
+        # self.create_mon_params(vnfd_list)
+
+    def write_config(self, outdir, vnfds):
+
+        converter = config_data.ConfigPrimitiveConvertor()
+        yaml_data = converter.extract_nsd_config(self.nsd)
+
+        ns_config_dir = os.path.join(outdir, self.name, "ns_config")
+        os.makedirs(ns_config_dir, exist_ok=True)
+        vnf_config_dir = os.path.join(outdir, self.name, "vnf_config")
+        os.makedirs(vnf_config_dir, exist_ok=True)
+
+        if len(yaml_data):
+            with open('%s/%s.yaml' % (ns_config_dir, self.id), "w") as fh:
+                fh.write(yaml_data)
+
+        for i, vnfd in enumerate(vnfds, start=1):
+            yaml_data = converter.extract_vnfd_config(vnfd)
+
+            if len(yaml_data):
+                with open('%s/%s__%s.yaml' % (vnf_config_dir, vnfd.id, i), "w") as fh:
+                    fh.write(yaml_data)
+
+    def write_initial_config_script(self, outdir):
+        script_name = 'start_traffic.py'
+
+        src_path = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
+        script_src = os.path.join(src_path, script_name)
+        if not os.path.exists(script_src):
+            src_path = os.path.join(os.environ['RIFT_ROOT'],
+            'modules/core/mano/examples/ping_pong_ns/rift/mano/examples')
+            script_src = os.path.join(src_path, script_name)
+
+        dest_path = os.path.join(outdir, 'scripts')
+        os.makedirs(dest_path, exist_ok=True)
+
+        shutil.copy2(script_src, dest_path)
+
+    def write_to_file(self, outdir, output_format):
+        dirpath = os.path.join(outdir, self.name)
+        if not os.path.exists(dirpath):
+            os.makedirs(dirpath)
+
+        super(NetworkService, self).write_to_file(["nsd", "rw-nsd"],
+                                                  dirpath,
+                                                  output_format)
+
+        # Write the initial config script
+        self.write_initial_config_script(dirpath)
+
+
+def get_ping_mon_params(path):
+    return [
+            {
+                'id': '1',
+                'name': 'ping-request-tx-count',
+                'http_endpoint_ref': path,
+                'json_query_method': "NAMEKEY",
+                'value_type': "INT",
+                'description': 'no of ping requests',
+                'group_tag': 'Group1',
+                'widget_type': 'COUNTER',
+                'units': 'packets'
+                },
+
+            {
+                'id': '2',
+                'name': 'ping-response-rx-count',
+                'http_endpoint_ref': path,
+                'json_query_method': "NAMEKEY",
+                'value_type': "INT",
+                'description': 'no of ping responses',
+                'group_tag': 'Group1',
+                'widget_type': 'COUNTER',
+                'units': 'packets'
+                },
+            ]
+
+
+def get_pong_mon_params(path):
+    return [
+            {
+                'id': '1',
+                'name': 'ping-request-rx-count',
+                'http_endpoint_ref': path,
+                'json_query_method': "NAMEKEY",
+                'value_type': "INT",
+                'description': 'no of ping requests',
+                'group_tag': 'Group1',
+                'widget_type': 'COUNTER',
+                'units': 'packets'
+                },
+
+            {
+                'id': '2',
+                'name': 'ping-response-tx-count',
+                'http_endpoint_ref': path,
+                'json_query_method': "NAMEKEY",
+                'value_type': "INT",
+                'description': 'no of ping responses',
+                'group_tag': 'Group1',
+                'widget_type': 'COUNTER',
+                'units': 'packets'
+                },
+            ]
+
+
+class ScaleGroup(object):
+    def __init__(self, name, min_count=1, max_count=1):
+        self.name = name
+        self.min_count = min_count
+        self.max_count = max_count
+        self.vnfd_count_map = {}
+        self.config_action = {}
+
+    def add_vnfd(self, vnfd, vnfd_count):
+        self.vnfd_count_map[vnfd] = vnfd_count
+
+    def add_config(self):
+        self.config_action['post_scale_out'] = {
+            'ns-config-primitive-name-ref': 'ping config'}
+
+class PlacementGroup(object):
+    def __init__(self, name):
+        self.name = name
+        self.strategy = ''
+        self.requirement = ''
+
+    def add_strategy(self, strategy):
+        self.strategy = strategy
+
+    def add_requirement(self, requirement):
+        self.requirement = requirement
+
+class NsdPlacementGroup(PlacementGroup):
+    def __init__(self, name):
+        self.vnfd_list = []
+        super(NsdPlacementGroup, self).__init__(name)
+
+    def add_member(self, vnfd):
+        self.vnfd_list.append(vnfd)
+
+
+class VnfdPlacementGroup(PlacementGroup):
+    def __init__(self, name):
+        self.vdu_list = []
+        super(VnfdPlacementGroup, self).__init__(name)
+
+    def add_member(self, vdu):
+        self.vdu_list.append(vdu)
+
+
+def generate_ping_pong_descriptors(fmt="json",
+                                   write_to_file=False,
+                                   out_dir="./",
+                                   pingcount=NUM_PING_INSTANCES,
+                                   external_vlr_count=1,
+                                   internal_vlr_count=1,
+                                   num_vnf_vms=1,
+                                   ping_md5sum=None,
+                                   pong_md5sum=None,
+                                   mano_ut=False,
+                                   use_scale_group=False,
+                                   ping_fmt=None,
+                                   pong_fmt=None,
+                                   nsd_fmt=None,
+                                   use_mon_params=True,
+                                   ping_userdata=None,
+                                   pong_userdata=None,
+                                   ex_ping_userdata=None,
+                                   ex_pong_userdata=None,
+                                   use_placement_group=True,
+                                   use_ns_init_conf=True,
+                                   ):
+    # List of connection point groups
+    # Each connection point group refers to a virtual link
+    # the CP group consists of tuples of connection points
+    cpgroup_list = []
+    for i in range(external_vlr_count):
+        cpgroup_list.append([])
+
+    suffix = ''
+    ping = VirtualNetworkFunction("ping_vnfd%s" % (suffix), pingcount)
+
+    if use_placement_group:
+        ### Add group name Eris
+        group = VnfdPlacementGroup('Eris')
+        group.add_strategy('COLOCATION')
+        group.add_requirement('''Place this VM on the Kuiper belt object Eris''')
+        ping.add_placement_group(group)
+
+    # ping = VirtualNetworkFunction("ping_vnfd", pingcount)
+    if not ping_userdata:
+        ping_userdata = PING_USERDATA_FILE
+
+    if ex_ping_userdata:
+        ping_userdata = '''\
+{ping_userdata}
+{ex_ping_userdata}
+        '''.format(
+            ping_userdata=ping_userdata,
+            ex_ping_userdata=ex_ping_userdata
+        )
+
+    ping.compose(
+            "Fedora-x86_64-20-20131211.1-sda-ping.qcow2",
+            ping_userdata,
+            use_ping_cloud_init_file,
+            "api/v1/ping/stats",
+            get_ping_mon_params("api/v1/ping/stats") if use_mon_params else [],
+            mon_port=18888,
+            mgmt_port=18888,
+            num_vlr_count=external_vlr_count,
+            num_ivlr_count=internal_vlr_count,
+            num_vms=num_vnf_vms,
+            image_md5sum=ping_md5sum,
+            mano_ut=mano_ut,
+            )
+
+    pong = VirtualNetworkFunction("pong_vnfd%s" % (suffix))
+
+    if use_placement_group:
+        ### Add group name Weywot
+        group = VnfdPlacementGroup('Weywot')
+        group.add_strategy('COLOCATION')
+        group.add_requirement('''Place this VM on the Kuiper belt object Weywot''')
+        pong.add_placement_group(group)
+
+
+    # pong = VirtualNetworkFunction("pong_vnfd")
+
+    if not pong_userdata:
+        pong_userdata = PONG_USERDATA_FILE
+
+    if ex_pong_userdata:
+        pong_userdata = '''\
+{pong_userdata}
+{ex_pong_userdata}
+        '''.format(
+            pong_userdata=pong_userdata,
+            ex_pong_userdata=ex_pong_userdata
+        )
+
+
+    pong.compose(
+            "Fedora-x86_64-20-20131211.1-sda-pong.qcow2",
+            pong_userdata,
+            use_pong_cloud_init_file,
+            "api/v1/pong/stats",
+            get_pong_mon_params("api/v1/pong/stats") if use_mon_params else [],
+            mon_port=18889,
+            mgmt_port=18889,
+            num_vlr_count=external_vlr_count,
+            num_ivlr_count=internal_vlr_count,
+            num_vms=num_vnf_vms,
+            image_md5sum=pong_md5sum,
+            mano_ut=mano_ut,
+            )
+
+    # Initialize the member VNF index
+    member_vnf_index = 1
+
+    # define the connection point groups
+    for index, cp_group in enumerate(cpgroup_list):
+        desc_id = ping.descriptor.vnfd[0].id
+        filename = 'ping_vnfd{}/cp{}'.format(suffix, index)
+
+        for idx in range(pingcount):
+            cp_group.append((
+                member_vnf_index,
+                desc_id,
+                filename,
+                ))
+
+            member_vnf_index += 1
+
+        desc_id = pong.descriptor.vnfd[0].id
+        filename = 'pong_vnfd{}/cp{}'.format(suffix, index)
+
+        cp_group.append((
+            member_vnf_index,
+            desc_id,
+            filename,
+            ))
+
+        member_vnf_index += 1
+
+    vnfd_list = [ping, pong]
+
+    nsd_catalog = NetworkService("ping_pong_nsd%s" % (suffix))
+
+    if use_scale_group:
+        group = ScaleGroup("ping_group", max_count=10)
+        group.add_vnfd(ping, 1)
+        group.add_config()
+        nsd_catalog.add_scale_group(group)
+
+    if use_placement_group:
+        ### Add group name Orcus
+        group = NsdPlacementGroup('Orcus')
+        group.add_strategy('COLOCATION')
+        group.add_requirement('''Place this VM on the Kuiper belt object Orcus''')
+
+        for member_vnfd in vnfd_list:
+            group.add_member(member_vnfd)
+
+        nsd_catalog.add_placement_group(group)
+
+        ### Add group name Quaoar
+        group = NsdPlacementGroup('Quaoar')
+        group.add_strategy('COLOCATION')
+        group.add_requirement('''Place this VM on the Kuiper belt object Quaoar''')
+
+        for member_vnfd in vnfd_list:
+            group.add_member(member_vnfd)
+
+        nsd_catalog.add_placement_group(group)
+
+
+    nsd_catalog.compose(vnfd_list,
+                        cpgroup_list,
+                        mano_ut,
+                        use_ns_init_conf=use_ns_init_conf,)
+
+    if write_to_file:
+        ping.write_to_file(out_dir, ping_fmt if ping_fmt is not None else fmt)
+        pong.write_to_file(out_dir, pong_fmt if pong_fmt is not None else fmt)
+        nsd_catalog.write_config(out_dir, vnfd_list)
+        nsd_catalog.write_to_file(out_dir, nsd_fmt if nsd_fmt is not None else fmt)
+
+    return (ping, pong, nsd_catalog)
+
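+# A minimal sketch of in-process use (test harnesses import this module
+# rather than running it as a script):
+#   ping, pong, nsd = generate_ping_pong_descriptors(write_to_file=False)
+# returns the composed descriptor objects without writing anything to disk.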
+
+def main(argv=sys.argv[1:]):
+    global outdir, output_format, use_epa, aws, use_ping_cloud_init_file, use_pong_cloud_init_file
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-o', '--outdir', default='.')
+    parser.add_argument('-f', '--format', default='json')
+    parser.add_argument('-e', '--epa', action="store_true", default=False)
+    parser.add_argument('-a', '--aws', action="store_true", default=False)
+    parser.add_argument('-n', '--pingcount', type=int, default=NUM_PING_INSTANCES)
+    parser.add_argument('--ping-image-md5')
+    parser.add_argument('--pong-image-md5')
+    parser.add_argument('--ping-cloud-init', default=None)
+    parser.add_argument('--pong-cloud-init', default=None)
+    args = parser.parse_args(argv)
+    outdir = args.outdir
+    output_format = args.format
+    use_epa = args.epa
+    aws = args.aws
+    pingcount = args.pingcount
+    use_ping_cloud_init_file = args.ping_cloud_init
+    use_pong_cloud_init_file = args.pong_cloud_init
+
+    generate_ping_pong_descriptors(args.format, True, args.outdir, pingcount,
+                                   ping_md5sum=args.ping_image_md5, pong_md5sum=args.pong_image_md5,
+                                   mano_ut=False,
+                                   use_scale_group=False,)
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/ping_pong_ns/rift/mano/examples/start_traffic.py b/examples/ping_pong_ns/rift/mano/examples/start_traffic.py
new file mode 100755 (executable)
index 0000000..af6f62f
--- /dev/null
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+
+############################################################################
+# Copyright 2016 RIFT.IO Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
+
+
+import argparse
+import logging
+import os
+import subprocess
+import sys
+import time
+
+import yaml
+
+
+def start_traffic(yaml_cfg, logger):
+    '''Use curl to set the admin status to enabled on the pong and ping VNFs'''
+
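+    # Order matters here: pong is enabled first so that its server is
+    # listening before ping starts generating traffic.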
+    def enable_service(mgmt_ip, port, vnf_type):
+        curl_cmd = 'curl -D /dev/stdout -H "Accept: application/vnd.yang.data' \
+                   '+xml" -H "Content-Type: application/vnd.yang.data+json" ' \
+                   '-X POST -d "{{\\"enable\\":true}}" http://{mgmt_ip}:' \
+                   '{mgmt_port}/api/v1/{vnf_type}/adminstatus/state'. \
+                   format(
+                       mgmt_ip=mgmt_ip,
+                       mgmt_port=port,
+                       vnf_type=vnf_type)
+
+        logger.debug("Executing cmd: %s", curl_cmd)
+        subprocess.check_call(curl_cmd, shell=True)
+
+    # Enable pong service first
+    for index, vnfr in yaml_cfg['vnfr'].items():
+        logger.debug("VNFR {}: {}".format(index, vnfr))
+
+        # Check if it is the pong vnf
+        if 'pong_vnfd' in vnfr['name']:
+            vnf_type = 'pong'
+            port = 18889
+            enable_service(vnfr['mgmt_ip_address'], port, vnf_type)
+            break
+
+    # Add a short delay to allow the pong port to come up
+    time.sleep(0.1)
+
+    # Enable ping service next
+    for index, vnfr in yaml_cfg['vnfr'].items():
+        logger.debug("VNFR {}: {}".format(index, vnfr))
+
+        # Check if it is the ping vnf
+        if 'ping_vnfd' in vnfr['name']:
+            vnf_type = 'ping'
+            port = 18888
+            enable_service(vnfr['mgmt_ip_address'], port, vnf_type)
+            break
+
+def main(argv=sys.argv[1:]):
+    try:
+        parser = argparse.ArgumentParser()
+        parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
+        parser.add_argument("-q", "--quiet", dest="verbose", action="store_false")
+        args = parser.parse_args(argv)
+
+        run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
+        if not os.path.exists(run_dir):
+            os.makedirs(run_dir)
+        log_file = "{}/ping_pong_start_traffic-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
+        logging.basicConfig(filename=log_file, level=logging.DEBUG)
+        logger = logging.getLogger()
+
+    except Exception as e:
+        print("Exception in {}: {}".format(__file__, e))
+        sys.exit(1)
+
+    try:
+        ch = logging.StreamHandler()
+        if args.verbose:
+            ch.setLevel(logging.DEBUG)
+        else:
+            ch.setLevel(logging.INFO)
+
+        # create formatter and add it to the handlers
+        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+        ch.setFormatter(formatter)
+        logger.addHandler(ch)
+
+    except Exception as e:
+        logger.exception(e)
+        raise e
+
+    try:
+        yaml_str = args.yaml_cfg_file.read()
+        # logger.debug("Input YAML file:\n{}".format(yaml_str))
+        yaml_cfg = yaml.safe_load(yaml_str)
+        logger.debug("Input YAML: {}".format(yaml_cfg))
+
+        start_traffic(yaml_cfg, logger)
+
+    except Exception as e:
+        logger.exception(e)
+        raise e
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/ping_pong_ns/rift_logo.png b/examples/ping_pong_ns/rift_logo.png
new file mode 100644 (file)
index 0000000..09b47c7
Binary files /dev/null and b/examples/ping_pong_ns/rift_logo.png differ
diff --git a/examples/ping_pong_ns/stand_up_ping_pong b/examples/ping_pong_ns/stand_up_ping_pong
new file mode 100644 (file)
index 0000000..a8e5e55
--- /dev/null
@@ -0,0 +1,232 @@
+#!/bin/bash
+
+# arguments
+if [ -z "$1" ]
+then
+    echo "must supply ip for launchpad"
+    exit -1
+else
+    lp_ip=${1}
+fi
+
+username=$(whoami)
+
+# make sure we're in a rift shell
+if [ -z "$RIFT_ROOT" ]
+then
+    echo "must be in a rift-shell"
+    exit -1
+fi
+
+# make sure the system is up
+system_is_up() {
+    response=$(curl --silent --insecure \
+                   -o /dev/null \
+                   --write-out "%{http_code}"\
+                   --user admin:admin \
+                   https://${lp_ip}:8008/api/config/launchpad-config \
+                   --request GET \
+           )
+
+   if [ ${response} -eq 200 ]
+   then
+       return 0
+   else
+       if [ ${response} -eq 404 ]
+       then
+          # not running a launchpad!
+          echo "riftware is running on ${lp_ip} but there is no launchpad-config"
+          exit -1
+       fi
+       return 1
+   fi
+}
+
+echo -n "wait for system"
+while ! system_is_up
+do
+    echo -n "."
+    sleep 5s
+done
+
+echo "system is up"
+
+# setup the openstack account
+echo -n "adding account"
+
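+# POST a JSON payload to the launchpad RW.REST API. Leaves the raw response
+# body in ${response} and sets ${success} to 1 when the call succeeded or
+# the item already existed.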
+post_json_to_rwrest() {
+   if [ -z "$1" ]
+   then
+     echo "must supply url"
+     exit -1
+   else
+       url=$1
+   fi
+
+   if [ -z "$2" ]
+   then
+     echo "must supply payload"
+     exit -1
+   else
+       payload="${2}"
+   fi
+
+   response=$(curl --silent --insecure \
+                  --header "content-type:application/vnd.yang.data+json" \
+                  --header "Accept:application/vnd.yang.data+json" \
+                  --user admin:admin \
+                  https://${lp_ip}:8008${url} \
+                  --request POST --data "${payload}" \
+          )
+
+    added_account=$(echo "${response}" | grep -E -e '"(success|ok)"' | wc -l)
+    already_exists=$(echo "${response}" | grep \"data-exists\" | wc -l)
+    success=$((added_account + already_exists))
+}
+
+account_payload=" {
+  \"account\": [
+    {
+      \"name\": \"OS\",
+      \"account-type\": \"openstack\",
+      \"openstack\": {
+        \"auth_url\": \"http://engstack.eng.riftio.com:5000/v3/\",
+        \"secret\": \"mypasswd\",
+        \"key\": \"${username}_automation\",
+        \"tenant\": \"${username}_automation\",
+        \"mgmt-network\": \"private\"
+      }
+    }
+  ]
+}"
+
+post_json_to_rwrest "/api/config/cloud/account" "${account_payload}"
+
+if [ ${success} -ne 1 ];
+then
+    echo -en "\r" # clear pending line
+    echo "failed to add cloud account:"
+    echo ${response}
+    exit -1
+else
+    echo " success"
+fi
+
+# onboard descriptors
+cd $RIFT_BUILD/modules/core/mano/src/core_mano-build/examples/ping_pong_ns
+
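+# Poll the upload state of ${transaction_id} once; updates
+# ${transaction_state} and returns 0 while the upload is still pending,
+# 1 once it has finished (success or failure).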
+wait_for_package() {
+   if [ -z "$1" ]
+   then
+     echo "must supply transaction id to wait for"
+     exit -1
+   fi
+
+   response=$(curl --silent --insecure https://${lp_ip}:4567/api/upload/${transaction_id}/state)
+   transaction_state=$(echo ${response} | awk -F "status" '{print $2}' | awk '{print $2}')
+   transaction_state=${transaction_state:1:-2}
+
+   if [ "${transaction_state}" == "pending" ];
+   then
+       return 0
+   else
+       return 1
+   fi
+}
+
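+# Upload a descriptor package and wait for the upload transaction to
+# finish. Sets ${status} to 0 on success and 1 on failure.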
+upload_package() {
+   if [ -z "$1" ]
+   then
+     echo "must supply package to upload"
+     exit -1
+   else
+       package=$1
+   fi
+
+   echo -n "upload ${package} package"
+
+   response=$(curl --silent --insecure -F "descriptor=@${package}" https://${lp_ip}:4567/api/upload)
+   transaction_id=$(echo ${response} | awk '{print $2}')
+   transaction_id=${transaction_id:1:-2}
+
+   while wait_for_package ${transaction_id}
+   do
+       echo -n "."
+       sleep 1s
+   done
+   if [ "${transaction_state}" == "failure" ];
+   then
+       echo "failed"
+       status=1
+   else
+       echo "success"
+       status=0
+   fi
+
+}
+
+upload_package "ping_vnfd.tar.gz"
+ping_status=${status}
+upload_package "pong_vnfd.tar.gz"
+pong_status=${status}
+
+success=$((ping_status + pong_status))
+
+if [ ${success} -ne 0 ];
+then
+    echo -en "\r" # clear pending line
+    echo "cannot on-board nsd when a vnfd fails"
+    exit -1
+fi
+
+upload_package "ping_pong_nsd.tar.gz"
+if [ ${status} -ne 0 ];
+then
+    echo -en "\r" # clear pending line
+    echo "failed to on-board nsd"
+    exit -1
+fi
+
+# instantiate ping_pong_nsd
+echo "instantiate ping pong nsd"
+
+tag=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 4 | head -n 1)
+
+tmpdir="/tmp/${tag}"
+mkdir ${tmpdir}
+
+tar -xf ping_pong_nsd.tar.gz -C ${tmpdir}
+
+nsdfile="${tmpdir}/ping_pong_nsd/ping_pong_nsd.yaml"
+
+nsd_id=$(cat ${nsdfile} | grep "nsd:id" | head -n1 | awk '{print $2}')
+
+rm -r ${tmpdir}
+
+nsr_id=$(cat /proc/sys/kernel/random/uuid)
+nsd_payload="{
+    \"nsr\":[
+        {
+            \"id\":\"${nsr_id}\",
+            \"nsd-ref\":\"${nsd_id}\",
+            \"name\":\"${username}-${tag}-ping-pong-nsd\",
+            \"short-name\":\"pingpong\",
+            \"description\":\"ping pong nsd instantiated by ${username} with tag ${tag}\",
+            \"admin-status\":\"ENABLED\",
+            \"cloud-account\":\"OS\"
+        }
+    ]
+}"
+
+post_json_to_rwrest "/api/config/ns-instance-config" "${nsd_payload}"
+
+if [ ${success} -ne 1 ];
+then
+    echo -en "\r" # clear pending line
+    echo "failed to instantiate nsd:"
+    echo ${response}
+    exit -1
+else
+    echo " success"
+fi
+
diff --git a/foss.txt b/foss.txt
new file mode 100644 (file)
index 0000000..2614703
--- /dev/null
+++ b/foss.txt
@@ -0,0 +1,5 @@
+RIFT.core, common/python/rift/mano/tosca_translator, Openstack, Apache License 2.0, https://github.com/openstack/heat-translator
+RIFT.core, models/openmano/bin, OpenMANO CLI, Apache License 2.0, https://github.com/nfvlabs/openmano
+RIFT.core, rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector, OpenMANO, Apache License 2.0, https://github.com/nfvlabs/openmano
+RIFT.core, rwlaunchpad/plugins/rwimagemgr/etc, Openstack Glance scripts, unknown, unknown
+RIFT.core, rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy, quickproxy, unknown, unknown
diff --git a/manifest/LICENSE b/manifest/LICENSE
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/models/CMakeLists.txt b/models/CMakeLists.txt
new file mode 100644 (file)
index 0000000..dca2a03
--- /dev/null
@@ -0,0 +1,31 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 2014/12/11
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(PKG_NAME models)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+set(subdirs
+  plugins
+  openmano
+  )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/models/openmano/CMakeLists.txt b/models/openmano/CMakeLists.txt
new file mode 100644 (file)
index 0000000..ad0cdc8
--- /dev/null
@@ -0,0 +1,27 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 2014/12/11
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(subdirs
+  bin
+  src
+  python
+  )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/models/openmano/bin/CMakeLists.txt b/models/openmano/bin/CMakeLists.txt
new file mode 100644 (file)
index 0000000..895823c
--- /dev/null
@@ -0,0 +1,12 @@
+# RIFT_IO_STANDARD_CMAKE_COPYRIGHT_HEADER(BEGIN)
+# Author(s): Austin Cormier
+# Creation Date: 1/11/2015
+# RIFT_IO_STANDARD_CMAKE_COPYRIGHT_HEADER(END)
+
+install(
+  PROGRAMS
+    openmano
+    openmano_cleanup.sh
+  DESTINATION usr/bin
+  COMPONENT ${PKG_LONG_NAME}
+)
diff --git a/models/openmano/bin/openmano b/models/openmano/bin/openmano
new file mode 100755 (executable)
index 0000000..3ea0654
--- /dev/null
@@ -0,0 +1,1401 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# PYTHON_ARGCOMPLETE_OK
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+openmano client used to interact with openmano-server (openmanod) 
+'''
+__author__="Alfonso Tierno, Gerardo Garcia"
+__date__ ="$09-oct-2014 09:09:48$"
+__version__="0.4.3-r467"
+version_date="Mar 2016"
+
+from argcomplete.completers import FilesCompleter
+import os
+import argparse
+import argcomplete
+import requests
+import json
+import yaml
+import logging
+#from jsonschema import validate as js_v, exceptions as js_e
+
+class ArgumentParserError(Exception): pass
+
+class OpenmanoCLIError(Exception): pass
+
+class ThrowingArgumentParser(argparse.ArgumentParser):
+    def error(self, message):
+        print "Error: %s" %message
+        print
+        self.print_usage()
+        #self.print_help()
+        print
+        print "Type 'openmano -h' for help"
+        raise ArgumentParserError
+
+
+def config(args):
+    print "OPENMANO_HOST: %s" %mano_host
+    print "OPENMANO_PORT: %s" %mano_port
+    if args.n:
+        logger.debug("resolving tenant and datacenter names")
+        mano_tenant_id = "None"
+        mano_tenant_name = "None"
+        mano_datacenter_id = "None"
+        mano_datacenter_name = "None"
+        try:
+            mano_tenant_id = _get_item_uuid("tenants", mano_tenant)
+            URLrequest = "http://%s:%s/openmano/tenants/%s" %(mano_host, mano_port, mano_tenant_id)
+            mano_response = requests.get(URLrequest)
+            logger.debug("openmano response: %s", mano_response.text )
+            content = mano_response.json()
+            mano_tenant_name = content["tenant"]["name"]
+            URLrequest = "http://%s:%s/openmano/%s/datacenters/%s" %(mano_host, mano_port, mano_tenant_id, mano_datacenter)
+            mano_response = requests.get(URLrequest)
+            logger.debug("openmano response: %s", mano_response.text )
+            content = mano_response.json()
+            if "error" not in content:
+                mano_datacenter_id = content["datacenter"]["uuid"]
+                mano_datacenter_name = content["datacenter"]["name"]
+        except OpenmanoCLIError:
+            pass
+        print "OPENMANO_TENANT: %s" %mano_tenant
+        print "    Id: %s" %mano_tenant_id
+        print "    Name: %s" %mano_tenant_name 
+        print "OPENMANO_DATACENTER: %s" %str (mano_datacenter)
+        print "    Id: %s" %mano_datacenter_id
+        print "    Name: %s" %mano_datacenter_name 
+    else:
+        print "OPENMANO_TENANT: %s" %mano_tenant
+        print "OPENMANO_DATACENTER: %s" %str (mano_datacenter)
+
+def _print_verbose(mano_response, verbose_level=0):
+    content = mano_response.json()
+    result = 0 if mano_response.status_code==200 else mano_response.status_code
+    if type(content)!=dict or len(content)!=1:
+        #print "Non expected format output"
+        print str(content)
+        return result
+    
+    val=content.values()[0]
+    if type(val)==str:
+        print val
+        return result
+    elif type(val) == list:
+        content_list = val
+    elif type(val)==dict:
+        content_list = [val]
+    else:
+        #print "Non expected dict/list format output"
+        print str(content)
+        return result
+    
+    #print content_list
+    if verbose_level==None:
+        verbose_level=0
+    if verbose_level >= 3:
+        print yaml.safe_dump(content, indent=4, default_flow_style=False)
+        return result
+
+    if mano_response.status_code == 200:
+        for content in content_list:
+            if "uuid" in content:
+                uuid = content['uuid']
+            elif "id" in content:
+                uuid = content['id']
+            elif "vim_id" in content:
+                uuid = content['vim_id']
+            myoutput = "%s %s" %(uuid.ljust(38),content['name'].ljust(20))
+            if "status" in content:
+                myoutput += " " + content['status'].ljust(20)
+            elif "enabled" in content and not content["enabled"]:
+                myoutput += " enabled=False".ljust(20)
+            if verbose_level >=1:
+                if 'created_at' in content:
+                    myoutput += " " + content['created_at'].ljust(20)
+                if verbose_level >=2:
+                    new_line='\n'
+                    if 'type' in content and content['type']!=None:
+                        myoutput += new_line + "  Type: " + content['type'].ljust(29)
+                        new_line=''
+                    if 'description' in content and content['description']!=None:
+                        myoutput += new_line + "  Description: " + content['description'].ljust(20)
+            print myoutput
+    else:
+        print content['error']['description']
+    return result
+
+def parser_json_yaml(file_name):
+    try:
+        f = file(file_name, "r")
+        text = f.read()
+        f.close()
+    except Exception as e:
+        return (False, str(e))
+           
+    #Read and parse file
+    if file_name[-5:]=='.yaml' or file_name[-4:]=='.yml' or (file_name[-5:]!='.json' and '\t' not in text):
+        try:
+            config = yaml.load(text)
+        except yaml.YAMLError as exc:
+            error_pos = ""
+            if hasattr(exc, 'problem_mark'):
+                mark = exc.problem_mark
+                error_pos = " at line:%s column:%s" % (mark.line+1, mark.column+1)
+            return (False, "Error loading file '"+file_name+"' yaml format error" + error_pos)
+    else: #json
+        try:
+            config = json.loads(text) 
+        except Exception as e:
+            return (False, "Error loading file '"+file_name+"' json format error " + str(e) )
+
+    return True, config
+
+def _load_file_or_yaml(content):
+    '''
+    'content' can be or a yaml/json file or a text containing a yaml/json text format
+    This function autodetect, trying to load and parse the file,
+    if fails trying to parse the 'content' text
+    Returns the dictionary once parsed, or print an error and finish the program
+    '''
+    #Check config file exists
+    if os.path.isfile(content):
+        r,payload = parser_json_yaml(content)
+        if not r:
+            print payload
+            exit(-1)
+    elif "{" in content or ":" in content:
+        try:
+            payload = yaml.load(content)
+        except yaml.YAMLError as exc:
+            error_pos = ""
+            if hasattr(exc, 'problem_mark'):
+                mark = exc.problem_mark
+                error_pos = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
+            print "Error loading yaml/json text"+error_pos
+            exit (-1)
+    else:
+        print "'%s' is neither a valid file nor a yaml/json content" % content
+        exit(-1)
+    return payload
+
+def _get_item_uuid(item, item_name_id, tenant=None):
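+    # Resolve an item name or uuid to its uuid via the openmano REST API.
+    # Raises OpenmanoCLIError when no match or multiple name matches exist.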
+    if tenant:
+        URLrequest = "http://%s:%s/openmano/%s/%s" %(mano_host, mano_port, tenant, item)
+    else:
+        URLrequest = "http://%s:%s/openmano/%s" %(mano_host, mano_port, item)
+    mano_response = requests.get(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text )
+    content = mano_response.json()
+    #print content
+    found = 0
+    for i in content[item]:
+        if i["uuid"] == item_name_id:
+            return item_name_id
+        if i["name"] == item_name_id:
+            uuid = i["uuid"]
+            found += 1
+    if found == 0:
+        raise OpenmanoCLIError("No %s found with name/uuid '%s'" %(item[:-1], item_name_id))
+    elif found > 1:
+        raise OpenmanoCLIError("%d %s found with name '%s'. uuid must be used" %(found, item, item_name_id))
+    return uuid
+# 
+# def check_valid_uuid(uuid):
+#     id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
+#     try:
+#         js_v(uuid, id_schema)
+#         return True
+#     except js_e.ValidationError:
+#         return False
+    
+def _get_tenant(tenant_name_id = None):
+    if not tenant_name_id:
+        tenant_name_id = mano_tenant
+        if not mano_tenant:
+            raise OpenmanoCLIError("'OPENMANO_TENANT' environment variable is not set")
+    return _get_item_uuid("tenants", tenant_name_id)
+
+def _get_datacenter(datacenter_name_id = None, tenant = "any"):
+    if not datacenter_name_id:
+        datacenter_name_id = mano_datacenter
+        if not datacenter_name_id:
+            raise OpenmanoCLIError("neither 'OPENMANO_DATACENTER' environment variable is set nor --datacenter option is used")
+    return _get_item_uuid("datacenters", datacenter_name_id, tenant)
+
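+# Example invocation (file and values are illustrative). --name, --description
+# and --image-path override the corresponding descriptor fields; --image-path
+# takes a comma-separated list with one path per VNFC:
+#     openmano vnf-create myvnf.yaml --name myvnf --image-path /imgs/a.qcow2,/imgs/b.qcow2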
+def vnf_create(args):
+    #print "vnf-create",args
+    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+    tenant = _get_tenant()
+    myvnf = _load_file_or_yaml(args.file)
+
+    if args.name or args.description or args.image_path:
+        #print args.name
+        index = 0   # ensure 'index' exists for the except handler below
+        try:
+            if args.name:
+                myvnf['vnf']['name'] = args.name
+            if args.description:
+                myvnf['vnf']['description'] = args.description
+            if args.image_path:
+                index=0
+                for image_path_ in args.image_path.split(","):
+                    #print "image-path", image_path_
+                    myvnf['vnf']['VNFC'][index]['VNFC image']=image_path_
+                    index=index+1
+        except (KeyError, TypeError), e:
+            if str(e)=='vnf':           error_pos= "missing field 'vnf'"
+            elif str(e)=='name':        error_pos= "missing field  'vnf':'name'"
+            elif str(e)=='description': error_pos= "missing field  'vnf':'description'"
+            elif str(e)=='VNFC':        error_pos= "missing field  'vnf':'VNFC'"
+            elif str(e)==str(index):    error_pos= "field  'vnf':'VNFC' must be an array"
+            elif str(e)=='VNFC image':  error_pos= "missing field 'vnf':'VNFC'['VNFC image']"
+            else:                       error_pos="wrong format"
+            print "Wrong VNF descriptor: " + error_pos
+            return -1 
+    payload_req = json.dumps(myvnf)
+        
+    #print payload_req
+        
+    URLrequest = "http://%s:%s/openmano/%s/vnfs" %(mano_host, mano_port, tenant)
+    logger.debug("openmano request: %s", payload_req)
+    mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
+    logger.debug("openmano response: %s", mano_response.text )
+
+    return _print_verbose(mano_response, args.verbose)
+
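+# Lists VNFs, one line per VNF (uuid, name); -v adds the creation time, -vv
+# adds the description and descriptor path, and -vvv dumps the raw response
+# as YAML. When a name is given, the detailed view of that VNF is printed.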
+def vnf_list(args):
+    #print "vnf-list",args
+    if args.all:
+        tenant = "any"
+    else:
+        tenant = _get_tenant()
+    if args.name:
+        toshow = _get_item_uuid("vnfs", args.name, tenant)
+        URLrequest = "http://%s:%s/openmano/%s/vnfs/%s" %(mano_host, mano_port, tenant, toshow)
+    else:
+        URLrequest = "http://%s:%s/openmano/%s/vnfs" %(mano_host, mano_port, tenant)
+    mano_response = requests.get(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text )
+    content = mano_response.json()
+    #print json.dumps(content, indent=4)
+    if args.verbose==None:
+        args.verbose=0
+    result = 0 if mano_response.status_code==200 else mano_response.status_code
+    if mano_response.status_code == 200:
+        if not args.name:
+            if args.verbose >= 3:
+                print yaml.safe_dump(content, indent=4, default_flow_style=False)
+                return result
+            if len(content['vnfs']) == 0:
+                print "No VNFs were found."
+                return 404 #HTTP_Not_Found
+            for vnf in content['vnfs']:
+                myoutput = "%s %s" %(vnf['uuid'].ljust(38),vnf['name'].ljust(20))
+                if args.verbose >=1:
+                    myoutput = "%s %s" %(myoutput, vnf['created_at'].ljust(20))
+                print myoutput
+                if args.verbose >=2:
+                    print "  Description: %s" %vnf['description']
+                    print "  VNF descriptor file: %s" %vnf['path']
+        else:
+            if args.verbose:
+                print yaml.safe_dump(content, indent=4, default_flow_style=False)
+                return result
+            vnf = content['vnf']
+            print "%s %s %s" %(vnf['uuid'].ljust(38),vnf['name'].ljust(20), vnf['created_at'].ljust(20))
+            print "  Description: %s" %vnf['description']
+            #print "  VNF descriptor file: %s" %vnf['path']
+            print "    VMs:"
+            for vm in vnf['VNFC']:
+                #print "    %s %s %s" %(vm['name'].ljust(20), vm['uuid'].ljust(38), vm['description'].ljust(30))
+                print "        %s %s" %(vm['name'].ljust(20), vm['description'])
+            if len(vnf['nets'])>0:
+                print "    Internal nets:"
+                for net in vnf['nets']:
+                    print "        %s %s" %(net['name'].ljust(20), net['description'])
+            if len(vnf['external-connections'])>0:
+                print "    External interfaces:"
+                for interface in vnf['external-connections']:
+                    print "        %s %s %s %s" %(interface['external_name'].ljust(20), interface['vm_name'].ljust(20), interface['internal_name'].ljust(20), \
+                                                  interface['vpci'].ljust(14))
+    else:
+        print content['error']['description']
+        if args.verbose:
+            print yaml.safe_dump(content, indent=4, default_flow_style=False)
+    return result
+
+def vnf_delete(args):
+    #print "vnf-delete",args
+    if args.all:
+        tenant = "any"
+    else:
+        tenant = _get_tenant()
+    todelete = _get_item_uuid("vnfs", args.name, tenant=tenant)
+    if not args.force:
+        r = raw_input("Delete VNF %s (y/N)? " %(todelete))
+        if  not (len(r)>0  and r[0].lower()=="y"):
+            return 0
+    URLrequest = "http://%s:%s/openmano/%s/vnfs/%s" %(mano_host, mano_port, tenant, todelete)
+    mano_response = requests.delete(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text )
+    result = 0 if mano_response.status_code==200 else mano_response.status_code
+    content = mano_response.json()
+    #print json.dumps(content, indent=4)
+    if mano_response.status_code == 200:
+        print content['result']
+    else:
+        print content['error']['description']
+    return result
+
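+# Example invocation (file name is illustrative):
+#     openmano scenario-create myscenario.yaml --name myscenario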
+def scenario_create(args):
+    #print "scenario-create",args
+    tenant = _get_tenant()
+    headers_req = {'content-type': 'application/yaml'}
+    myscenario = _load_file_or_yaml(args.file)
+
+    if args.name:
+        myscenario['name'] = args.name
+    if args.description:
+        myscenario['description'] = args.description
+    payload_req = yaml.safe_dump(myscenario, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True)
+    
+    #print payload_req
+        
+    URLrequest = "http://%s:%s/openmano/%s/scenarios" %(mano_host, mano_port, tenant)
+    logger.debug("openmano request: %s", payload_req)
+    mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
+    logger.debug("openmano response: %s", mano_response.text )
+    return _print_verbose(mano_response, args.verbose)
+
+def scenario_list(args):
+    #print "scenario-list",args
+    if args.all:
+        tenant = "any"
+    else:
+        tenant = _get_tenant()
+    if args.name:
+        toshow = _get_item_uuid("scenarios", args.name, tenant)
+        URLrequest = "http://%s:%s/openmano/%s/scenarios/%s" %(mano_host, mano_port, tenant, toshow)
+    else:
+        URLrequest = "http://%s:%s/openmano/%s/scenarios" %(mano_host, mano_port, tenant)
+    mano_response = requests.get(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text )
+    content = mano_response.json()
+    #print json.dumps(content, indent=4)
+    if args.verbose==None:
+        args.verbose=0
+
+    result = 0 if mano_response.status_code==200 else mano_response.status_code
+    if mano_response.status_code == 200:
+        if not args.name:
+            if args.verbose >= 3:
+                print yaml.safe_dump(content, indent=4, default_flow_style=False)
+                return result
+            if len(content['scenarios']) == 0:
+                print "No scenarios were found."
+                return 404 #HTTP_Not_Found
+            for scenario in content['scenarios']:
+                myoutput = "%s %s" %(scenario['uuid'].ljust(38),scenario['name'].ljust(20))
+                if args.verbose >=1:
+                    myoutput = "%s %s" %(myoutput, scenario['created_at'].ljust(20))
+                print myoutput
+                if args.verbose >=2:
+                    print "  Description: %s" %scenario['description']
+        else:
+            if args.verbose:
+                print yaml.safe_dump(content, indent=4, default_flow_style=False)
+                return result
+            scenario = content['scenario']
+            myoutput = "%s %s %s" %(scenario['uuid'].ljust(38),scenario['name'].ljust(20), scenario['created_at'].ljust(20))
+            print myoutput
+            print "  Description: %s" %scenario['description']
+            print "    VNFs:"
+            for vnf in scenario['vnfs']:
+                print "        %s %s %s" %(vnf['name'].ljust(20), vnf['vnf_id'].ljust(38), vnf['description'])
+            if len(scenario['nets'])>0:
+                print "    Internal nets:"
+                for net in scenario['nets']:
+                    if net['description'] is None:   #if description does not exist, description is "-". Valid for external and internal nets.
+                        net['description'] = '-' 
+                    if not net['external']:
+                        print "        %s %s %s" %(net['name'].ljust(20), net['uuid'].ljust(38), net['description'].ljust(30))
+                print "    External nets:"
+                for net in scenario['nets']:
+                    if net['external']:
+                        print "        %s %s %s vim-id:%s" %(net['name'].ljust(20), net['uuid'].ljust(38), net['description'].ljust(30), net['vim_id'])
+    else:
+        print content['error']['description']
+        if args.verbose:
+            print yaml.safe_dump(content, indent=4, default_flow_style=False)
+    return result
+
+def scenario_delete(args):
+    #print "scenario-delete",args
+    if args.all:
+        tenant = "any"
+    else:
+        tenant = _get_tenant()
+    todelete = _get_item_uuid("scenarios", args.name, tenant=tenant)
+    if not args.force:
+        r = raw_input("Delete scenario %s (y/N)? " %(args.name))
+        if  not (len(r)>0  and r[0].lower()=="y"):
+            return 0
+    URLrequest = "http://%s:%s/openmano/%s/scenarios/%s" %(mano_host, mano_port, tenant, todelete)
+    mano_response = requests.delete(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text )
+    result = 0 if mano_response.status_code==200 else mano_response.status_code
+    content = mano_response.json()
+    #print json.dumps(content, indent=4)
+    if mano_response.status_code == 200:
+        print content['result']
+    else:
+        print content['error']['description']
+    return result
+
+def scenario_deploy(args):
+    print "This command is deprecated, use 'openmano instance-scenario-create --scenario %s --name %s' instead!!!" % (args.scenario, args.name)
+    print
+    args.file = None
+    args.netmap_use = None
+    args.netmap_create = None
+    return instance_create(args)
+
+#     #print "scenario-deploy",args
+#     headers_req = {'content-type': 'application/json'}
+#     action = {}
+#     actionCmd="start"
+#     if args.nostart:
+#         actionCmd="reserve"
+#     action[actionCmd] = {}
+#     action[actionCmd]["instance_name"] = args.name
+#     if args.datacenter != None:
+#         action[actionCmd]["datacenter"] = args.datacenter
+#     elif mano_datacenter != None:
+#         action[actionCmd]["datacenter"] = mano_datacenter
+#         
+#     if args.description:
+#         action[actionCmd]["description"] = args.description
+#     payload_req = json.dumps(action, indent=4)
+#     #print payload_req
+# 
+#     URLrequest = "http://%s:%s/openmano/%s/scenarios/%s/action" %(mano_host, mano_port, mano_tenant, args.scenario)
+#     logger.debug("openmano request: %s", payload_req)
+#     mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
+#     logger.debug("openmano response: %s", mano_response.text )
+#     if args.verbose==None:
+#         args.verbose=0
+#     
+#     result = 0 if mano_response.status_code==200 else mano_response.status_code
+#     content = mano_response.json()
+#     #print json.dumps(content, indent=4)
+#     if args.verbose >= 3:
+#         print yaml.safe_dump(content, indent=4, default_flow_style=False)
+#         return result
+# 
+#     if mano_response.status_code == 200:
+#         myoutput = "%s %s" %(content['uuid'].ljust(38),content['name'].ljust(20))
+#         if args.verbose >=1:
+#             myoutput = "%s %s" %(myoutput, content['created_at'].ljust(20))
+#         if args.verbose >=2:
+#             myoutput = "%s %s %s" %(myoutput, content['description'].ljust(30))
+#         print myoutput
+#         print ""
+#         print "To check the status, run the following command:"
+#         print "openmano instance-scenario-list <instance_id>"
+#     else:
+#         print content['error']['description']
+#     return result
+
+def scenario_verify(args):
+    #print "scenario-verify",args
+    headers_req = {'content-type': 'application/json'}
+    action = {}
+    action["verify"] = {}
+    action["verify"]["instance_name"] = "scen-verify-return5"
+    payload_req = json.dumps(action, indent=4)
+    #print payload_req
+
+    URLrequest = "http://%s:%s/openmano/%s/scenarios/%s/action" %(mano_host, mano_port, mano_tenant, args.scenario)
+    logger.debug("openmano request: %s", payload_req)
+    mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
+    logger.debug("openmano response: %s", mano_response.text )
+    
+    result = 0 if mano_response.status_code==200 else mano_response.status_code
+    content = mano_response.json()
+    #print json.dumps(content, indent=4)
+    if mano_response.status_code == 200:
+        print content['result']
+    else:
+        print content['error']['description']
+    return result
+
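+# Example (names are illustrative): deploy scenario 'myscen' as instance
+# 'inst1', mapping its 'mgmt' network onto the existing datacenter network
+# 'default' and creating its 'data' network at the datacenter:
+#     openmano instance-scenario-create --scenario myscen --name inst1 \
+#             --netmap-use mgmt=default --netmap-create data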
+def instance_create(args):
+    tenant = _get_tenant()
+    headers_req = {'content-type': 'application/yaml'}
+    myInstance={"instance": {}, "schema_version": "0.1"}
+    if args.file:
+        instance_dict = _load_file_or_yaml(args.file)
+        if "instance" not in instance_dict:
+            myInstance = {"instance": instance_dict, "schema_version": "0.1"}
+        else:
+            myInstance = instance_dict
+    if args.name:
+        myInstance["instance"]['name'] = args.name
+    if args.description:
+        myInstance["instance"]['description'] = args.description
+    if args.nostart:
+        myInstance["instance"]['action'] = "reserve"
+    #datacenter
+    datacenter = myInstance["instance"].get("datacenter")
+    if args.datacenter != None:
+        datacenter = args.datacenter
+    myInstance["instance"]["datacenter"] = _get_datacenter(datacenter, tenant)
+    #scenario
+    scenario = myInstance["instance"].get("scenario")
+    if args.scenario != None:
+        scenario = args.scenario
+    if not scenario:
+        print "you must provide an scenario in the file descriptor or with --scenario"
+        return -1
+    myInstance["instance"]["scenario"] = _get_item_uuid("scenarios", scenario, tenant)
+    if args.netmap_use:
+        if "networks" not in myInstance["instance"]:
+            myInstance["instance"]["networks"] = {}
+        for net in args.netmap_use:
+            net_comma_list = net.split(",")
+            for net_comma in net_comma_list:
+                net_tuple = net_comma.split("=")
+                if len(net_tuple) != 2:
+                    print "error at netmap-use. Expected net-scenario=net-datacenter. (%s)?" % net_comma
+                    return -1
+                net_scenario   = net_tuple[0].strip()
+                net_datacenter = net_tuple[1].strip()
+                if net_scenario not in myInstance["instance"]["networks"]:
+                    myInstance["instance"]["networks"][net_scenario] = {} 
+                myInstance["instance"]["networks"][net_scenario]["netmap-use"] = net_datacenter
+    if args.netmap_create:
+        if "networks" not in myInstance["instance"]:
+            myInstance["instance"]["networks"] = {}
+        for net in args.netmap_create:
+            net_comma_list = net.split(",")
+            for net_comma in net_comma_list:
+                net_tuple = net_comma.split("=")
+                if len(net_tuple) == 1:
+                    net_scenario   = net_tuple[0].strip()
+                    net_datacenter = None
+                elif len(net_tuple) == 2:
+                    net_scenario   = net_tuple[0].strip()
+                    net_datacenter = net_tuple[1].strip()
+                else:
+                    print "error at netmap-create. Expected net-scenario=net-datacenter or net-scenario. (%s)?" % net_comma
+                    return -1
+                if net_scenario not in myInstance["instance"]["networks"]:
+                    myInstance["instance"]["networks"][net_scenario] = {} 
+                myInstance["instance"]["networks"][net_scenario]["netmap-create"] = net_datacenter
+                        
+    payload_req = yaml.safe_dump(myInstance, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True)
+    logger.debug("openmano request: %s", payload_req)
+    URLrequest = "http://%s:%s/openmano/%s/instances" %(mano_host, mano_port, tenant)
+    mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
+    logger.debug("openmano response: %s", mano_response.text )
+    if args.verbose==None:
+        args.verbose=0
+    
+    result = 0 if mano_response.status_code==200 else mano_response.status_code
+    content = mano_response.json()
+    #print json.dumps(content, indent=4)
+    if args.verbose >= 3:
+        print yaml.safe_dump(content, indent=4, default_flow_style=False)
+        return result
+
+    if mano_response.status_code == 200:
+        myoutput = "%s %s" %(content['uuid'].ljust(38),content['name'].ljust(20))
+        if args.verbose >=1:
+            myoutput = "%s %s" %(myoutput, content['created_at'].ljust(20))
+        if args.verbose >=2:
+            myoutput = "%s %s %s" %(myoutput, content['description'].ljust(30))
+        print myoutput
+    else:
+        print content['error']['description']
+    return result
+
+def instance_scenario_list(args):
+    #print "instance-scenario-list",args
+    if args.all:
+        tenant = "any"
+    else:
+        tenant = _get_tenant()
+    if args.name:
+        toshow = _get_item_uuid("instances", args.name, tenant)
+        URLrequest = "http://%s:%s/openmano/%s/instances/%s" %(mano_host, mano_port, tenant, toshow)
+    else:
+        URLrequest = "http://%s:%s/openmano/%s/instances" %(mano_host, mano_port, tenant)
+    mano_response = requests.get(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text )
+    content = mano_response.json()
+    #print json.dumps(content, indent=4)
+    if args.verbose==None:
+        args.verbose=0
+
+    result = 0 if mano_response.status_code==200 else mano_response.status_code
+    if mano_response.status_code == 200:
+        if not args.name:
+            if args.verbose >= 3:
+                print yaml.safe_dump(content, indent=4, default_flow_style=False)
+                return result
+            if len(content['instances']) == 0:
+                print "No scenario instances were found."
+                return result
+            for instance in content['instances']:
+                myoutput = "%s %s" %(instance['uuid'].ljust(38),instance['name'].ljust(20))
+                if args.verbose >=1:
+                    myoutput = "%s %s" %(myoutput, instance['created_at'].ljust(20))
+                print myoutput
+                if args.verbose >=2:
+                    print "Description: %s" %instance['description']
+        else:
+            if args.verbose:
+                print yaml.safe_dump(content, indent=4, default_flow_style=False)
+                return result
+            instance = content
+            print "%s %s %s" %(instance['uuid'].ljust(38),instance['name'].ljust(20),instance['created_at'].ljust(20))
+            print "Description: %s" %instance['description']
+            print "Template scenario id: %s" %instance['scenario_id']
+            print "Template scenario name: %s" %instance['scenario_name']
+            print "---------------------------------------"
+            print "VNF instances: %d" %len(instance['vnfs'])
+            for vnf in instance['vnfs']:
+                #print "    %s %s Template vnf name: %s Template vnf id: %s" %(vnf['uuid'].ljust(38), vnf['name'].ljust(20), vnf['vnf_name'].ljust(20), vnf['vnf_id'].ljust(38))
+                print "    %s %s Template vnf id: %s" %(vnf['uuid'].ljust(38), vnf['vnf_name'].ljust(20), vnf['vnf_id'].ljust(38))
+            if len(instance['nets'])>0:
+                print "---------------------------------------"
+                print "Internal nets:"
+                for net in instance['nets']:
+                    if not net['external']:
+                        print "    %s %s VIM ID: %s" %(net['uuid'].ljust(38), net['status'].ljust(12), net['vim_net_id'])
+                print "---------------------------------------"
+                print "External nets:"
+                for net in instance['nets']:
+                    if net['external']:
+                        print "    %s %s VIM ID: %s" %(net['uuid'].ljust(38), net['status'].ljust(12), net['vim_net_id'])
+            print "---------------------------------------"
+            print "VM instances:"
+            for vnf in instance['vnfs']:
+                for vm in vnf['vms']:
+                    print "    %s %s %s %s VIM ID: %s" %(vm['uuid'].ljust(38), vnf['vnf_name'].ljust(20), vm['name'].ljust(20), vm['status'].ljust(12), vm['vim_vm_id'])
+    else:
+        print content['error']['description']
+        if args.verbose:
+            print yaml.safe_dump(content, indent=4, default_flow_style=False)
+    return result
+
+def instance_scenario_status(args):
+    print "instance-scenario-status"
+    return 0
+
+def instance_scenario_delete(args):
+    if args.all:
+        tenant = "any"
+    else:
+        tenant = _get_tenant()
+    todelete = _get_item_uuid("instances", args.name, tenant=tenant)
+    #print "instance-scenario-delete",args
+    if not args.force:
+        r = raw_input("Delete scenario instance %s (y/N)? " %(args.name))
+        if  not (len(r)>0  and r[0].lower()=="y"):
+            return 0
+    URLrequest = "http://%s:%s/openmano/%s/instances/%s" %(mano_host, mano_port, tenant, todelete)
+    mano_response = requests.delete(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text )
+    result = 0 if mano_response.status_code==200 else mano_response.status_code
+    content = mano_response.json()
+    #print json.dumps(content, indent=4)
+    if mano_response.status_code == 200:
+        print content['result']
+    else:
+        print content['error']['description']
+    return result
+
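+# Example (uuid is illustrative): 'openmano instance-scenario-action inst1
+# reboot --vm <vm-uuid>' POSTs {"reboot": null, "vms": ["<vm-uuid>"]} to
+# /openmano/<tenant>/instances/<instance-uuid>/action.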
+def instance_scenario_action(args):
+    #print "instance-scenario-action", args
+    tenant = _get_tenant()
+    toact = _get_item_uuid("instances", args.name, tenant=tenant)
+    action={}
+    action[ args.action ] = args.param
+    if args.vnf:
+        action["vnfs"] = args.vnf
+    if args.vm:
+        action["vms"] = args.vm
+    
+    headers_req = {'content-type': 'application/json'}
+    payload_req = json.dumps(action, indent=4)
+    URLrequest = "http://%s:%s/openmano/%s/instances/%s/action" %(mano_host, mano_port, tenant, toact)
+    logger.debug("openmano request: %s", payload_req)
+    mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
+    logger.debug("openmano response: %s", mano_response.text )
+    result = 0 if mano_response.status_code==200 else mano_response.status_code
+    content = mano_response.json()
+    #print json.dumps(content, indent=4)
+    if mano_response.status_code == 200:
+        if args.verbose:
+            print yaml.safe_dump(content, indent=4, default_flow_style=False)
+            return result
+        for uuid,c in content.iteritems():
+            print "%s %s %s" %(uuid.ljust(38), c['name'].ljust(20),c['description'].ljust(20))
+    else:
+        print content['error']['description']
+    return result
+
+
+def instance_vnf_list(args):
+    print "instance-vnf-list"
+    return 0
+
+def instance_vnf_status(args):
+    print "instance-vnf-status"
+    return 0
+
+def tenant_create(args):
+    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+    tenant_dict={"name": args.name}
+    if args.description!=None:
+        tenant_dict["description"] = args.description 
+    payload_req = json.dumps( {"tenant": tenant_dict })
+    
+    #print payload_req
+        
+    URLrequest = "http://%s:%s/openmano/tenants" %(mano_host, mano_port)
+    logger.debug("openmano request: %s", payload_req)
+    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
+    logger.debug("openmano response: %s", mano_response.text )
+    return _print_verbose(mano_response, args.verbose)
+
+def tenant_list(args):
+    #print "tenant-list",args
+    if args.name:
+        toshow = _get_item_uuid("tenants", args.name)
+        URLrequest = "http://%s:%s/openmano/tenants/%s" %(mano_host, mano_port, toshow)
+    else:
+        URLrequest = "http://%s:%s/openmano/tenants" %(mano_host, mano_port)
+    mano_response = requests.get(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text )
+    if args.verbose==None:
+        args.verbose=0
+    if args.name!=None:
+        args.verbose += 1
+    return _print_verbose(mano_response, args.verbose)
+
+def tenant_delete(args):
+    #print "tenant-delete",args
+    todelete = _get_item_uuid("tenants", args.name)
+    if not args.force:
+        r = raw_input("Delete tenant %s (y/N)? " %(args.name))
+        if  not (len(r)>0  and r[0].lower()=="y"):
+            return 0
+    URLrequest = "http://%s:%s/openmano/tenants/%s" %(mano_host, mano_port, todelete)
+    mano_response = requests.delete(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text )
+    result = 0 if mano_response.status_code==200 else mano_response.status_code
+    content = mano_response.json()
+    #print json.dumps(content, indent=4)
+    if mano_response.status_code == 200:
+        print content['result']
+    else:
+        print content['error']['description']
+    return result
+
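+# Example (credentials are illustrative): attach datacenter 'mydc' to the
+# operating tenant with explicit VIM credentials:
+#     openmano datacenter-attach mydc --user admin --password secret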
+def datacenter_attach(args):
+    tenant = _get_tenant()
+    datacenter = _get_datacenter(args.name)
+    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+    
+    datacenter_dict={}
+    if args.vim_tenant_id != None:
+        datacenter_dict['vim_tenant'] = args.vim_tenant_id
+    if args.vim_tenant_name != None:
+        datacenter_dict['vim_tenant_name'] = args.vim_tenant_name
+    if args.user != None:
+        datacenter_dict['vim_username'] = args.user
+    if args.password != None:
+        datacenter_dict['vim_password'] = args.password
+    payload_req = json.dumps( {"datacenter": datacenter_dict })
+    
+    #print payload_req
+        
+    URLrequest = "http://%s:%s/openmano/%s/datacenters/%s" %(mano_host, mano_port, tenant, datacenter)
+    logger.debug("openmano request: %s", payload_req)
+    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
+    logger.debug("openmano response: %s", mano_response.text )
+    result = _print_verbose(mano_response, args.verbose)
+    #provide additional information on error
+    if mano_response.status_code != 200:
+        content = mano_response.json()
+        if "already in use for  'name'" in content['error']['description'] and \
+                "to database vim_tenants table" in content['error']['description']:
+            print "Try to specify a different name with --vim-tenant-name"
+    return result
+
+def datacenter_detach(args):
+    if args.all:
+        tenant = "any"
+    else:
+        tenant = _get_tenant()
+    datacenter = _get_datacenter(args.name, tenant)
+    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+    URLrequest = "http://%s:%s/openmano/%s/datacenters/%s" %(mano_host, mano_port, tenant, datacenter)
+    mano_response = requests.delete(URLrequest, headers=headers_req)
+    logger.debug("openmano response: %s", mano_response.text )
+    content = mano_response.json()
+    #print json.dumps(content, indent=4)
+    result = 0 if mano_response.status_code==200 else mano_response.status_code
+    if mano_response.status_code == 200:
+        print content['result']
+    else:
+        print content['error']['description']
+    return result
+
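+# Example (url is illustrative):
+#     openmano datacenter-create mydc http://10.0.0.1:9080/openvim --type openvim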
+def datacenter_create(args):
+    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+    datacenter_dict={"name": args.name, "vim_url": args.url}
+    if args.description!=None:
+        datacenter_dict["description"] = args.description 
+    if args.type!=None:
+        datacenter_dict["type"] = args.type 
+    if args.url_admin!=None:
+        datacenter_dict["vim_url_admin"] = args.url_admin 
+    if args.config!=None:
+        datacenter_dict["config"] = _load_file_or_yaml(args.config) 
+    payload_req = json.dumps( {"datacenter": datacenter_dict })
+    
+    #print payload_req
+        
+    URLrequest = "http://%s:%s/openmano/datacenters" %(mano_host, mano_port)
+    logger.debug("openmano request: %s", payload_req)
+    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
+    logger.debug("openmano response: %s", mano_response.text )
+    return _print_verbose(mano_response, args.verbose)
+
+def datacenter_delete(args):
+    #print "datacenter-delete",args
+    todelete = _get_item_uuid("datacenters", args.name, "any")
+    if not args.force:
+        r = raw_input("Delete datacenter %s (y/N)? " %(args.name))
+        if  not (len(r)>0  and r[0].lower()=="y"):
+            return 0
+    URLrequest = "http://%s:%s/openmano/datacenters/%s" %(mano_host, mano_port, todelete)
+    mano_response = requests.delete(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text )
+    result = 0 if mano_response.status_code==200 else mano_response.status_code
+    content = mano_response.json()
+    #print json.dumps(content, indent=4)
+    if mano_response.status_code == 200:
+        print content['result']
+    else:
+        print content['error']['description']
+    return result
+
+def datacenter_list(args):
+    #print "datacenter-list",args
+    tenant='any' if args.all else _get_tenant()
+    
+    if args.name:
+        toshow = _get_item_uuid("datacenters", args.name, tenant) 
+        URLrequest = "http://%s:%s/openmano/%s/datacenters/%s" %(mano_host, mano_port, tenant, toshow)
+    else:
+        URLrequest = "http://%s:%s/openmano/%s/datacenters" %(mano_host, mano_port, tenant)
+    mano_response = requests.get(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text )
+    if args.verbose==None:
+        args.verbose=0
+    if args.name!=None:
+        args.verbose += 1
+    return _print_verbose(mano_response, args.verbose)
+
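+# Dispatches the vim-net/vim-tenant subcommands: 'list' GETs, 'delete' DELETEs
+# and 'create' POSTs a yaml descriptor, all against
+# /openmano/<tenant>/vim/<datacenter>/<item>s.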
+def vim_action(args):
+    #print "datacenter-net-action",args
+    tenant = _get_tenant()
+    datacenter = _get_datacenter(args.datacenter, tenant)
+    if args.verbose==None:
+        args.verbose=0
+    if args.action=="list":
+        URLrequest = "http://%s:%s/openmano/%s/vim/%s/%ss" %(mano_host, mano_port, tenant, datacenter, args.item)
+        if args.name!=None:
+            args.verbose += 1
+            URLrequest += "/" + args.name
+        mano_response = requests.get(URLrequest)
+        logger.debug("openmano response: %s", mano_response.text )
+        return _print_verbose(mano_response, args.verbose)
+    elif args.action=="delete":
+        URLrequest = "http://%s:%s/openmano/%s/vim/%s/%ss/%s" %(mano_host, mano_port, tenant, datacenter, args.item, args.name)
+        mano_response = requests.delete(URLrequest)
+        logger.debug("openmano response: %s", mano_response.text )
+        result = 0 if mano_response.status_code==200 else mano_response.status_code
+        content = mano_response.json()
+        #print json.dumps(content, indent=4)
+        if mano_response.status_code == 200:
+            print content['result']
+        else:
+            print content['error']['description']
+        return result
+    elif args.action=="create":
+        headers_req = {'content-type': 'application/yaml'}
+        if args.file:
+            create_dict = _load_file_or_yaml(args.file)
+            if args.item not in create_dict:
+                create_dict = {args.item: create_dict}
+        else:
+            create_dict = {args.item:{}}
+        if args.name:
+            create_dict[args.item]['name'] = args.name
+        #if args.description:
+        #    create_dict[args.item]['description'] = args.description
+        if args.item=="vim-net":
+            if args.bind_net:
+                create_dict[args.item]['bind_net'] = args.bind_net
+            if args.bind_type:
+                create_dict[args.item]['bind_type'] = args.bind_type
+            if args.shared:
+                create_dict[args.item]['shared'] = args.shared
+        if "name" not in create_dict[args.item]:
+            print "You must provide a name in the descriptor file or with the --name option"
+            return -1
+        payload_req = yaml.safe_dump(create_dict, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True)
+        logger.debug("openmano request: %s", payload_req)
+        URLrequest = "http://%s:%s/openmano/%s/vim/%s/%ss" %(mano_host, mano_port, mano_tenant, datacenter, args.item)
+        mano_response = requests.post(URLrequest, headers = headers_req, data=payload_req)
+        logger.debug("openmano response: %s", mano_response.text )
+        if args.verbose==None:
+            args.verbose=0
+        return _print_verbose(mano_response, args.verbose)
+
+
+def datacenter_net_action(args):
+    if args.action == "net-update":
+        print "This command is deprecated, use 'openmano datacenter-netmap-delete --all' and 'openmano datacenter-netmap-upload' instead!!!"
+        print
+        args.action = "netmap-delete"
+        args.netmap = None
+        args.all = True
+        r = datacenter_netmap_action(args)
+        if r == 0:
+            args.force = True
+            args.action = "netmap-upload"
+            r = datacenter_netmap_action(args)
+        return r
+
+    if args.action == "net-edit":
+        args.netmap = args.net
+        args.name = None
+    elif args.action == "net-list":
+        args.netmap = None
+    elif args.action == "net-delete":
+        args.netmap = args.net
+        args.all = False
+          
+    args.action = "netmap" + args.action[3:]
+    args.vim_name=None
+    args.vim_id=None
+    print "This command is deprecated, use 'openmano datacenter-%s' instead!!!" % args.action
+    print
+    return datacenter_netmap_action(args)
+
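+# Handles all datacenter-netmap-* subcommands against
+# /openmano/<tenant>/datacenters/<datacenter>/netmaps: list (GET), delete
+# (DELETE, with --all to clear every netmap), upload (POST to .../upload),
+# and edit/create (PUT/POST of a json payload built from the file and options).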
+def datacenter_netmap_action(args):
+    tenant = _get_tenant()
+    datacenter = _get_datacenter(args.datacenter, tenant)
+    #print "datacenter_netmap_action",args
+    payload_req = None
+    if args.verbose==None:
+        args.verbose=0
+    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+    URLrequest = "http://%s:%s/openmano/%s/datacenters/%s/netmaps" %(mano_host, mano_port, tenant, datacenter)
+        
+    if args.action=="netmap-list":
+        if args.netmap:
+            URLrequest += "/" + args.netmap
+            args.verbose += 1
+        mano_response = requests.get(URLrequest)
+            
+    elif args.action=="netmap-delete":
+        if args.netmap and args.all:
+            print "you can not use a netmap name and the option --all at the same time"
+            return 1
+        if args.netmap:
+            force_text= "Delete default netmap '%s' from datacenter '%s' (y/N)? " % (args.netmap, datacenter)
+            URLrequest += "/" + args.netmap
+        elif args.all: 
+            force_text="Delete all default netmaps from datacenter '%s' (y/N)? " % (datacenter)
+        else:
+            print "you must specify a netmap name or the option --all"
+            return 1
+        if not args.force:
+            r = raw_input(force_text)
+            if  len(r)>0  and r[0].lower()=="y":
+                pass
+            else:
+                return 0
+        mano_response = requests.delete(URLrequest, headers=headers_req)
+    elif args.action=="netmap-upload":
+        if not args.force:
+            r = raw_input("Create all the available networks from datacenter '%s' as default netmaps (y/N)? " % (datacenter))
+            if  len(r)>0  and r[0].lower()=="y":
+                pass
+            else:
+                return 0
+        URLrequest += "/upload"
+        mano_response = requests.post(URLrequest, headers=headers_req)
+    elif args.action=="netmap-edit" or args.action=="netmap-create":
+        if args.file:
+            payload = _load_file_or_yaml(args.file)
+        else:
+            payload = {}
+        if "netmap" not in payload:
+            payload = {"netmap": payload}
+        if args.name:
+            payload["netmap"]["name"] = args.name
+        if args.vim_id:
+            payload["netmap"]["vim_id"] = args.vim_id
+        if args.action=="netmap-create" and args.vim_name:
+            payload["netmap"]["vim_name"] = args.vim_name
+        payload_req = json.dumps(payload)
+        logger.debug("openmano request: %s", payload_req)
+        
+        if args.action=="netmap-edit" and not args.force:
+            if len(payload["netmap"]) == 0:
+                print "You must supply some parameter to edit"
+                return 1
+            r = raw_input("Edit default netmap '%s' from datacenter '%s' (y/N)? " % (args.netmap, datacenter))
+            if  len(r)>0  and r[0].lower()=="y":
+                pass
+            else:
+                return 0
+            URLrequest += "/" + args.netmap
+            mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
+        else: #netmap-create
+            if "vim_name" not in payload["netmap"] and "vim_id" not in payload["netmap"]:
+                print "You must supply either --vim-id or --vim-name option; or include one of them in the file descriptor"
+                return 1
+            mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
+
+    logger.debug("openmano response: %s", mano_response.text )
+    return _print_verbose(mano_response, args.verbose)
+
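+# Generic editor behind the tenant-edit and datacenter-edit commands: wraps
+# the json/yaml changes as {"tenant": {...}} or {"datacenter": {...}} and
+# PUTs them to /openmano/<element>/<uuid>.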
+def element_edit(args):
+    element = _get_item_uuid(args.element, args.name)
+    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+    URLrequest = "http://%s:%s/openmano/%s/%s" %(mano_host, mano_port, args.element, element)
+    payload=_load_file_or_yaml(args.file)
+    if args.element[:-1] not in payload:
+        payload = {args.element[:-1]: payload }
+    payload_req = json.dumps(payload)
+    
+    #print payload_req
+    if not args.force or (args.name==None and args.file==None):
+        r = raw_input(" Edit " + args.element[:-1] + " " + args.name + " (y/N)? ")
+        if  len(r)>0  and r[0].lower()=="y":
+            pass
+        else:
+            return 0
+    logger.debug("openmano request: %s", payload_req)
+    mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
+    logger.debug("openmano response: %s", mano_response.text )
+    if args.verbose==None:
+        args.verbose=0
+    if args.name!=None:
+        args.verbose += 1
+    return _print_verbose(mano_response, args.verbose)
+
+
+global mano_host
+global mano_port
+global mano_tenant
+
+if __name__=="__main__":
+    
+    mano_tenant = os.getenv('OPENMANO_TENANT', None)
+    mano_host = os.getenv('OPENMANO_HOST',"localhost")
+    mano_port = os.getenv('OPENMANO_PORT',"9090")
+    mano_datacenter = os.getenv('OPENMANO_DATACENTER',None)
+    
+    main_parser = ThrowingArgumentParser(description='User program to interact with OPENMANO-SERVER (openmanod)')
+    main_parser.add_argument('--version', action='version', version='%(prog)s ' + __version__ )
+    
+    subparsers = main_parser.add_subparsers(help='commands')
+    
+    parent_parser = argparse.ArgumentParser(add_help=False)
+    parent_parser.add_argument('--verbose', '-v', action='count', help="increase verbosity level. Use several times")
+    parent_parser.add_argument('--debug', '-d', action='store_true', help="show debug information")
+
+    config_parser = subparsers.add_parser('config', parents=[parent_parser], help="prints configuration values")
+    config_parser.add_argument("-n", action="store_true", help="resolves tenant and datacenter names")
+    config_parser.set_defaults(func=config)
+
+    vnf_create_parser = subparsers.add_parser('vnf-create', parents=[parent_parser], help="adds a vnf into the catalogue")
+    vnf_create_parser.add_argument("file", action="store", help="location of the JSON file describing the VNF").completer = FilesCompleter
+    vnf_create_parser.add_argument("--name", action="store", help="name of the VNF (if it exists in the VNF descriptor, it is overwritten)")
+    vnf_create_parser.add_argument("--description", action="store", help="description of the VNF (if it exists in the VNF descriptor, it is overwritten)")
+    vnf_create_parser.add_argument("--image-path", action="store",  help="change image path locations (overwritten)")
+    vnf_create_parser.set_defaults(func=vnf_create)
+
+    vnf_list_parser = subparsers.add_parser('vnf-list', parents=[parent_parser], help="lists information about a vnf")
+    vnf_list_parser.add_argument("name", nargs='?', help="name of the VNF")
+    vnf_list_parser.add_argument("-a", "--all", action="store_true", help="shows all vnfs, not only the owned or public ones")
+    #vnf_list_parser.add_argument('--descriptor', help="prints the VNF descriptor", action="store_true")
+    vnf_list_parser.set_defaults(func=vnf_list)
+    
+    vnf_delete_parser = subparsers.add_parser('vnf-delete', parents=[parent_parser], help="deletes a vnf from the catalogue")
+    vnf_delete_parser.add_argument("name", action="store", help="name or uuid of the VNF to be deleted")
+    vnf_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+    vnf_delete_parser.add_argument("-a", "--all", action="store_true", help="allow delete not owned or privated one")
+    vnf_delete_parser.set_defaults(func=vnf_delete)
+    
+    scenario_create_parser = subparsers.add_parser('scenario-create', parents=[parent_parser], help="adds a scenario into the OPENMANO DB")
+    scenario_create_parser.add_argument("file", action="store", help="location of the YAML file describing the scenario").completer = FilesCompleter
+    scenario_create_parser.add_argument("--name", action="store", help="name of the scenario (if it exists in the YAML scenario, it is overwritten)")
+    scenario_create_parser.add_argument("--description", action="store", help="description of the scenario (if it exists in the YAML scenario, it is overwritten)")
+    scenario_create_parser.set_defaults(func=scenario_create)
+
+    scenario_list_parser = subparsers.add_parser('scenario-list', parents=[parent_parser], help="lists information about a scenario")
+    scenario_list_parser.add_argument("name", nargs='?', help="name of the scenario")
+    #scenario_list_parser.add_argument('--descriptor', help="prints the scenario descriptor", action="store_true")
+    scenario_list_parser.add_argument("-a", "--all", action="store_true", help="shows all scenarios, not only the owned or public ones")
+    scenario_list_parser.set_defaults(func=scenario_list)
+    
+    scenario_delete_parser = subparsers.add_parser('scenario-delete', parents=[parent_parser], help="deletes a scenario from the OPENMANO DB")
+    scenario_delete_parser.add_argument("name", action="store", help="name or uuid of the scenario to be deleted")
+    scenario_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+    scenario_delete_parser.add_argument("-a", "--all", action="store_true", help="allow delete not owned or privated one")
+    scenario_delete_parser.set_defaults(func=scenario_delete)
+
+    scenario_deploy_parser = subparsers.add_parser('scenario-deploy', parents=[parent_parser], help="deploys a scenario")
+    scenario_deploy_parser.add_argument("scenario", action="store", help="name or uuid of the scenario to be deployed")
+    scenario_deploy_parser.add_argument("name", action="store", help="name of the instance")
+    scenario_deploy_parser.add_argument("--nostart", action="store_true", help="does not start the vms, just reserve resources")
+    scenario_deploy_parser.add_argument("--datacenter", action="store", help="specifies the datacenter. Needed if several datacenters are available")
+    scenario_deploy_parser.add_argument("--description", action="store", help="description of the instance")
+    scenario_deploy_parser.set_defaults(func=scenario_deploy)
+    
+    scenario_verify_parser = subparsers.add_parser('scenario-verify', help="verifies if a scenario can be deployed (deploys it and deletes it)")
+    scenario_verify_parser.add_argument("scenario", action="store", help="name or uuid of the scenario to be verified")
+    scenario_verify_parser.add_argument('--debug', '-d', action='store_true', help="show debug information")
+    scenario_verify_parser.set_defaults(func=scenario_verify)
+    
+    instance_scenario_create_parser = subparsers.add_parser('instance-scenario-create', parents=[parent_parser], help="deploys a scenario")
+    instance_scenario_create_parser.add_argument("file", nargs='?', help="descriptor of the instance. Must be a file or yaml/json text")
+    instance_scenario_create_parser.add_argument("--scenario", action="store", help="name or uuid of the scenario to be deployed")
+    instance_scenario_create_parser.add_argument("--name", action="store", help="name of the instance")
+    instance_scenario_create_parser.add_argument("--nostart", action="store_true", help="does not start the vms, just reserve resources")
+    instance_scenario_create_parser.add_argument("--datacenter", action="store", help="specifies the datacenter. Needed if several datacenters are available")
+    instance_scenario_create_parser.add_argument("--netmap-use", action="append", type=str, dest="netmap_use", help="indicates a datacenter network to map a scenario network 'scenario-network=datacenter-network'. Can be used several times")
+    instance_scenario_create_parser.add_argument("--netmap-create", action="append", type=str, dest="netmap_create", help="the scenario network must be created at datacenter 'scenario-network[=datacenter-network-name]' . Can be used several times")
+    instance_scenario_create_parser.add_argument("--description", action="store", help="description of the instance")
+    instance_scenario_create_parser.set_defaults(func=instance_create)
+
+    instance_scenario_list_parser = subparsers.add_parser('instance-scenario-list', parents=[parent_parser], help="lists information about a scenario instance")
+    instance_scenario_list_parser.add_argument("name", nargs='?', help="name of the scenario instance")
+    instance_scenario_list_parser.add_argument("-a", "--all", action="store_true", help="shows all instance-scenarios, not only the owned")
+    instance_scenario_list_parser.set_defaults(func=instance_scenario_list)
+
+    instance_scenario_delete_parser = subparsers.add_parser('instance-scenario-delete', parents=[parent_parser], help="deletes a scenario instance (and deletes all VM and net instances in VIM)")
+    instance_scenario_delete_parser.add_argument("name", action="store", help="name or uuid of the scenario instance to be deleted")
+    instance_scenario_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+    instance_scenario_delete_parser.add_argument("-a", "--all", action="store_true", help="allow delete not owned or privated one")
+    instance_scenario_delete_parser.set_defaults(func=instance_scenario_delete)
+    
+    instance_scenario_action_parser = subparsers.add_parser('instance-scenario-action', parents=[parent_parser], help="invoke an action over part or the whole scenario instance")
+    instance_scenario_action_parser.add_argument("name", action="store", help="name or uuid of the scenario instance")
+    instance_scenario_action_parser.add_argument("action", action="store", type=str, \
+            choices=["start","pause","resume","shutoff","shutdown","forceOff","rebuild","reboot", "console"],\
+            help="action to send")
+    instance_scenario_action_parser.add_argument("param", nargs='?', help="addional param of the action. e.g. console type (novnc, ...), reboot type (TODO)")
+    instance_scenario_action_parser.add_argument("--vnf", action="append", help="VNF to act on (can use several entries)")
+    instance_scenario_action_parser.add_argument("--vm", action="append", help="VM to act on (can use several entries)")
+    instance_scenario_action_parser.set_defaults(func=instance_scenario_action)
+
+    #instance_scenario_status_parser = subparsers.add_parser('instance-scenario-status', help="show the status of a scenario instance")
+    #instance_scenario_status_parser.add_argument("name", action="store", help="name or uuid of the scenario instance")
+    #instance_scenario_status_parser.set_defaults(func=instance_scenario_status)
+    
+    tenant_create_parser = subparsers.add_parser('tenant-create', parents=[parent_parser], help="creates a new tenant")
+    tenant_create_parser.add_argument("name", action="store", help="name for the tenant")
+    tenant_create_parser.add_argument("--description", action="store", help="description of the tenant")
+    tenant_create_parser.set_defaults(func=tenant_create)
+
+    tenant_delete_parser = subparsers.add_parser('tenant-delete', parents=[parent_parser], help="deletes a tenant from the catalogue")
+    tenant_delete_parser.add_argument("name", action="store", help="name or uuid of the tenant to be deleted")
+    tenant_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+    tenant_delete_parser.set_defaults(func=tenant_delete)
+
+    tenant_list_parser = subparsers.add_parser('tenant-list', parents=[parent_parser], help="lists information about a tenant")
+    tenant_list_parser.add_argument("name", nargs='?', help="name or uuid of the tenant")
+    tenant_list_parser.set_defaults(func=tenant_list)
+
+    item_list=('tenant','datacenter') #put tenant before so that help appears in order
+    for item in item_list:
+        element_edit_parser = subparsers.add_parser(item+'-edit', parents=[parent_parser], help="edits one "+item)
+        element_edit_parser.add_argument("name", help="name or uuid of the "+item)
+        element_edit_parser.add_argument("file", help="json/yaml text or file with the changes").completer = FilesCompleter
+        element_edit_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
+        element_edit_parser.set_defaults(func=element_edit, element=item + 's')
+
+    datacenter_create_parser = subparsers.add_parser('datacenter-create', parents=[parent_parser], help="creates a new datacenter")
+    datacenter_create_parser.add_argument("name", action="store", help="name for the datacenter")
+    datacenter_create_parser.add_argument("url", action="store", help="url for the datacenter")
+    datacenter_create_parser.add_argument("--url_admin", action="store", help="url for administration for the datacenter")
+    datacenter_create_parser.add_argument("--type", action="store", help="datacenter type: openstack or openvim (default)")
+    datacenter_create_parser.add_argument("--config", action="store", help="aditional configuration in json/yaml format")
+    datacenter_create_parser.add_argument("--description", action="store", help="description of the datacenter")
+    datacenter_create_parser.set_defaults(func=datacenter_create)
+
+    datacenter_delete_parser = subparsers.add_parser('datacenter-delete', parents=[parent_parser], help="deletes a datacenter from the catalogue")
+    datacenter_delete_parser.add_argument("name", action="store", help="name or uuid of the datacenter to be deleted")
+    datacenter_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+    datacenter_delete_parser.set_defaults(func=datacenter_delete)
+
+    datacenter_list_parser = subparsers.add_parser('datacenter-list', parents=[parent_parser], help="lists information about a datacenter")
+    datacenter_list_parser.add_argument("name", nargs='?', help="name or uuid of the datacenter")
+    datacenter_list_parser.add_argument("-a", "--all", action="store_true", help="shows all datacenters, not only datacenters attached to tenant")
+    datacenter_list_parser.set_defaults(func=datacenter_list)
+
+    datacenter_attach_parser = subparsers.add_parser('datacenter-attach', parents=[parent_parser], help="associates a datacenter to the operating tenant")
+    datacenter_attach_parser.add_argument("name", help="name or uuid of the datacenter")
+    datacenter_attach_parser.add_argument('--vim-tenant-id', action='store', help="specify a datacenter tenant to use. A new one is created by default")
+    datacenter_attach_parser.add_argument('--vim-tenant-name', action='store', help="specify a datacenter tenant name.")
+    datacenter_attach_parser.add_argument("--user", action="store", help="user credentials for the datacenter")
+    datacenter_attach_parser.add_argument("--password", action="store", help="password credentials for the datacenter")
+    datacenter_attach_parser.set_defaults(func=datacenter_attach)
+
+    datacenter_detach_parser = subparsers.add_parser('datacenter-detach', parents=[parent_parser], help="removes the association between a datacenter and the operating tenant")
+    datacenter_detach_parser.add_argument("name", help="name or uuid of the datacenter")
+    datacenter_detach_parser.add_argument("-a", "--all", action="store_true", help="removes all associations from this datacenter")
+    datacenter_detach_parser.set_defaults(func=datacenter_detach)
+
+
+    action_dict={'net-update': 'retrieves external networks from the datacenter',
+                 'net-edit': 'edits an external network',
+                 'net-delete': 'deletes an external network',
+                 'net-list': 'lists external networks from a datacenter'
+                 }
+    for item in action_dict:
+        datacenter_action_parser = subparsers.add_parser('datacenter-'+item, parents=[parent_parser], help=action_dict[item])
+        datacenter_action_parser.add_argument("datacenter", help="name or uuid of the datacenter")
+        if item=='net-edit' or item=='net-delete':
+            datacenter_action_parser.add_argument("net", help="name or uuid of the datacenter net")
+        if item=='net-edit':
+            datacenter_action_parser.add_argument("file", help="json/yaml text or file with the changes").completer = FilesCompleter
+        if item!='net-list':
+            datacenter_action_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
+        datacenter_action_parser.set_defaults(func=datacenter_net_action, action=item)
+
+
+    action_dict={'netmap-upload': 'create network scenario netmaps based on the datacenter networks',
+                 'netmap-create': 'create a new network scenario netmap',
+                 'netmap-edit':   'edit the name of a network scenario netmap',
+                 'netmap-delete': 'deletes a network scenario netmap (--all for clearing all)',
+                 'netmap-list':   'list/show network scenario netmaps'
+                 }
+    for item in action_dict:
+        datacenter_action_parser = subparsers.add_parser('datacenter-'+item, parents=[parent_parser], help=action_dict[item])
+        datacenter_action_parser.add_argument("--datacenter", help="name or uuid of the datacenter")
+        #if item=='net-add':
+        #    datacenter_action_parser.add_argument("net", help="name of the network")
+        if item=='netmap-delete':
+            datacenter_action_parser.add_argument("netmap", nargs='?',help="name or uuid of the datacenter netmap to delete")
+            datacenter_action_parser.add_argument("--all", action="store_true", help="delete all netmap of this datacenter")
+            datacenter_action_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
+        if item=='netmap-edit':
+            datacenter_action_parser.add_argument("netmap", help="name or uuid of the datacenter netmap do edit")
+            datacenter_action_parser.add_argument("file", nargs='?', help="json/yaml text or file with the changes").completer = FilesCompleter
+            datacenter_action_parser.add_argument("--name", action='store', help="name to assign to the datacenter netmap")
+            datacenter_action_parser.add_argument('--vim-id', action='store', help="specify vim network uuid")
+            datacenter_action_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
+        if item=='netmap-list':
+            datacenter_action_parser.add_argument("netmap", nargs='?',help="name or uuid of the datacenter netmap to show")
+        if item=='netmap-create':
+            datacenter_action_parser.add_argument("file", nargs='?', help="json/yaml text or file descriptor with the changes").completer = FilesCompleter
+            datacenter_action_parser.add_argument("--name", action='store', help="name to assign to the datacenter netmap, by default same as vim-name")
+            datacenter_action_parser.add_argument('--vim-id', action='store', help="specify vim network uuid")
+            datacenter_action_parser.add_argument('--vim-name', action='store', help="specify vim network name")
+        if item=='netmap-upload':
+            datacenter_action_parser.add_argument("-f","--force", action="store_true", help="do not prompt for confirmation")
+        datacenter_action_parser.set_defaults(func=datacenter_netmap_action, action=item)
+    
+    for item in ("network", "tenant"):
+        if item=="network":
+            commnad_name = 'vim-net'
+        else:
+            commnad_name = 'vim-'+item
+        vim_item_list_parser = subparsers.add_parser(commnad_name + '-list', parents=[parent_parser], help="list the vim " + item + "s")
+        vim_item_list_parser.add_argument("name", nargs='?', help="name or uuid of the " + item + "s")
+        vim_item_list_parser.add_argument("--datacenter", action="store", help="specifies the datacenter")
+        vim_item_list_parser.set_defaults(func=vim_action, item=item, action="list")
+
+        vim_item_del_parser = subparsers.add_parser(command_name + '-delete', parents=[parent_parser], help="deletes a vim " + item)
+        vim_item_del_parser.add_argument("name", help="name or uuid of the " + item)
+        vim_item_del_parser.add_argument("--datacenter", action="store", help="specifies the datacenter")
+        vim_item_del_parser.set_defaults(func=vim_action, item=item, action="delete")
+
+        vim_item_create_parser = subparsers.add_parser(command_name + '-create', parents=[parent_parser], help="creates a " + item + " at the vim")
+        vim_item_create_parser.add_argument("file", nargs='?', help="descriptor of the %s. Must be a file or yaml/json text" % item).completer = FilesCompleter
+        vim_item_create_parser.add_argument("--name", action="store", help="name of the %s" % item  )
+        vim_item_create_parser.add_argument("--datacenter", action="store", help="specifies the datacenter")
+        if item=="network":
+            vim_item_create_parser.add_argument("--type", action="store", help="type of network, data, ptp, bridge")
+            vim_item_create_parser.add_argument("--shared", action="store_true", help="Private or shared")
+            vim_item_create_parser.add_argument("--bind-net", action="store", help="For openvim datacenter type, net to be bind to, for vlan type, use sufix ':<vlan_tag>'")
+        else:
+            vim_item_create_parser.add_argument("--description", action="store", help="description of the %s" % item)
+        vim_item_create_parser.set_defaults(func=vim_action, item=item, action="create")
+
+    argcomplete.autocomplete(main_parser)
+    
+    try:
+        args = main_parser.parse_args()
+        #logging info
+        level = logging.CRITICAL
+        streamformat = "%(asctime)s %(name)s %(levelname)s: %(message)s"
+        if "debug" in args and args.debug:
+            level = logging.DEBUG
+        logging.basicConfig(format=streamformat, level=level)
+        logger = logging.getLogger('mano')
+        logger.setLevel(level)
+        result = args.func(args)
+        if result is None:
+            result = 0
+        # Calling exit() here would raise SystemExit inside the try block and be
+        # caught by the handler below, so exit() is deferred to the end.
+    except (requests.exceptions.ConnectionError):
+        print "Connection error: not possible to contact OPENMANO-SERVER (openmanod)"
+        result = -2
+    except (KeyboardInterrupt):
+        print 'Exiting openmano'
+        result = -3
+    except (SystemExit, ArgumentParserError):
+        result = -4
+    except OpenmanoCLIError as e:
+        print str(e)
+        result = -5
+    
+    #print result
+    exit(result)
+
diff --git a/models/openmano/bin/openmano_cleanup.sh b/models/openmano/bin/openmano_cleanup.sh
new file mode 100755 (executable)
index 0000000..326018d
--- /dev/null
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Run this on openmano VM to clean up all instances, scenarios and vnfs.
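+# Each *-list command prints one "<uuid> <name>" entry per line, so the first
+# space-separated field of each line is the uuid to force-delete.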
+
+./openmano instance-scenario-list | cut -d " " -f1 | while read line; do
+    ./openmano instance-scenario-delete "$line" -f
+done
+
+./openmano scenario-list | cut -d " " -f1 | while read line; do
+    ./openmano scenario-delete "$line" -f
+done
+
+./openmano vnf-list | cut -d " " -f1 | while read line; do
+    ./openmano vnf-delete "$line" -f
+done
diff --git a/models/openmano/python/CMakeLists.txt b/models/openmano/python/CMakeLists.txt
new file mode 100644 (file)
index 0000000..abbf139
--- /dev/null
@@ -0,0 +1,13 @@
+# Creation Date: 2016/1/12
+# RIFT_IO_STANDARD_CMAKE_COPYRIGHT_HEADER(END)
+
+cmake_minimum_required(VERSION 2.8)
+
+
+rift_python_install_tree(
+  FILES
+    rift/openmano/__init__.py
+    rift/openmano/rift2openmano.py
+    rift/openmano/openmano_client.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
diff --git a/models/openmano/python/rift/openmano/__init__.py b/models/openmano/python/rift/openmano/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/models/openmano/python/rift/openmano/openmano_client.py b/models/openmano/python/rift/openmano/openmano_client.py
new file mode 100755 (executable)
index 0000000..bd34be1
--- /dev/null
@@ -0,0 +1,524 @@
+#!/usr/bin/python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+import logging
+import os
+import re
+import subprocess
+import sys
+import tempfile
+import requests
+import json
+
+
+class OpenmanoCommandFailed(Exception):
+    pass
+
+
+class OpenmanoUnexpectedOutput(Exception):
+    pass
+
+
+class VNFExistsError(Exception):
+    pass
+
+
+class InstanceStatusError(Exception):
+    pass
+
+
+class OpenmanoHttpAPI(object):
+    def __init__(self, log, host, port, tenant):
+        self._log = log
+        self._host = host
+        self._port = port
+        self._tenant = tenant
+
+        self._session = requests.Session()
+
+    def get_instance(self, instance_uuid):
+        url = "http://{host}:{port}/openmano/{tenant}/instances/{instance}".format(
+                host=self._host,
+                port=self._port,
+                tenant=self._tenant,
+                instance=instance_uuid,
+                )
+
+        resp = self._session.get(url)
+        try:
+            resp.raise_for_status()
+        except requests.exceptions.HTTPError as e:
+            raise InstanceStatusError(e)
+
+        return resp.json()
+
+    def get_instance_vm_console_url(self, instance_uuid, vim_uuid):
+        url = "http://{host}:{port}/openmano/{tenant}/instances/{instance}/action".format(
+            host=self._host,
+            port=self._port,
+            tenant=self._tenant,
+            instance=instance_uuid,
+            )
+
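+        # Try each supported console protocol in turn and return the first
+        # console URL the VIM reports as successful (a vim_result of 1 or
+        # 200 in the response indicates success).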
+        console_types = ("novnc", "spice-html5", "xvpnvc", "rdp-html5")
+        for console_type in console_types:
+            payload_input = {"console":console_type, "vms":[vim_uuid]}
+            payload_data = json.dumps(payload_input)
+            resp = self._session.post(url, headers={'content-type': 'application/json'},
+                                      data=payload_data)
+            try:
+                resp.raise_for_status()
+            except requests.exceptions.HTTPError as e:
+                raise InstanceStatusError(e)
+            result = resp.json()
+            if vim_uuid in result and (result[vim_uuid]["vim_result"] == 1 or result[vim_uuid]["vim_result"] == 200):
+                return result[vim_uuid]["description"]
+
+        return None
+
+
+class OpenmanoCliAPI(object):
+    """ This class implements the necessary funtionality to interact with  """
+
+    CMD_TIMEOUT = 30
+
+    def __init__(self, log, host, port, tenant):
+        self._log = log
+        self._host = host
+        self._port = port
+        self._tenant = tenant
+
+    @staticmethod
+    def openmano_cmd_path():
+        return os.path.join(
+               os.environ["RIFT_INSTALL"],
+               "usr/bin/openmano"
+               )
+
+    def _openmano_cmd(self, arg_list, expected_lines=None):
+        cmd_args = list(arg_list)
+        cmd_args.insert(0, self.openmano_cmd_path())
+
+        env = {
+                "OPENMANO_HOST": self._host,
+                "OPENMANO_PORT": str(self._port),
+                "OPENMANO_TENANT": self._tenant,
+                }
+
+        self._log.debug(
+                "Running openmano command (%s) using env (%s)",
+                subprocess.list2cmdline(cmd_args),
+                env,
+                )
+
+        proc = subprocess.Popen(
+                cmd_args,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                universal_newlines=True,
+                env=env
+                )
+        try:
+            stdout, stderr = proc.communicate(timeout=self.CMD_TIMEOUT)
+        except subprocess.TimeoutExpired:
+            self._log.error("Openmano command timed out")
+            proc.terminate()
+            stdout, stderr = proc.communicate(timeout=self.CMD_TIMEOUT)
+
+        if proc.returncode != 0:
+            self._log.error(
+                    "Openmano command failed (rc=%s) with stdout: %s",
+                    proc.returncode, stdout
+                    )
+            raise OpenmanoCommandFailed(stdout)
+
+        self._log.debug("Openmano command completed with stdout: %s", stdout)
+
+        output_lines = stdout.splitlines()
+        if expected_lines is not None:
+            if len(output_lines) != expected_lines:
+                msg = "Expected %s lines from openmano command. Got %s" % (expected_lines, len(output_lines))
+                self._log.error(msg)
+                raise OpenmanoUnexpectedOutput(msg)
+
+        return output_lines
+
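+    # Usage sketch (illustrative names; assumes a rift-shell environment with
+    # RIFT_INSTALL set and a reachable openmano server):
+    #
+    #   cli = OpenmanoCliAPI(logging.getLogger("mano"), "localhost", 9090, tenant_uuid)
+    #   vnf_id, vnf_name = cli.vnf_create(vnf_yaml_str)
+    #
+    # Every helper below funnels through _openmano_cmd(), which passes the
+    # connection details to the openmano CLI via OPENMANO_* environment
+    # variables rather than command line flags.
+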
+
+    def vnf_create(self, vnf_yaml_str):
+        """ Create a Openmano VNF from a Openmano VNF YAML string """
+
+        self._log.debug("Creating VNF: %s", vnf_yaml_str)
+
+        with tempfile.NamedTemporaryFile() as vnf_file_hdl:
+            vnf_file_hdl.write(vnf_yaml_str.encode())
+            vnf_file_hdl.flush()
+
+            try:
+                output_lines = self._openmano_cmd(
+                        ["vnf-create", vnf_file_hdl.name],
+                        expected_lines=1
+                        )
+            except OpenmanoCommandFailed as e:
+                if "already in use" in str(e):
+                    raise VNFExistsError("VNF was already added")
+                raise
+
+        vnf_info_line = output_lines[0]
+        vnf_id, vnf_name = vnf_info_line.split(" ", 1)
+
+        self._log.info("VNF %s Created: %s", vnf_name, vnf_id)
+
+        return vnf_id, vnf_name
+
+    def vnf_delete(self, vnf_uuid):
+        self._openmano_cmd(
+                ["vnf-delete", vnf_uuid, "-f"],
+                )
+
+        self._log.info("VNF Deleted: %s", vnf_uuid)
+
+    def vnf_list(self):
+        try:
+            output_lines = self._openmano_cmd(
+                    ["vnf-list"],
+                    )
+        except OpenmanoCommandFailed as e:
+            self._log.warning("Vnf listing returned an error: %s", str(e))
+            return {}
+
+        name_uuid_map = {}
+        for line in output_lines:
+            line = line.strip()
+            uuid, name = line.split(" ", 1)
+            name_uuid_map[name] = uuid
+
+        return name_uuid_map
+
+    def ns_create(self, ns_yaml_str, name=None):
+        self._log.info("Creating NS: %s", ns_yaml_str)
+
+        with tempfile.NamedTemporaryFile() as ns_file_hdl:
+            ns_file_hdl.write(ns_yaml_str.encode())
+            ns_file_hdl.flush()
+
+            cmd_args = ["scenario-create", ns_file_hdl.name]
+            if name is not None:
+                cmd_args.extend(["--name", name])
+
+            output_lines = self._openmano_cmd(
+                    cmd_args,
+                    expected_lines=1
+                    )
+
+        ns_info_line = output_lines[0]
+        ns_id, ns_name = ns_info_line.split(" ", 1)
+
+        self._log.info("NS %s Created: %s", ns_name, ns_id)
+
+        return ns_id, ns_name
+
+    def ns_list(self):
+        self._log.debug("Getting NS list")
+
+        try:
+            output_lines = self._openmano_cmd(
+                    ["scenario-list"],
+                    )
+
+        except OpenmanoCommandFailed as e:
+            self._log.warning("NS listing returned an error: %s", str(e))
+            return {}
+
+        name_uuid_map = {}
+        for line in output_lines:
+            line = line.strip()
+            uuid, name = line.split(" ", 1)
+            name_uuid_map[name] = uuid
+
+        return name_uuid_map
+
+    def ns_delete(self, ns_uuid):
+        self._log.info("Deleting NS: %s", ns_uuid)
+
+        self._openmano_cmd(
+                ["scenario-delete", ns_uuid, "-f"],
+                )
+
+        self._log.info("NS Deleted: %s", ns_uuid)
+
+    def ns_instance_list(self):
+        self._log.debug("Getting NS instance list")
+
+        try:
+            output_lines = self._openmano_cmd(
+                    ["instance-scenario-list"],
+                    )
+
+        except OpenmanoCommandFailed as e:
+            self._log.warning("Instance scenario listing returned an error: %s", str(e))
+            return {}
+
+        if "No scenario instances were found" in output_lines[0]:
+            self._log.debug("No openmano instances were found")
+            return {}
+
+        name_uuid_map = {}
+        for line in output_lines:
+            line = line.strip()
+            uuid, name = line.split(" ", 1)
+            name_uuid_map[name] = uuid
+
+        return name_uuid_map
+
+    def ns_instance_scenario_create(self, instance_yaml_str):
+        """ Create a Openmano NS instance from input YAML string """
+
+        self._log.debug("Instantiating instance: %s", instance_yaml_str)
+
+        with tempfile.NamedTemporaryFile() as ns_instance_file_hdl:
+            ns_instance_file_hdl.write(instance_yaml_str.encode())
+            ns_instance_file_hdl.flush()
+
+            output_lines = self._openmano_cmd(
+                    ["instance-scenario-create", ns_instance_file_hdl.name],
+                    expected_lines=1
+                    )
+
+        uuid, _ = output_lines[0].split(" ", 1)
+
+        self._log.info("NS Instance Created: %s", uuid)
+
+        return uuid
+
+    def ns_instantiate(self, scenario_name, instance_name, datacenter_name=None):
+        self._log.info(
+                "Instantiating NS %s using instance name %s",
+                scenario_name,
+                instance_name,
+                )
+
+        cmd_args = ["scenario-deploy", scenario_name, instance_name]
+        if datacenter_name is not None:
+            cmd_args.extend(["--datacenter", datacenter_name])
+
+        output_lines = self._openmano_cmd(
+                cmd_args,
+                expected_lines=4
+                )
+
+        uuid, _ = output_lines[0].split(" ", 1)
+
+        self._log.info("NS Instance Created: %s", uuid)
+
+        return uuid
+
+    def ns_terminate(self, ns_instance_name):
+        self._log.info("Terminating NS: %s", ns_instance_name)
+
+        self._openmano_cmd(
+                ["instance-scenario-delete", ns_instance_name, "-f"],
+                )
+
+        self._log.info("NS Instance Deleted: %s", ns_instance_name)
+
+    def datacenter_list(self):
+        lines = self._openmano_cmd(["datacenter-list",])
+
+        # The results returned from openmano are formatted with whitespace and
+        # datacenter names may contain whitespace as well, so we use a regular
+        # expression to parse each line of the results return from openmano to
+        # extract the uuid and name of a datacenter.
+        hex = '[0-9a-fA-F]'
+        uuid_pattern = '(xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx)'.replace('x', hex)
+        name_pattern = '(.+?)'
+        datacenter_regex = re.compile(r'{uuid}\s+\b{name}\s*$'.format(
+            uuid=uuid_pattern,
+            name=name_pattern,
+            ))
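+        # A matching output line looks like (illustrative):
+        #   "21b98e40-7e04-11e6-9f21-fa163e4bfd3e   my datacenter"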
+
+        # Parse the results for the datacenter uuids and names
+        datacenters = list()
+        for line in lines:
+            result = datacenter_regex.match(line)
+            if result is not None:
+                uuid, name = result.groups()
+                datacenters.append((uuid, name))
+
+        return datacenters
+
+
+def valid_uuid(uuid_str):
+    uuid_re = re.compile(
+            "^xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx$".replace('x', '[0-9a-fA-F]')
+            )
+
+    if not uuid_re.match(uuid_str):
+        raise argparse.ArgumentTypeError("Got an invalid uuid: %s" % uuid_str)
+
+    return uuid_str
+
+
+def parse_args(argv=sys.argv[1:]):
+    """ Parse the command line arguments
+
+    Arguments:
+        argv - The list of arguments to parse
+
+    Returns:
+        Argparse Namespace instance
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '-d', '--host',
+        default='localhost',
+        help="Openmano host/ip",
+        )
+
+    parser.add_argument(
+        '-p', '--port',
+        default='9090',
+        help="Openmano port",
+        )
+
+    parser.add_argument(
+        '-t', '--tenant',
+        required=True,
+        type=valid_uuid,
+        help="Openmano tenant uuid to use",
+        )
+
+    subparsers = parser.add_subparsers(dest='command', help='openmano commands')
+
+    vnf_create_parser = subparsers.add_parser(
+            'vnf-create',
+            help="Adds a openmano vnf into the catalog"
+            )
+    vnf_create_parser.add_argument(
+            "file",
+            help="location of the JSON file describing the VNF",
+            type=argparse.FileType('rb'),
+            )
+
+    vnf_delete_parser = subparsers.add_parser(
+            'vnf-delete',
+            help="Deletes a openmano vnf into the catalog"
+            )
+    vnf_delete_parser.add_argument(
+            "uuid",
+            help="The vnf to delete",
+            type=valid_uuid,
+            )
+
+
+    ns_create_parser = subparsers.add_parser(
+            'scenario-create',
+            help="Adds a openmano ns scenario into the catalog"
+            )
+    ns_create_parser.add_argument(
+            "file",
+            help="location of the JSON file describing the NS",
+            type=argparse.FileType('rb'),
+            )
+
+    ns_delete_parser = subparsers.add_parser(
+            'scenario-delete',
+            help="Deletes a openmano ns into the catalog"
+            )
+    ns_delete_parser.add_argument(
+            "uuid",
+            help="The ns to delete",
+            type=valid_uuid,
+            )
+
+
+    ns_instance_create_parser = subparsers.add_parser(
+            'scenario-deploy',
+            help="Deploys a openmano ns scenario into the catalog"
+            )
+    ns_instance_create_parser.add_argument(
+            "scenario_name",
+            help="The ns scenario name to deploy",
+            )
+    ns_instance_create_parser.add_argument(
+            "instance_name",
+            help="The ns instance name to deploy",
+            )
+
+
+    ns_instance_delete_parser = subparsers.add_parser(
+            'instance-scenario-delete',
+            help="Deploys a openmano ns scenario into the catalog"
+            )
+    ns_instance_delete_parser.add_argument(
+            "instance_name",
+            help="The ns instance name to delete",
+            )
+
+
+    _ = subparsers.add_parser(
+            'datacenter-list',
+            )
+
+    args = parser.parse_args(argv)
+
+    return args
+
+
+def main():
+    logging.basicConfig(level=logging.DEBUG)
+    logger = logging.getLogger("openmano_client.py")
+
+    if "RIFT_INSTALL" not in os.environ:
+        logger.error("Must be in rift-shell to run.")
+        sys.exit(1)
+
+    args = parse_args()
+    openmano_cli = OpenmanoCliAPI(logger, args.host, args.port, args.tenant)
+
+    if args.command == "vnf-create":
+        openmano_cli.vnf_create(args.file.read())
+
+    elif args.command == "vnf-delete":
+        openmano_cli.vnf_delete(args.uuid)
+
+    elif args.command == "scenario-create":
+        openmano_cli.ns_create(args.file.read())
+
+    elif args.command == "scenario-delete":
+        openmano_cli.ns_delete(args.uuid)
+
+    elif args.command == "scenario-deploy":
+        openmano_cli.ns_instantiate(args.scenario_name, args.instance_name)
+
+    elif args.command == "instance-scenario-delete":
+        openmano_cli.ns_terminate(args.instance_name)
+
+    elif args.command == "datacenter-list":
+        for uuid, name in openmano_cli.datacenter_list():
+            print("{} {}".format(uuid, name))
+
+    else:
+        logger.error("Unknown command: %s", args.command)
+        sys.exit(1)
+
+if __name__ == "__main__":
+    main()
diff --git a/models/openmano/python/rift/openmano/rift2openmano.py b/models/openmano/python/rift/openmano/rift2openmano.py
new file mode 100755 (executable)
index 0000000..a98335b
--- /dev/null
@@ -0,0 +1,566 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+import collections
+import logging
+import math
+import os
+import sys
+import tempfile
+import yaml
+
+import gi
+gi.require_version('RwYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwNsdYang', '1.0')
+from gi.repository import (
+    RwYang,
+    RwVnfdYang,
+    RwNsdYang,
+    )
+
+logger = logging.getLogger("rift2openmano.py")
+
+
+class VNFNotFoundError(Exception):
+    pass
+
+
+class RiftNSD(object):
+    model = RwYang.Model.create_libncx()
+    model.load_module('nsd')
+    model.load_module('rw-nsd')
+
+    def __init__(self, descriptor):
+        self._nsd = descriptor
+
+    def __str__(self):
+        return str(self._nsd)
+
+    @property
+    def name(self):
+        return self._nsd.name
+
+    @property
+    def id(self):
+        return self._nsd.id
+
+    @property
+    def vnfd_ids(self):
+        return [c.vnfd_id_ref for c in self._nsd.constituent_vnfd]
+
+    @property
+    def constituent_vnfds(self):
+        return self._nsd.constituent_vnfd
+
+    @property
+    def vlds(self):
+        return self._nsd.vld
+
+    @property
+    def cps(self):
+        return self._nsd.connection_point
+
+    @property
+    def description(self):
+        return self._nsd.description
+
+    @classmethod
+    def from_xml_file_hdl(cls, hdl):
+        hdl.seek(0)
+        descriptor = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd()
+        descriptor.from_xml_v2(RiftNSD.model, hdl.read())
+        return cls(descriptor)
+
+    @classmethod
+    def from_yaml_file_hdl(cls, hdl):
+        hdl.seek(0)
+        descriptor = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd()
+        descriptor.from_yaml(RiftNSD.model, hdl.read())
+        return cls(descriptor)
+
+    @classmethod
+    def from_dict(cls, nsd_dict):
+        descriptor = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd.from_dict(nsd_dict)
+        return cls(descriptor)
+
+
+class RiftVNFD(object):
+    model = RwYang.Model.create_libncx()
+    model.load_module('vnfd')
+    model.load_module('rw-vnfd')
+
+    def __init__(self, descriptor):
+        self._vnfd = descriptor
+
+    def __str__(self):
+        return str(self._vnfd)
+
+    @property
+    def id(self):
+        return self._vnfd.id
+
+    @property
+    def name(self):
+        return self._vnfd.name
+
+    @property
+    def description(self):
+        return self._vnfd.description
+
+    @property
+    def cps(self):
+        return self._vnfd.connection_point
+
+    @property
+    def vdus(self):
+        return self._vnfd.vdu
+
+    @property
+    def internal_vlds(self):
+        return self._vnfd.internal_vld
+
+    @classmethod
+    def from_xml_file_hdl(cls, hdl):
+        hdl.seek(0)
+        descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+        descriptor.from_xml_v2(RiftVNFD.model, hdl.read())
+        return cls(descriptor)
+
+    @classmethod
+    def from_yaml_file_hdl(cls, hdl):
+        hdl.seek(0)
+        descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+        descriptor.from_yaml(RiftVNFD.model, hdl.read())
+        return cls(descriptor)
+
+    @classmethod
+    def from_dict(cls, vnfd_dict):
+        descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict(vnfd_dict)
+        return cls(descriptor)
+
+
+def is_writable_directory(dir_path):
+    """ Returns True if dir_path is writable, False otherwise
+
+    Arguments:
+        dir_path - A directory path
+    """
+    if not os.path.exists(dir_path):
+        raise ValueError("Directory does not exist: %s", dir_path)
+
+    try:
+        testfile = tempfile.TemporaryFile(dir=dir_path)
+        testfile.close()
+    except OSError:
+        return False
+
+    return True
+
+
+def create_vnfd_from_xml_files(vnfd_file_hdls):
+    """ Create a list of RiftVNFD instances from xml file handles
+
+    Arguments:
+        vnfd_file_hdls - Rift VNFD XML file handles
+
+    Returns:
+        A dict mapping VNFD id to RiftVNFD instance
+    """
+    vnfd_dict = {}
+    for vnfd_file_hdl in vnfd_file_hdls:
+        vnfd = RiftVNFD.from_xml_file_hdl(vnfd_file_hdl)
+        vnfd_dict[vnfd.id] = vnfd
+
+    return vnfd_dict
+
+def create_vnfd_from_yaml_files(vnfd_file_hdls):
+    """ Create a list of RiftVNFD instances from xml file handles
+
+    Arguments:
+        vnfd_file_hdls - Rift VNFD YAML file handles
+
+    Returns:
+        A list of RiftVNFD instances
+    """
+    vnfd_dict = {}
+    for vnfd_file_hdl in vnfd_file_hdls:
+        vnfd = RiftVNFD.from_yaml_file_hdl(vnfd_file_hdl)
+        vnfd_dict[vnfd.id] = vnfd
+
+    return vnfd_dict
+
+
+def create_nsd_from_xml_file(nsd_file_hdl):
+    """ Create a list of RiftNSD instances from xml file handles
+
+    Arguments:
+        nsd_file_hdls - Rift NSD XML file handles
+
+    Returns:
+        A list of RiftNSD instances
+    """
+    nsd = RiftNSD.from_xml_file_hdl(nsd_file_hdl)
+    return nsd
+
+def create_nsd_from_yaml_file(nsd_file_hdl):
+    """ Create a list of RiftNSD instances from yaml file handles
+
+    Arguments:
+        nsd_file_hdls - Rift NSD XML file handles
+
+    Returns:
+        A list of RiftNSD instances
+    """
+    nsd = RiftNSD.from_yaml_file_hdl(nsd_file_hdl)
+    return nsd
+
+
+def ddict():
+    return collections.defaultdict(dict)
+
+def convert_vnfd_name(vnfd_name, member_idx):
+    return vnfd_name + "__" + str(member_idx)
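+# e.g. convert_vnfd_name("tidgen", 2) -> "tidgen__2"; embedding the member
+# VNF index keeps multiple instances of the same VNFD distinct in the
+# openmano topology.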
+
+
+def rift2openmano_nsd(rift_nsd, rift_vnfds):
+    for vnfd_id in rift_nsd.vnfd_ids:
+        if vnfd_id not in rift_vnfds:
+            raise VNFNotFoundError("VNF id %s not provided" % vnfd_id)
+
+    openmano = {}
+    openmano["name"] = rift_nsd.name
+    openmano["description"] = rift_nsd.description
+    topology = {}
+    openmano["topology"] = topology
+
+    topology["nodes"] = {}
+    for vnfd in rift_nsd.constituent_vnfds:
+        vnfd_id = vnfd.vnfd_id_ref
+        rift_vnfd = rift_vnfds[vnfd_id]
+        member_idx = vnfd.member_vnf_index
+        topology["nodes"][rift_vnfd.name + "__" + str(member_idx)] = {
+                "type": "VNF",
+                "VNF model": rift_vnfd.name
+                }
+
+    # Openmano has both bridge_net and dataplane_net models for network types.
+    # For now, since we are using openmano in developer mode, VLDs are not
+    # emitted as standalone network nodes here (it would look like the line
+    # below); external networks are added as nodes while building the
+    # connections.
+    # topology["nodes"][vld.name] = {"type": "network", "model": "bridge_net"}
+
+    topology["connections"] = {}
+    for vld in rift_nsd.vlds:
+
+        # Create a connections entry for each external VLD
+        topology["connections"][vld.name] = {}
+        topology["connections"][vld.name]["nodes"] = []
+
+        if vld.vim_network_name:
+            if vld.name not in topology["nodes"]:
+                topology["nodes"][vld.name] = {
+                        "type": "external_network",
+                        "model": vld.name,
+                        }
+
+            # Add the external network to the list of connection points
+            topology["connections"][vld.name]["nodes"].append(
+                    {vld.name: "0"}
+                    )
+        elif vld.provider_network.has_field("physical_network"):
+            # Add the external datacenter network to the topology
+            # node list if it isn't already added
+            ext_net_name = vld.provider_network.physical_network
+            ext_net_name_with_seg = ext_net_name
+            if vld.provider_network.has_field("segmentation_id"):
+                ext_net_name_with_seg += ":{}".format(vld.provider_network.segmentation_id)
+
+            if ext_net_name not in topology["nodes"]:
+                topology["nodes"][ext_net_name] = {
+                        "type": "external_network",
+                        "model": ext_net_name_with_seg,
+                        }
+
+            # Add the external network to the list of connection points
+            topology["connections"][vld.name]["nodes"].append(
+                    {ext_net_name: "0"}
+                    )
+
+
+        for vnfd_cp in vld.vnfd_connection_point_ref:
+
+            # Get the RIFT VNF for this external VLD connection point
+            vnfd = rift_vnfds[vnfd_cp.vnfd_id_ref]
+
+            # For each VNF in this connection, use the same interface name
+            topology["connections"][vld.name]["type"] = "link"
+            # Vnf ref is the vnf name with the member_vnf_idx appended
+            member_idx = vnfd_cp.member_vnf_index_ref
+            vnf_ref = vnfd.name + "__" + str(member_idx)
+            topology["connections"][vld.name]["nodes"].append(
+                {
+                    vnf_ref: vnfd_cp.vnfd_connection_point_ref
+                }
+            )
+
+    return openmano
+
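+# Illustrative shape of the dict returned by rift2openmano_nsd (names are
+# made up):
+#
+#   {"name": "my-ns",
+#    "description": "...",
+#    "topology": {
+#        "nodes": {"ping__1": {"type": "VNF", "VNF model": "ping"}},
+#        "connections": {"mgmt": {"type": "link",
+#                                 "nodes": [{"ping__1": "eth0"}]}}}}
+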
+
+def rift2openmano_vnfd(rift_vnfd):
+    openmano_vnf = {"vnf":{}}
+    vnf = openmano_vnf["vnf"]
+
+    vnf["name"] = rift_vnfd.name
+    vnf["description"] = rift_vnfd.description
+
+    vnf["external-connections"] = []
+
+    def find_vdu_and_ext_if_by_cp_ref(cp_ref_name):
+        for vdu in rift_vnfd.vdus:
+            for ext_if in vdu.external_interface:
+                if ext_if.vnfd_connection_point_ref == cp_ref_name:
+                    return vdu, ext_if
+
+        raise ValueError("External connection point reference %s not found" % cp_ref_name)
+
+    def find_vdu_and_int_if_by_cp_ref(cp_ref_id):
+        for vdu in rift_vnfd.vdus:
+            for int_if in vdu.internal_interface:
+                if int_if.vdu_internal_connection_point_ref == cp_ref_id:
+                    return vdu, int_if
+
+        raise ValueError("Internal connection point reference %s not found" % cp_ref_id)
+
+    def rift2openmano_if_type(rift_type):
+        if rift_type == "OM_MGMT":
+            return "mgmt"
+        elif rift_type == "VIRTIO":
+            return "bridge"
+        else:
+            return "data"
+
+    # Add all external connections
+    for cp in rift_vnfd.cps:
+        # Find the VDU and external interface for this connection point
+        vdu, ext_if = find_vdu_and_ext_if_by_cp_ref(cp.name)
+        connection = {
+            "name": cp.name,
+            "type": rift2openmano_if_type(ext_if.virtual_interface.type_yang),
+            "VNFC": vdu.name,
+            "local_iface_name": ext_if.name,
+            "description": "%s iface on VDU %s" % (ext_if.name, vdu.name),
+            }
+
+        vnf["external-connections"].append(connection)
+
+    # Add all internal networks
+    for vld in rift_vnfd.internal_vlds:
+        connection = {
+            "name": vld.name,
+            "description": vld.description,
+            "type": "data",
+            "elements": [],
+            }
+
+        # Add the specific VDU connection points
+        for int_cp_ref in vld.internal_connection_point_ref:
+            vdu, int_if = find_vdu_and_int_if_by_cp_ref(int_cp_ref)
+            connection["elements"].append({
+                "VNFC": vdu.name,
+                "local_iface_name": int_if.name,
+                })
+        if "internal-connections" not in vnf:
+            vnf["internal-connections"] = []
+
+        vnf["internal-connections"].append(connection)
+
+    # Add VDU's
+    vnf["VNFC"] = []
+    for vdu in rift_vnfd.vdus:
+        vnfc = {
+            "name": vdu.name,
+            "description": vdu.name,
+            "VNFC image": vdu.image if os.path.isabs(vdu.image) else "/var/images/{}".format(vdu.image),
+            "numas": [{
+                "memory": max(int(vdu.vm_flavor.memory_mb/1024), 1),
+                "interfaces":[],
+                }],
+            "bridge-ifaces": [],
+            }
+
+        numa_node_policy = vdu.guest_epa.numa_node_policy
+        if numa_node_policy.has_field("node"):
+            numa_node = numa_node_policy.node[0]
+
+            if numa_node.has_field("paired_threads"):
+                if numa_node.paired_threads.has_field("num_paired_threads"):
+                    vnfc["numas"][0]["paired-threads"] = numa_node.paired_threads.num_paired_threads
+                if len(numa_node.paired_threads.paired_thread_ids) > 0:
+                    vnfc["numas"][0]["paired-threads-id"] = []
+                    for pair in numa_node.paired_threads.paired_thread_ids:
+                         vnfc["numas"][0]["paired-threads-id"].append(
+                                 [pair.thread_a, pair.thread_b]
+                                 )
+
+        else:
+            if vdu.vm_flavor.has_field("vcpu_count"):
+                vnfc["numas"][0]["cores"] = max(vdu.vm_flavor.vcpu_count, 1)
+
+        if vdu.has_field("hypervisor_epa"):
+            vnfc["hypervisor"] = {}
+            if vdu.hypervisor_epa.has_field("type"):
+                if vdu.hypervisor_epa.type_yang == "REQUIRE_KVM":
+                    vnfc["hypervisor"]["type"] = "QEMU-kvm"
+
+            if vdu.hypervisor_epa.has_field("version"):
+                vnfc["hypervisor"]["version"] = vdu.hypervisor_epa.version
+
+        if vdu.has_field("host_epa"):
+            vnfc["processor"] = {}
+            if vdu.host_epa.has_field("om_cpu_model_string"):
+                vnfc["processor"]["model"] = vdu.host_epa.om_cpu_model_string
+            if vdu.host_epa.has_field("om_cpu_feature"):
+                vnfc["processor"]["features"] = []
+                for feature in vdu.host_epa.om_cpu_feature:
+                    vnfc["processor"]["features"].append(feature)
+
+
+        if vdu.vm_flavor.has_field("storage_gb"):
+            vnfc["disk"] = vdu.vm_flavor.storage_gb
+
+        vnf["VNFC"].append(vnfc)
+
+        for int_if in list(vdu.internal_interface) + list(vdu.external_interface):
+            intf = {
+                "name": int_if.name,
+                }
+            if int_if.virtual_interface.has_field("vpci"):
+                intf["vpci"] = int_if.virtual_interface.vpci
+
+            if int_if.virtual_interface.type_yang in ["VIRTIO", "OM_MGMT"]:
+                vnfc["bridge-ifaces"].append(intf)
+
+            elif int_if.virtual_interface.type_yang == "SR-IOV":
+                intf["bandwidth"] = "10 Gbps"
+                intf["dedicated"] = "yes:sriov"
+                vnfc["numas"][0]["interfaces"].append(intf)
+
+            elif int_if.virtual_interface.type_yang == "PCI_PASSTHROUGH":
+                intf["bandwidth"] = "10 Gbps"
+                intf["dedicated"] = "yes"
+                if "interfaces" not in vnfc["numas"][0]:
+                    vnfc["numas"][0]["interfaces"] = []
+                vnfc["numas"][0]["interfaces"].append(intf)
+            else:
+                raise ValueError("Interface type %s not supported" % int_if.virtual_interface)
+
+            if int_if.virtual_interface.has_field("bandwidth"):
+                if int_if.virtual_interface.bandwidth != 0:
+                    bps = int_if.virtual_interface.bandwidth
+
+                    # Calculate the bits per second conversion
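+                    # e.g. 1500000000 bps matches the 'M' entry first
+                    # ("1500 Mbps") and is then overwritten by the 'G' entry,
+                    # ending up as "2 Gbps" (math.ceil rounds up).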
+                    for x in [('M', 1000000), ('G', 1000000000)]:
+                        if bps/x[1] >= 1:
+                            intf["bandwidth"] = "{} {}bps".format(math.ceil(bps/x[1]), x[0])
+
+
+    return openmano_vnf
+
+
+def parse_args(argv=sys.argv[1:]):
+    """ Parse the command line arguments
+
+    Arguments:
+        argv - The list of arguments to parse
+
+    Returns:
+        Argparse Namespace instance
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '-o', '--outdir',
+        default='-',
+        help="Directory to output converted descriptors. Default is stdout",
+        )
+
+    parser.add_argument(
+        '-n', '--nsd-file-hdl',
+        metavar="nsd_xml_file",
+        type=argparse.FileType('r'),
+        help="Rift NSD Descriptor File",
+        )
+
+    parser.add_argument(
+        '-v', '--vnfd-file-hdls',
+        metavar="vnfd_xml_file",
+        action='append',
+        type=argparse.FileType('r'),
+        help="Rift VNFD Descriptor File",
+        )
+
+    args = parser.parse_args(argv)
+
+    # "-" means write to stdout, so only create/validate a real directory
+    if args.outdir != '-':
+        if not os.path.exists(args.outdir):
+            os.makedirs(args.outdir)
+
+        if not is_writable_directory(args.outdir):
+            logging.error("Directory %s is not writable", args.outdir)
+            sys.exit(1)
+
+    return args
+
+
+def write_yaml_to_file(name, outdir, desc_dict):
+    file_name = "%s.yaml" % name
+    yaml_str = yaml.dump(desc_dict)
+    if outdir == "-":
+        sys.stdout.write(yaml_str)
+        return
+
+    file_path = os.path.join(outdir, file_name)
+    dir_path = os.path.dirname(file_path)
+    if not os.path.exists(dir_path):
+        os.makedirs(dir_path)
+
+    with open(file_path, "w") as hdl:
+        hdl.write(yaml_str)
+
+    logger.info("Wrote descriptor to %s", file_path)
+
+
+def main(argv=sys.argv[1:]):
+    args = parse_args(argv)
+
+    nsd = None
+    vnf_dict = {}
+    if args.vnfd_file_hdls is not None:
+        vnf_dict = create_vnfd_from_xml_files(args.vnfd_file_hdls)
+
+    if args.nsd_file_hdl is not None:
+        nsd = create_nsd_from_xml_file(args.nsd_file_hdl)
+
+    if nsd is not None:
+        openmano_nsd = rift2openmano_nsd(nsd, vnf_dict)
+        write_yaml_to_file(openmano_nsd["name"], args.outdir, openmano_nsd)
+
+    for vnf in vnf_dict.values():
+        openmano_vnf = rift2openmano_vnfd(vnf)
+        write_yaml_to_file(openmano_vnf["vnf"]["name"], args.outdir, openmano_vnf)
+
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.WARNING)
+    main()
diff --git a/models/openmano/src/CMakeLists.txt b/models/openmano/src/CMakeLists.txt
new file mode 100644 (file)
index 0000000..949bb8b
--- /dev/null
@@ -0,0 +1,82 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 2014/12/11
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+configure_file(
+  ${CMAKE_CURRENT_SOURCE_DIR}/generate_tidgen_packages.sh.in
+  ${CMAKE_CURRENT_BINARY_DIR}/generate_tidgen_packages.sh
+  ESCAPE_QUOTES @ONLY
+  )
+
+add_custom_command(
+  OUTPUT
+    ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_2sriov.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_2sriov.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_4sriov.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_4sriov.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_2sriov_no_ctrlnet.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_2sriov_no_ctrlnet.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_4sriov_no_ctrlnet.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_4sriov_no_ctrlnet.tar.gz
+
+  COMMAND
+    ${CMAKE_CURRENT_BINARY_DIR}/generate_tidgen_packages.sh
+
+  DEPENDS
+    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_ns_2sriov.yaml
+    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_ns_4sriov.yaml
+    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_vnf_2sriov.yaml
+    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_vnf_4sriov.yaml
+    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_ns_2sriov_no_ctrlnet.yaml
+    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_ns_4sriov_no_ctrlnet.yaml
+    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_vnf_2sriov_no_ctrlnet.yaml
+    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/test/tidgen_vnf_4sriov_no_ctrlnet.yaml
+    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/openmano/src/openmano2rift.py
+  )
+
+add_custom_target(tidgen ALL
+  DEPENDS
+    mano_yang
+    ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_2sriov.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_2sriov.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_4sriov.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_4sriov.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_2sriov_no_ctrlnet.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_2sriov_no_ctrlnet.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_4sriov_no_ctrlnet.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_4sriov_no_ctrlnet.tar.gz
+  )
+
+install(
+  FILES
+    ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_4sriov.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_4sriov.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_2sriov.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_2sriov.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_4sriov_no_ctrlnet.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_4sriov_no_ctrlnet.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/2tidgenMWC_2sriov_no_ctrlnet.tar.gz
+    ${CMAKE_CURRENT_BINARY_DIR}/tidgenMWC_2sriov_no_ctrlnet.tar.gz
+
+  DESTINATION
+    usr/rift/mano/examples/tidgen_ns
+    COMPONENT ${PKG_LONG_NAME}
+  )
diff --git a/models/openmano/src/generate_tidgen_packages.sh.in b/models/openmano/src/generate_tidgen_packages.sh.in
new file mode 100755 (executable)
index 0000000..208d2cd
--- /dev/null
@@ -0,0 +1,40 @@
+#! /bin/bash
+
+set -e
+
+SOURCE_DIR=@CMAKE_CURRENT_SOURCE_DIR@
+BINARY_DIR=@CMAKE_CURRENT_BINARY_DIR@
+PROJECT_TOP_DIR=@PROJECT_TOP_DIR@
+
+# These paths are needed for finding the overrides and so files
+PYTHONPATH=${PYTHONPATH}:@RIFT_SUBMODULE_SOURCE_ROOT@/rwvcs/ra:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang
+PYTHON3PATH=${PYTHON3PATH}:@RIFT_SUBMODULE_SOURCE_ROOT@/rwvcs/ra:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang
+LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang
+
+# Remove any old directories
+rm -rf ${BINARY_DIR}/2tidgenMWC_4sriov
+rm -rf ${BINARY_DIR}/tidgenMWC_4sriov
+rm -rf ${BINARY_DIR}/2tidgenMWC_2sriov
+rm -rf ${BINARY_DIR}/tidgenMWC_2sriov
+rm -rf ${BINARY_DIR}/2tidgenMWC_2sriov_noctrlnet
+rm -rf ${BINARY_DIR}/tidgenMWC_2sriov_noctrlnet
+rm -rf ${BINARY_DIR}/2tidgenMWC_4sriov_noctrlnet
+rm -rf ${BINARY_DIR}/tidgenMWC_4sriov_noctrlnet
+
+
+# Generate the descriptors
+${SOURCE_DIR}/openmano2rift.py -o ${BINARY_DIR} @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_ns_4sriov.yaml @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_vnf_4sriov.yaml
+${SOURCE_DIR}/openmano2rift.py -o ${BINARY_DIR} @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_ns_2sriov.yaml @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_vnf_2sriov.yaml
+${SOURCE_DIR}/openmano2rift.py -o ${BINARY_DIR} @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_ns_4sriov_no_ctrlnet.yaml @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_vnf_4sriov_no_ctrlnet.yaml
+${SOURCE_DIR}/openmano2rift.py -o ${BINARY_DIR} @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_ns_2sriov_no_ctrlnet.yaml @RIFT_SUBMODULE_SOURCE_ROOT@/models/openmano/test/tidgen_vnf_2sriov_no_ctrlnet.yaml
+
+
+# Generate the tar files
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} tidgenMWC_4sriov 
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} 2tidgenMWC_4sriov
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} tidgenMWC_2sriov 
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} 2tidgenMWC_2sriov
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} tidgenMWC_2sriov_no_ctrlnet 
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} 2tidgenMWC_2sriov_no_ctrlnet
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} tidgenMWC_4sriov_no_ctrlnet 
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} 2tidgenMWC_4sriov_no_ctrlnet
diff --git a/models/openmano/src/openmano2rift.py b/models/openmano/src/openmano2rift.py
new file mode 100755 (executable)
index 0000000..503ad89
--- /dev/null
@@ -0,0 +1,485 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import itertools
+import logging
+import os
+import sys
+import tempfile
+import uuid
+import yaml
+
+import gi
+gi.require_version('RwYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwNsdYang', '1.0')
+from gi.repository import (
+    RwYang,
+    RwVnfdYang,
+    RwNsdYang,
+    )
+
+logging.basicConfig(level=logging.WARNING)
+logger = logging.getLogger("openmano2rift.py")
+
+
+class UnknownVNFError(Exception):
+    pass
+
+
+class DescriptorFileWriter(object):
+    def __init__(self, module_list, output_dir, output_format):
+        self._model = RwYang.Model.create_libncx()
+        for module in module_list:
+            self._model.load_module(module)
+
+        self._output_dir = output_dir
+        self._output_format = output_format
+
+    def _write_file(self, file_name, output):
+        file_path = os.path.join(self._output_dir, file_name)
+        dir_path = os.path.dirname(file_path)
+        if not os.path.exists(dir_path):
+            os.makedirs(dir_path)
+
+        with open(file_path, "w") as hdl:
+            hdl.write(output)
+
+        logger.info("Wrote descriptor to %s", file_path)
+
+    def _write_json(self, descriptor, subdir):
+        self._write_file(
+            '%s.json' % os.path.join(descriptor.name, subdir, descriptor.name),
+            descriptor.descriptor.to_json(self._model)
+            )
+
+    def _write_xml(self, descriptor, subdir):
+        self._write_file(
+            '%s.xml' % os.path.join(descriptor.name, subdir, descriptor.name),
+            descriptor.descriptor.to_xml_v2(self._model, pretty_print=True)
+            )
+
+    def _write_yaml(self, descriptor, subdir):
+        self._write_file(
+            '%s.yaml' % os.path.join(descriptor.name, subdir, descriptor.name),
+            yaml.dump(descriptor.descriptor.as_dict()),
+            )
+
+    def write_descriptor(self, descriptor, subdir=""):
+        if self._output_format == 'json':
+            self._write_json(descriptor, subdir=subdir)
+
+        elif self._output_format == 'xml':
+            self._write_xml(descriptor, subdir=subdir)
+
+        elif self._output_format == 'yaml':
+            self._write_yaml(descriptor, subdir=subdir)
+
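+# Usage sketch (illustrative module, directory and variable names):
+#
+#   writer = DescriptorFileWriter(['nsd', 'rw-nsd'], 'out', 'yaml')
+#   writer.write_descriptor(rift_ns, subdir='nsd')
+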
+
+class RiftManoDescriptor(object):
+    def __init__(self, openmano=None):
+        self.openmano = openmano
+        self.descriptor = None
+
+
+class RiftNS(RiftManoDescriptor):
+    def __init__(self, openmano=None):
+        super().__init__(openmano)
+        self.nsd_catalog = None
+        self.nsd = None
+        self.name = None
+
+    def get_vnfd_id(self, vnf_list, vnf_name):
+        for vnf in vnf_list:
+            if vnf.name == vnf_name:
+                return vnf.vnfd.id
+
+        # Didn't find the vnf just return the vnf_name
+        return vnf_name
+
+    def openmano2rift(self, vnf_list):
+        self.descriptor = RwNsdYang.YangData_Nsd_NsdCatalog()
+        openmano_nsd = self.openmano.dictionary
+        self.name = openmano_nsd['name']
+        nsd = self.descriptor.nsd.add()
+        nsd.id = str(uuid.uuid1())
+        nsd.name = self.name
+        nsd.short_name = self.name
+        nsd.description = openmano_nsd['description']
+
+        nodes = openmano_nsd['topology']['nodes']
+        connections = openmano_nsd['topology']['connections']
+
+        def create_constituent_vnfds():
+            vnf_member_index_dict = {}
+
+            vnfd_idx_gen = itertools.count(1)
+            for key in nodes:
+                node = nodes[key]
+                if node['type'] != 'VNF':
+                    continue
+
+                vnfd_idx = next(vnfd_idx_gen)
+                constituent_vnfd = nsd.constituent_vnfd.add()
+                constituent_vnfd.member_vnf_index = vnfd_idx
+                constituent_vnfd.vnfd_id_ref = self.get_vnfd_id(vnf_list, node['VNF model'])
+                vnf_member_index_dict[key] = vnfd_idx
+
+            return vnf_member_index_dict
+
+        def create_connections(vnf_member_index_dict):
+            keys = connections.keys()
+            for key in keys:
+                # TODO: Need clarification from TEF
+                # skip the mgmtnet in OpenMANO descriptor
+                if key == 'mgmtnet':
+                    continue
+                conn = connections[key]
+                vld = nsd.vld.add()
+                vld.from_dict(dict(
+                    id=str(uuid.uuid1()),
+                    name=key,
+                    short_name=key,
+                    type_yang='ELAN',
+                    ))
+
+                nodes = conn['nodes']
+                for node in nodes:
+                    for node_key in node:
+                        topo_node = openmano_nsd['topology']['nodes'][node_key]
+                        if topo_node['type'] == 'VNF':
+                            cpref = vld.vnfd_connection_point_ref.add()
+                            cpref.from_dict(dict(
+                                member_vnf_index_ref=vnf_member_index_dict[node_key],
+                                vnfd_id_ref=self.get_vnfd_id(vnf_list, topo_node['VNF model']),
+                                #vnfd_id_ref=topo_node['VNF model'],
+                                vnfd_connection_point_ref=node[node_key],
+                                ))
+                            if key != 'control-net':
+                                vld.provider_network.physical_network = 'physnet_sriov'
+                                vld.provider_network.overlay_type = 'VLAN'
+
+        vnf_member_index_dict = create_constituent_vnfds()
+        create_connections(vnf_member_index_dict)
+
+
+class RiftVnfd(RiftManoDescriptor):
+    def __init__(self, openmano=None):
+        super().__init__(openmano)
+        self.vnfd_catalog = None
+        self.vnfd = None
+
+    def find_external_connection(self, vdu_name, if_name):
+        """
+        Find if the vdu interface has an external connection.
+        """
+        openmano_vnfd = self.openmano.dictionary['vnf']
+        if 'external-connections' not in openmano_vnfd:
+            return None
+
+        ext_conn_list = openmano_vnfd['external-connections']
+        for ext_conn in ext_conn_list:
+            if ((ext_conn['VNFC'] == vdu_name) and
+                    (ext_conn['local_iface_name'] == if_name)):
+                return ext_conn
+
+        return None
+
+    def openmano2rift(self):
+        self.descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog()
+        vnfd = self.descriptor.vnfd.add()
+        self.vnfd = vnfd
+        vnfd.id = str(uuid.uuid1())
+
+        openmano_vnfd = self.openmano.dictionary['vnf']
+        self.name = openmano_vnfd['name']
+        vnfd.name = self.name
+        if "description" in openmano_vnfd:
+            vnfd.description = openmano_vnfd['description']
+
+        # Parse and add all the external connection points
+        if 'external-connections' in openmano_vnfd:
+            ext_conn_list = openmano_vnfd['external-connections']
+
+            for ext_conn in ext_conn_list:
+                # TODO: Fix this
+                if ext_conn['name'] == 'eth0':
+                    continue
+                conn_point = vnfd.connection_point.add()
+                conn_point.name = ext_conn['name']
+                conn_point.type_yang = 'VPORT'
+
+        # TODO: Need a concrete example of how an openmano descriptor
+        # uses internal connections. They are parsed here but not yet
+        # mapped into the RIFT descriptor.
+        if 'internal-connections' in openmano_vnfd:
+            int_conn_list = openmano_vnfd['internal-connections']
+
+        def add_external_interfaces(vdu, numa):
+            if 'interfaces' not in numa:
+                return
+
+            numa_if_list = numa['interfaces']
+            for numa_if in numa_if_list:
+                ext_conn = self.find_external_connection(vdu.name, numa_if['name'])
+                if not ext_conn:
+                    continue
+
+                ext_iface = vdu.external_interface.add()
+                ext_iface.name = numa_if['name']
+                ext_iface.vnfd_connection_point_ref = ext_conn['name']
+                ext_iface.virtual_interface.vpci = numa_if['vpci']
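+                # Per the openmano test VNFs' own comment, dedicated
+                # "no" means an SR-IOV VF and "yes" means passthrough: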
+                if numa_if['dedicated'] == 'no':
+                    ext_iface.virtual_interface.type_yang = 'SR_IOV'
+                else:
+                    ext_iface.virtual_interface.type_yang = 'PCI_PASSTHROUGH'
+
+        vnfc_list = openmano_vnfd['VNFC']
+        for vnfc in vnfc_list:
+            vdu = vnfd.vdu.add()
+            vdu_dict = dict(
+                id=str(uuid.uuid1()),
+                name=vnfc['name'],
+                image=vnfc['VNFC image'],
+                vm_flavor={"storage_gb": vnfc["disk"] if "disk" in vnfc else 20},
+                )
+            if "description" in vnfc:
+                vdu_dict["description"] = vnfc['description']
+
+            vdu.from_dict(vdu_dict)
+
+            vnfd.mgmt_interface.vdu_id = vdu.id
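+            # NOTE: assigned on every pass through the VNFC loop, so the
+            # last VDU in the list ends up as the management VDU.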
+
+            numa_list = vnfc['numas']
+            memory = 0
+            vcpu_count = 0
+            numa_node_cnt = 0
+
+            for numa in numa_list:
+                node = vdu.guest_epa.numa_node_policy.node.add()
+                node.id = numa_node_cnt
+                # node.memory_mb = int(numa['memory']) * 1024
+                numa_node_cnt += 1
+
+                memory = memory + node.memory_mb
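+                # NOTE: node.memory_mb is never assigned (see the
+                # commented-out line above), so 'memory' accumulates the
+                # field default of 0; flavor memory is hardcoded below.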
+                # Need a better explanation of "cores", "paired-threads", "threads"
+                # in openmano descriptor. Particularly how they map to cpu and
+                # thread pinning policies
+                if 'paired-threads' in numa:
+                    vcpu_count = vcpu_count + int(numa['paired-threads']) * 2
+
+                if 'cores' in numa:
+                    vcpu_count = vcpu_count + int(numa['cores'])
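+                # Worked example: the bundled tidgen VNFs declare
+                # 'paired-threads: 5', giving vcpu_count = 5 * 2 = 10,
+                # matching the "10 cores" in their descriptions.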
+
+                add_external_interfaces(vdu, numa)
+
+            # vdu.vm_flavor.memory_mb = memory
+            vdu.vm_flavor.memory_mb = 12 * 1024
+            vdu.vm_flavor.vcpu_count = vcpu_count
+            vdu.guest_epa.numa_node_policy.node_cnt = numa_node_cnt
+            vdu.guest_epa.numa_node_policy.mem_policy = 'STRICT'
+            vdu.guest_epa.mempage_size = 'LARGE'
+            vdu.guest_epa.cpu_pinning_policy = 'DEDICATED'
+            vdu.guest_epa.cpu_thread_pinning_policy = 'PREFER'
+
+            # TODO: Enable hypervisor epa
+            # vdu.hypervisor_epa.version = vnfc['hypervisor']['version']
+            # if vnfc['hypervisor']['type'] == 'QEMU-kvm':
+            #     vdu.hypervisor_epa.type_yang = 'REQUIRE_KVM'
+            # else:
+            #     vdu.hypervisor_epa.type_yang = 'PREFER_KVM'
+
+            # TODO: Enable host epa
+            # vdu.host_epa.cpu_feature = vnfc['processor']['features']
+
+            # Parse the bridge interfaces
+            if 'bridge-ifaces' in vnfc:
+                bridge_ifaces = vnfc['bridge-ifaces']
+
+                for bridge_iface in bridge_ifaces:
+                    # TODO: Fix this
+                    if bridge_iface['name'] == 'eth0':
+                        continue
+
+                    ext_conn = self.find_external_connection(vdu.name,
+                                                             bridge_iface['name'])
+                    if ext_conn:
+                        ext_iface = vdu.external_interface.add()
+                        ext_iface.name = bridge_iface['name']
+                        ext_iface.vnfd_connection_point_ref = ext_conn['name']
+                        if 'vpci' in bridge_iface:
+                            ext_iface.virtual_interface.vpci = bridge_iface['vpci']
+                        ext_iface.virtual_interface.type_yang = 'VIRTIO'
+
+            # set vpci information for the 'default' network
+            # TODO: This needs to be inferred from bridge ifaces;
+            # need input from TEF
+            vdu.mgmt_vpci = "0000:00:0a.0"
+
+
+class OpenManoDescriptor(object):
+    def __init__(self, yaml_file_hdl):
+        # safe_load is sufficient for these plain-data descriptors
+        self.dictionary = yaml.safe_load(yaml_file_hdl)
+
+    @property
+    def type(self):
+        """ The descriptor type (ns or vnf)"""
+        if 'vnf' in self.dictionary:
+            return "vnf"
+        else:
+            return "ns"
+
+    def dump(self):
+        """ Dump the Descriptor out to stdout """
+        print(yaml.dump(self.dictionary))
+
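+# Minimal usage sketch (assuming 'fh' is an open handle on one of the
+# test YAML files below): OpenManoDescriptor(fh).type is "vnf" when the
+# document has a top-level 'vnf' key, and "ns" otherwise.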
+
+def is_writable_directory(dir_path):
+    """ Returns True if dir_path is writable, False otherwise
+
+    Arguments:
+        dir_path - A directory path
+    """
+    if not os.path.exists(dir_path):
+        raise ValueError("Directory does not exist: %s", dir_path)
+
+    try:
+        testfile = tempfile.TemporaryFile(dir=dir_path)
+        testfile.close()
+    except OSError:
+        return False
+
+    return True
+
+
+def create_vnfs_from_yaml_files(yaml_file_hdls):
+    """ Create a list of RiftVnfd instances from yaml file handles
+
+    Arguments:
+        yaml_file_hdls - OpenMano Yaml file handles
+
+    Returns:
+        A list of RiftVnfd instances
+    """
+    vnf_list = []
+    for yaml_file_hdl in yaml_file_hdls:
+        openmano = OpenManoDescriptor(yaml_file_hdl)
+        yaml_file_hdl.seek(0)
+
+        if openmano.type != "vnf":
+            continue
+
+        vnf = RiftVnfd(openmano)
+        vnf.openmano2rift()
+        vnf_list.append(vnf)
+
+    return vnf_list
+
+
+def create_ns_from_yaml_files(yaml_file_hdls, vnf_list):
+    """ Create a list of RiftNS instances from yaml file handles
+
+    Arguments:
+        yaml_file_hdls - OpenMano Yaml file handles
+        vnf_list - list of RiftVnfd
+
+    Returns:
+        A list of RiftNS instances
+    """
+    ns_list = []
+    for yaml_file_hdl in yaml_file_hdls:
+        openmano = OpenManoDescriptor(yaml_file_hdl)
+        if openmano.type != "ns":
+            continue
+
+        net_svc = RiftNS(openmano)
+        net_svc.openmano2rift(vnf_list)
+        ns_list.append(net_svc)
+
+    return ns_list
+
+
+def parse_args(argv=sys.argv[1:]):
+    """ Parse the command line arguments
+
+    Arguments:
+        argv - The list of arguments to parse
+
+    Returns:
+        Argparse Namespace instance
+
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '-o', '--outdir',
+        default='.',
+        help="Directory to output converted descriptors",
+        )
+
+    parser.add_argument(
+        '-f', '--format',
+        choices=['yaml', 'xml', 'json'],
+        default='xml',
+        help="Descriptor output format",
+        )
+
+    parser.add_argument(
+        'yaml_file_hdls',
+        metavar="yaml_file",
+        nargs="+",
+        type=argparse.FileType('r'),
+        help="OpenMano YAML Descriptor File",
+        )
+
+    args = parser.parse_args(argv)
+
+    if not os.path.exists(args.outdir):
+        os.makedirs(args.outdir)
+
+    if not is_writable_directory(args.outdir):
+        logging.error("Directory %s is not writable", args.outdir)
+        sys.exit(1)
+
+    return args
+
+
+def main(argv=sys.argv[1:]):
+    args = parse_args(argv)
+
+    vnf_list = create_vnfs_from_yaml_files(args.yaml_file_hdls)
+    ns_list = create_ns_from_yaml_files(args.yaml_file_hdls, vnf_list)
+
+    writer = DescriptorFileWriter(
+        module_list=['nsd', 'rw-nsd', 'vnfd', 'rw-vnfd'],
+        output_dir=args.outdir,
+        output_format=args.format,
+        )
+
+    for nw_svc in ns_list:
+        writer.write_descriptor(nw_svc, subdir="nsd")
+
+    for vnf in vnf_list:
+        writer.write_descriptor(vnf, subdir="vnfd")
+
+
+if __name__ == "__main__":
+    main()
+
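+# Hypothetical invocation (script name assumed), converting the bundled
+# tidgen descriptors to YAML under ./out:
+#   python openmano2rift.py -f yaml -o ./out \
+#       tidgen_vnf_2sriov.yaml tidgen_ns_2sriov.yaml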
diff --git a/models/openmano/test/tidgen_ns_2sriov.yaml b/models/openmano/test/tidgen_ns_2sriov.yaml
new file mode 100644 (file)
index 0000000..9c39816
--- /dev/null
@@ -0,0 +1,34 @@
+---
+name:            2tidgenMWC_2sriov
+description:     scenario with 2 tidgenMWC VNFs
+topology:
+  nodes:
+    tidgen1:                           #VNF name
+      type:      VNF
+      VNF model: tidgenMWC_2sriov      #VNF type
+    tidgen2:
+      type:      VNF
+      VNF model: tidgenMWC_2sriov
+    default:                    #Name of external network
+      type:      external_network
+      model:     default
+  connections:
+    mgmtnet:
+      nodes:
+      -  tidgen1: eth0
+      -  tidgen2: eth0
+    datanet0: 
+      nodes:
+      -  tidgen1: xe0
+      -  tidgen2: xe0
+    datanet1: 
+      nodes:
+      -  tidgen1: xe1
+      -  tidgen2: xe1
+    control-net:
+      nodes:
+      -   default: null
+      -   tidgen1: eth1
+      -   tidgen2: eth1
+
+
diff --git a/models/openmano/test/tidgen_ns_2sriov_no_ctrlnet.yaml b/models/openmano/test/tidgen_ns_2sriov_no_ctrlnet.yaml
new file mode 100644 (file)
index 0000000..d174895
--- /dev/null
@@ -0,0 +1,29 @@
+---
+name:            2tidgenMWC_2sriov_no_ctrlnet
+description:     scenario with 2 tidgenMWC VNFs
+topology:
+  nodes:
+    tidgen1:                           #VNF name
+      type:      VNF
+      VNF model: tidgenMWC_2sriov_no_ctrlnet  #VNF type
+    tidgen2:
+      type:      VNF
+      VNF model: tidgenMWC_2sriov_no_ctrlnet
+    default:                    #Name of external network
+      type:      external_network
+      model:     default
+  connections:
+    mgmtnet:
+      nodes:
+      -  tidgen1: eth0
+      -  tidgen2: eth0
+    datanet0: 
+      nodes:
+      -  tidgen1: xe0
+      -  tidgen2: xe0
+    datanet1: 
+      nodes:
+      -  tidgen1: xe1
+      -  tidgen2: xe1
+
+
diff --git a/models/openmano/test/tidgen_ns_4sriov.yaml b/models/openmano/test/tidgen_ns_4sriov.yaml
new file mode 100644 (file)
index 0000000..4034f8a
--- /dev/null
@@ -0,0 +1,42 @@
+---
+name:            2tidgenMWC_4sriov
+description:     scenario with 2 tidgenMWC VNFs
+topology:
+  nodes:
+    tidgen1:                           #VNF name
+      type:      VNF
+      VNF model: tidgenMWC_4sriov      #VNF type
+    tidgen2:
+      type:      VNF
+      VNF model: tidgenMWC_4sriov
+    default:                    #Name of external network
+      type:      external_network
+      model:     default
+  connections:
+    mgmtnet:
+      nodes:
+      -  tidgen1: eth0
+      -  tidgen2: eth0
+    datanet0: 
+      nodes:
+      -  tidgen1: xe0
+      -  tidgen2: xe0
+    datanet1: 
+      nodes:
+      -  tidgen1: xe1
+      -  tidgen2: xe1
+    datanet2: 
+      nodes:
+      -  tidgen1: xe2
+      -  tidgen2: xe2
+    datanet3: 
+      nodes:
+      -  tidgen1: xe3
+      -  tidgen2: xe3
+    control-net:
+      nodes:
+      -   default: null
+      -   tidgen1: eth1
+      -   tidgen2: eth1
+
+
diff --git a/models/openmano/test/tidgen_ns_4sriov_no_ctrlnet.yaml b/models/openmano/test/tidgen_ns_4sriov_no_ctrlnet.yaml
new file mode 100644 (file)
index 0000000..ee07a26
--- /dev/null
@@ -0,0 +1,33 @@
+---
+name:            2tidgenMWC_4sriov_no_ctrlnet
+description:     scenario with 2 tidgenMWC VNFs
+topology:
+  nodes:
+    tidgen1:                           #VNF name
+      type:      VNF
+      VNF model: tidgenMWC_4sriov_no_ctrlnet      #VNF type
+    tidgen2:
+      type:      VNF
+      VNF model: tidgenMWC_4sriov_no_ctrlnet
+    default:                    #Name of external network
+      type:      external_network
+      model:     default
+  connections:
+    datanet0: 
+      nodes:
+      -  tidgen1: xe0
+      -  tidgen2: xe0
+    datanet1: 
+      nodes:
+      -  tidgen1: xe1
+      -  tidgen2: xe1
+    datanet2: 
+      nodes:
+      -  tidgen1: xe2
+      -  tidgen2: xe2
+    datanet3: 
+      nodes:
+      -  tidgen1: xe3
+      -  tidgen2: xe3
+
+
diff --git a/models/openmano/test/tidgen_vnf_2sriov.yaml b/models/openmano/test/tidgen_vnf_2sriov.yaml
new file mode 100644 (file)
index 0000000..983490b
--- /dev/null
@@ -0,0 +1,58 @@
+---
+vnf:
+    name:        tidgenMWC_2sriov
+    description: tidgen for MWC2016; 12G 10 cores
+    class:       TID
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              tidgenMWC-VM
+        local_iface_name:  eth0
+        description:       Bridge interface, request for dhcp
+    -   name:              eth1
+        type:              mgmt        # "mgmt"(autoconnect to management net), "bridge", "data"
+        VNFC:              tidgenMWC-VM # Virtual Machine this interface belongs to
+        local_iface_name:  eth1       # name inside this Virtual Machine
+        description:       Other management interface for general use
+    -   name:              xe0
+        type:              data
+        VNFC:              tidgenMWC-VM
+        local_iface_name:  xe0
+        description:       Data interface 1
+    -   name:              xe1
+        type:              data
+        VNFC:              tidgenMWC-VM
+        local_iface_name:  xe1
+        description:       Data interface 2
+    VNFC:                              # Virtual machine array
+    -   name:        tidgenMWC-VM       # name of Virtual Machine
+        disk: 10
+        description: tidgen for MWC 12G 10 cores
+        # VNFC image: /mnt/powervault/virtualization/vnfs/tid/tidgenMWC.qcow2
+        VNFC image: tidgenMWC
+        image metadata: {"use_incremental": "no" }  #is already incremental
+        processor:                     #Optional, leave it
+            model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+            features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+        hypervisor:                    #Optional, leave it
+            type: QEMU-kvm
+            version: "10002|12001|2.6.32-358.el6.x86_64"
+        numas:
+        -   paired-threads: 5          # "cores", "paired-threads", "threads"
+            memory:         12         # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:10.0"
+                dedicated: "no"         # "yes"(passthrough), "no"(sriov)
+                bandwidth: 10 Gbps
+            -   name:      xe1
+                vpci:      "0000:00:11.0"
+                dedicated: "no"
+                bandwidth: 10 Gbps
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"    # Optional
+            bandwidth: 1 Mbps          # Optional, informative only
+        -   name:      eth1
+            vpci:      "0000:00:0b.0"
+            bandwidth: 1 Mbps
diff --git a/models/openmano/test/tidgen_vnf_2sriov_no_ctrlnet.yaml b/models/openmano/test/tidgen_vnf_2sriov_no_ctrlnet.yaml
new file mode 100644 (file)
index 0000000..6c7df27
--- /dev/null
@@ -0,0 +1,50 @@
+---
+vnf:
+    name:        tidgenMWC_2sriov_no_ctrlnet
+    description: tidgen for MWC2016; 12G 10 cores
+    class:       TID
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              tidgenMWC-VM
+        local_iface_name:  eth0
+        description:       Bridge interface, request for dhcp
+    -   name:              xe0
+        type:              data
+        VNFC:              tidgenMWC-VM
+        local_iface_name:  xe0
+        description:       Data interface 1
+    -   name:              xe1
+        type:              data
+        VNFC:              tidgenMWC-VM
+        local_iface_name:  xe1
+        description:       Data interface 2
+    VNFC:                              # Virtual machine array
+    -   name:        tidgenMWC-VM       # name of Virtual Machine
+        disk: 10
+        description: tidgen for MWC 12G 10 cores
+        # VNFC image: /mnt/powervault/virtualization/vnfs/tid/tidgenMWC.qcow2
+        VNFC image: tidgenMWC
+        image metadata: {"use_incremental": "no" }  #is already incremental
+        processor:                     #Optional, leave it
+            model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+            features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+        hypervisor:                    #Optional, leave it
+            type: QEMU-kvm
+            version: "10002|12001|2.6.32-358.el6.x86_64"
+        numas:
+        -   paired-threads: 5          # "cores", "paired-threads", "threads"
+            memory:         12         # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:10.0"
+                dedicated: "no"         # "yes"(passthrough), "no"(sriov)
+                bandwidth: 10 Gbps
+            -   name:      xe1
+                vpci:      "0000:00:11.0"
+                dedicated: "no"
+                bandwidth: 10 Gbps
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"
+            bandwidth: 1 Mbps
diff --git a/models/openmano/test/tidgen_vnf_4sriov.yaml b/models/openmano/test/tidgen_vnf_4sriov.yaml
new file mode 100644 (file)
index 0000000..af315d2
--- /dev/null
@@ -0,0 +1,76 @@
+---
+vnf:
+    name:        tidgenMWC_4sriov
+    description: tidgen for MWC2016; 12G 10 cores
+    class:       TID
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              tidgenMWC-VM
+        local_iface_name:  eth0
+        description:       Bridge interface, request for dhcp
+    -   name:              eth1
+        type:              mgmt        # "mgmt"(autoconnect to management net), "bridge", "data"
+        VNFC:              tidgenMWC-VM # Virtual Machine this interface belongs to
+        local_iface_name:  eth1       # name inside this Virtual Machine
+        description:       Other management interface for general use
+    -   name:              xe0
+        type:              data
+        VNFC:              tidgenMWC-VM
+        local_iface_name:  xe0
+        description:       Data interface 1
+    -   name:              xe1
+        type:              data
+        VNFC:              tidgenMWC-VM
+        local_iface_name:  xe1
+        description:       Data interface 2
+    -   name:              xe2
+        type:              data
+        VNFC:              tidgenMWC-VM
+        local_iface_name:  xe2
+        description:       Data interface 3
+    -   name:              xe3
+        type:              data
+        VNFC:              tidgenMWC-VM
+        local_iface_name:  xe3
+        description:       Data interface 4
+    VNFC:                              # Virtual machine array
+    -   name:        tidgenMWC-VM       # name of Virtual Machine
+        disk: 10
+        description: tidgen for MWC 12G 10 cores
+        # VNFC image: /mnt/powervault/virtualization/vnfs/tid/tidgenMWC.qcow2
+        VNFC image: tidgenMWC
+        image metadata: {"use_incremental": "no" }  #is already incremental
+        processor:                     #Optional, leave it
+            model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+            features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+        hypervisor:                    #Optional, leave it
+            type: QEMU-kvm
+            version: "10002|12001|2.6.32-358.el6.x86_64"
+        numas:
+        -   paired-threads: 5          # "cores", "paired-threads", "threads"
+            memory:         12         # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:10.0"
+                dedicated: "no"         # "yes"(passthrough), "no"(sriov)
+                bandwidth: 10 Gbps
+            -   name:      xe1
+                vpci:      "0000:00:11.0"
+                dedicated: "no"
+                bandwidth: 10 Gbps
+            -   name:      xe2
+                vpci:      "0000:00:12.0"
+                dedicated: "no"         # "yes"(passthrough), "no"(sriov)
+                bandwidth: 10 Gbps
+            -   name:      xe3
+                vpci:      "0000:00:13.0"
+                dedicated: "no"
+                bandwidth: 10 Gbps
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"    # Optional
+            bandwidth: 1 Mbps          # Optional, informative only
+        -   name:      eth1
+            vpci:      "0000:00:0b.0"    # Optional
+            bandwidth: 1 Mbps          # Optional, informative only
diff --git a/models/openmano/test/tidgen_vnf_4sriov_no_ctrlnet.yaml b/models/openmano/test/tidgen_vnf_4sriov_no_ctrlnet.yaml
new file mode 100644 (file)
index 0000000..9cb9c4d
--- /dev/null
@@ -0,0 +1,68 @@
+---
+vnf:
+    name:        tidgenMWC_4sriov_no_ctrlnet
+    description: tidgen for MWC2016; 12G 10 cores
+    class:       TID
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              tidgenMWC-VM
+        local_iface_name:  eth0
+        description:       Bridge interface, request for dhcp
+    -   name:              xe0
+        type:              data
+        VNFC:              tidgenMWC-VM
+        local_iface_name:  xe0
+        description:       Data interface 1
+    -   name:              xe1
+        type:              data
+        VNFC:              tidgenMWC-VM
+        local_iface_name:  xe1
+        description:       Data interface 2
+    -   name:              xe2
+        type:              data
+        VNFC:              tidgenMWC-VM
+        local_iface_name:  xe2
+        description:       Data interface 3
+    -   name:              xe3
+        type:              data
+        VNFC:              tidgenMWC-VM
+        local_iface_name:  xe3
+        description:       Data interface 4
+    VNFC:                              # Virtual machine array
+    -   name:        tidgenMWC-VM       # name of Virtual Machine
+        disk: 10
+        description: tidgen for MWC 12G 10 cores
+        # VNFC image: /mnt/powervault/virtualization/vnfs/tid/tidgenMWC.qcow2
+        VNFC image: tidgenMWC
+        image metadata: {"use_incremental": "no" }  #is already incremental
+        processor:                     #Optional, leave it
+            model: Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz
+            features: ["64b", "iommu", "lps", "tlbps", "hwsv", "dioc", "ht"]
+        hypervisor:                    #Optional, leave it
+            type: QEMU-kvm
+            version: "10002|12001|2.6.32-358.el6.x86_64"
+        numas:
+        -   paired-threads: 5          # "cores", "paired-threads", "threads"
+            memory:         12         # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:10.0"
+                dedicated: "no"         # "yes"(passthrough), "no"(sriov)
+                bandwidth: 10 Gbps
+            -   name:      xe1
+                vpci:      "0000:00:11.0"
+                dedicated: "no"
+                bandwidth: 10 Gbps
+            -   name:      xe2
+                vpci:      "0000:00:12.0"
+                dedicated: "no"         # "yes"(passthrough), "no"(sriov)
+                bandwidth: 10 Gbps
+            -   name:      xe3
+                vpci:      "0000:00:13.0"
+                dedicated: "no"
+                bandwidth: 10 Gbps
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"    # Optional
+            bandwidth: 1 Mbps          # Optional, informative only
diff --git a/models/plugins/CMakeLists.txt b/models/plugins/CMakeLists.txt
new file mode 100644 (file)
index 0000000..12de6a0
--- /dev/null
@@ -0,0 +1,25 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 2014/12/11
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(subdirs
+  yang
+  )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/models/plugins/yang/CMakeLists.txt b/models/plugins/yang/CMakeLists.txt
new file mode 100644 (file)
index 0000000..782415b
--- /dev/null
@@ -0,0 +1,71 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 10/15/2014
+# 
+
+# NOTE: These files will be used by the new MANO subsystem
+set(source_yang_files
+  ietf-l2-topology.yang
+  ietf-network-topology.yang
+  ietf-network.yang
+  nsd.yang rw-nsd.yang
+  nsr.yang rw-nsr.yang
+  pnfd.yang
+  rw-topology.yang
+  vld.yang rw-vld.yang
+  vlr.yang rw-vlr.yang
+  vnfd.yang rw-vnfd.yang
+  vnfr.yang rw-vnfr.yang
+  vnffgd.yang
+  )
+
+rift_add_yang_target(
+  TARGET mano-types_yang
+  YANG_FILES
+    mano-types.yang
+  COMPONENT ${PKG_LONG_NAME}
+  )
+
+rift_add_yang_target(
+  TARGET mano_yang
+  YANG_FILES ${source_yang_files}
+  GIR_PATHS ${CMAKE_CURRENT_BINARY_DIR}
+  COMPONENT ${PKG_LONG_NAME}
+  LIBRARIES
+    rwmanifest_yang_gen
+    rwschema_yang_gen
+    rwcloud_yang_gen
+    rwconfig_agent_yang_gen
+    mano-types_yang_gen
+  )
+
+#rift_gen_yang_tree(mano-pyang-trees
+#  OUTFILE_PREFIX mano.yang
+#  YANG_FILES
+#    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/vnfd.yang
+#    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/vld.yang
+#    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/nsd.yang
+#    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/rw-vnfd.yang
+#    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/rw-vld.yang
+#    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/rw-nsd.yang
+#    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/pnfd.yang
+#    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/vnffgd.yang
+#    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/ietf-network.yang
+#    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/ietf-network-topology.yang
+#    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/ietf-l2-topology.yang
+#    ${RIFT_SUBMODULE_SOURCE_ROOT}/models/plugins/yang/rw-topology.yang
+#  )
diff --git a/models/plugins/yang/Makefile b/models/plugins/yang/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/models/plugins/yang/ietf-l2-topology.yang b/models/plugins/yang/ietf-l2-topology.yang
new file mode 100644 (file)
index 0000000..9f572cb
--- /dev/null
@@ -0,0 +1,578 @@
+
+/*
+ * NO RW COPYRIGHT
+ *
+ */
+
+module ietf-l2-topology {
+    yang-version 1;
+    namespace "urn:ietf:params:xml:ns:yang:ietf-l2-topology";
+    prefix "l2t";
+
+    import ietf-network {
+      prefix "nw";
+    }
+
+    import ietf-network-topology {
+      prefix "nt";
+    }
+
+    import ietf-inet-types {
+      prefix "inet";
+    }
+
+    import ietf-yang-types {
+      prefix "yang";
+    }
+
+    organization "TBD";
+    contact "I-D Editor: jie.dong@huawei.com";
+
+    description
+      "This module defines a basic model for
+       the layer-2 topology of a network";
+
+    revision "2015-06-23" {
+      description "Initial revision";
+      reference "draft-ietf-i2rs-l2-network-topology-01";
+    }
+
+   /*
+    * Typedefs
+    */
+
+    typedef vlan {
+      type uint16 {
+        range "0..4095";
+      }
+      description "VLAN ID";
+    }
+
+    typedef trill-nickname {
+      type uint16;
+      description "TRILL Nickname";
+    }
+
+    typedef flag-type {
+      type identityref {
+        base "flag-identity";
+      }
+      description "Base type for flags";
+    }
+
+    typedef l2-network-event-type {
+      type enumeration {
+        enum "add" {
+          value 0;
+          description "An L2 node or link or termination-point
+          has been added";
+        }
+        enum "remove" {
+          value 1;
+          description "An L2 node or link or termination-point
+          has been removed";
+        }
+        enum "update" {
+          value 2;
+          description "An L2 node or link or termination-point
+          has been updated";
+        }
+      }
+      description "l2 network event type for notifications";
+    } // l2-network-event-type
+
+
+    /*
+    * Features
+    */
+
+    feature VLAN {
+      description
+        "Indicates that the system supports the
+         vlan functions";
+    }
+
+    feature QinQ {
+      description
+        "Indicates that the system supports the
+         qinq functions";
+    }
+
+    feature PBB {
+      description
+       "Indicates that the device supports the
+        provider-backbone-bridging functions";
+    }
+
+    feature VPLS {
+      description
+        "Indicates that the device supports the
+         VPLS functions";
+      reference "RFC 4761, RFC 4762";
+    }
+
+    feature TRILL {
+      description
+        "Indicates that the device supports the
+         TRILL functions";
+      reference "RFC 6325";
+    }
+
+    feature VXLAN {
+      description
+        "Indicates that the device supports the
+         VXLAN functions";
+      reference "RFC 7348";
+    }
+
+   /*
+    * Identities
+    */
+    identity flag-identity {
+      description "Base type for flags";
+    }
+
+    identity encapsulation-type {
+      description
+        "Base identity from which specific encapsulation
+         types are derived.";
+    }
+
+    identity eth-encapsulation-type {
+      base encapsulation-type;
+      description
+        "Base identity from which specific ethernet
+         encapsulation types are derived.";
+
+    }
+
+    identity ethernet {
+      base eth-encapsulation-type;
+      description
+        "native ethernet encapsulation";
+    }
+
+    identity vlan {
+      base eth-encapsulation-type;
+      description
+        "vlan encapsulation";
+    }
+
+    identity qinq {
+      base eth-encapsulation-type;
+      description
+        "qinq encapsulation";
+    }
+
+    identity pbb {
+      base eth-encapsulation-type;
+      description
+        "pbb encapsulation";
+    }
+
+    identity trill {
+      base eth-encapsulation-type;
+      description
+        "trill encapsulation";
+    }
+
+    identity vpls {
+      base eth-encapsulation-type;
+      description
+        "vpls encapsulation";
+    }
+
+    identity vxlan {
+      base eth-encapsulation-type;
+      description
+        "vxlan encapsulation";
+    }
+
+    identity frame-relay {
+      base encapsulation-type;
+      description
+        "Frame Relay encapsulation";
+    }
+
+    identity ppp {
+      base encapsulation-type;
+      description
+        "PPP encapsulation";
+    }
+
+    identity hdlc {
+      base encapsulation-type;
+      description
+        "HDLC encapsulation";
+    }
+
+    identity atm {
+      base encapsulation-type;
+      description
+        "Base identity from which specific ATM
+         encapsulation types are derived.";
+
+    }
+
+    identity pwe3 {
+      base encapsulation-type;
+      description
+        "Base identity from which specific pw
+         encapsulation types are derived.";
+    }
+
+
+   /*
+    * Groupings
+    */
+
+
+    grouping l2-network-type {
+      description "Identify the topology type to be L2.";
+      container l2-network {
+        presence "indicates L2 Network";
+        description
+        "The presence of the container node indicates
+         L2 Topology";
+      }
+    }
+
+    grouping l2-network-attributes {
+      description "L2 Topology scope attributes";
+      container l2-network-attributes {
+        description "Containing L2 network attributes";
+        leaf name {
+          type string;
+          description "Name of the L2 network";
+        }
+
+        leaf-list flag {
+          type flag-type;
+          description "L2 network flags";
+        }
+      }
+    }
+
+    grouping l2-node-attributes {
+      description "L2 node attributes";
+      container l2-node-attributes {
+        description "Containing L2 node attributes";
+        leaf name {
+          type string;
+          description "Node name";
+        }
+        leaf description {
+          type string;
+          description "Node description";
+        }
+        leaf-list management-address {
+          type inet:ip-address;
+          description "System management address";
+        }
+        leaf management-vid {
+          if-feature VLAN;
+          type vlan;
+          description "System management VID";
+        }
+        leaf-list nick-name {
+          if-feature TRILL;
+          type trill-nickname;
+          description "Nickname of the RBridge";
+        }
+        leaf-list flag {
+          type flag-type;
+          description "Node operational flags";
+        }
+      }
+    }  // grouping l2-node-attributes
+
+
+    grouping l2-link-attributes {
+      description "L2 link attributes";
+      container l2-link-attributes {
+        description "Containing L2 link attributes";
+        leaf name {
+          type string;
+          description "Link name";
+        }
+        leaf-list flag {
+          type flag-type;
+          description "Link flags";
+        }
+        leaf rate {
+          type decimal64 {
+            fraction-digits 2;
+          }
+          description "Link rate";
+
+        }
+        leaf delay {
+          type uint32;
+          description "Link delay in microseconds";
+        }
+        leaf-list srlg {
+          type uint32;
+          description
+            "List of Shared Risk Link Groups
+             this link belongs to.";
+        }
+      }
+    } // grouping l2-link-attributes
+
+    grouping l2-termination-point-attributes {
+      description "L2 termination point attributes";
+      container l2-termination-point-attributes {
+        description "Containing L2 TP attributes";
+        leaf description {
+          type string;
+          description "Port description";
+        }
+
+        leaf maximum-frame-size {
+          type uint32;
+          description "Maximum frame size";
+        }
+
+        choice l2-termination-point-type {
+          description
+            "Indicates termination-point type
+             specific attributes";
+          case ethernet {
+            leaf mac-address {
+              type yang:mac-address;
+              description "Interface MAC address";
+            }
+
+            leaf eth-encapsulation {
+              type identityref {
+                base eth-encapsulation-type;
+              }
+              description
+                "Encapsulation type of this
+                 termination point.";
+            }
+
+            leaf port-vlan-id {
+              if-feature VLAN;
+              type vlan;
+              description "Port VLAN ID";
+            }
+
+            list vlan-id-name {
+              if-feature VLAN;
+              key "vlan-id";
+              description "Interface configured VLANs";
+              leaf vlan-id {
+                type vlan;
+                description "VLAN ID";
+              }
+              leaf vlan-name {
+                type string;
+                description "VLAN Name";
+              }
+            }
+          } //case ethernet
+
+          case legacy {
+            leaf encapsulation {
+              type identityref {
+                base encapsulation-type;
+              }
+              description
+                "Encapsulation type of this termination point.";
+            }
+          } //case legacy
+
+        } //choice termination-point-type
+
+        leaf tp-state {
+          type enumeration {
+            enum in-use {
+              value 0;
+              description
+                "the termination point is in forwarding state";
+            }
+            enum blocking {
+              value 1;
+              description
+                "the termination point is in blocking state";
+            }
+            enum down {
+              value 2;
+              description
+                "the termination point is in down state";
+            }
+            enum others {
+              value 3;
+              description
+                "the termination point is in other state";
+            }
+          }
+          config false;
+          description "State of the termination point";
+        }
+      }
+    } // grouping l2-termination-point-attributes
+
+/*** grouping of network/node/link/tp leaf-refs ***/
+
+  grouping network-ref {
+    description
+      "Grouping for an absolute reference to a network topology
+      instance.";
+    leaf network-ref {
+      type leafref {
+        path "/nw:network/nw:network-id";
+      }
+      description
+        "An absolute reference to a network topology instance.";
+    }
+  }
+
+  grouping link-ref {
+    description
+      "Grouping for an absolute reference to a link instance.";
+      uses network-ref;
+      leaf link-ref {
+        type leafref {
+          path "/nw:network"
+            +"[nw:network-id = current()/../network-ref]"
+            +"/nt:link/nt:link-id";
+        }
+      description
+        "An absolute reference to a link instance.";
+    }
+  }
+
+  grouping node-ref {
+    description
+      "Grouping for an absolute reference to a node instance.";
+    uses network-ref;
+    leaf node-ref {
+      type leafref {
+        path "/nw:network"
+          +"[nw:network-id = current()/../network-ref]"
+          +"/nw:node/nw:node-id";
+      }
+      description
+        "An absolute reference to a node instance.";
+    }
+  }
+
+  grouping tp-ref {
+    description
+      "Grouping for an absolute reference to a termination point.";
+    uses node-ref;
+    leaf tp-ref {
+      type leafref {
+        path "/nw:network"
+          +"[nw:network-id = current()/../network-ref]"
+          +"/nw:node[nw:node-id = current()/../node-ref]"
+          +"/nt:termination-point/nt:tp-id";
+      }
+      description
+        "Grouping for an absolute reference to a TP.";
+    }
+  }
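+  // Illustrative instance path that a tp-ref resolves to (all names
+  // assumed): /nw:network[network-id='net1']/nw:node[node-id='n1']
+  //           /nt:termination-point[tp-id='tp1']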
+
+
+   /*
+    * Data nodes
+    */
+    augment "/nw:network/nw:network-types" {
+      description
+        "Introduce new network type for L2 topology";
+      uses l2-network-type;
+    }
+
+    augment "/nw:network" {
+      /* RIFT-Change: when not to be used yet
+      when "nw:network-types/l2-network" {
+        description
+          "Augmentation parameters apply only for networks
+           with L2 topology";
+      }
+      */
+      description
+        "Configuration parameters for the L2 network
+         as a whole";
+      uses l2-network-attributes;
+    }
+
+    augment "/nw:network/nw:node" {
+      /* RIFT-Change: when not to be used yet
+      when "../nw:network-types/l2-network" {
+        description
+          "Augmentation parameters apply only for networks
+           with L2 topology";
+      }
+      */
+      description
+        "Configuration parameters for L2 at the node
+         level";
+      uses l2-node-attributes;
+    }
+
+    augment "/nw:network/nt:link" {
+      /* RIFT-Change: when not to be used yet
+      when "/nw:network/nw:network-types/l2-network" {
+        description
+          "Augmentation parameters apply only for networks
+           with L2 topology";
+      }
+      */
+      description "Augment L2 topology link information";
+      uses l2-link-attributes;
+    }
+
+    augment "/nw:network/nw:node/nt:termination-point" {
+      /* RIFT-Change: when not to be used yet
+      when "/nw:network/nw:network-types/l2-network" {
+        description
+          "Augmentation parameters apply only for networks
+           with L2 topology";
+      }
+      */
+      description
+        "Augment L2 topology termination point configuration";
+      uses l2-termination-point-attributes;
+    }
+
+   /*
+    * Notifications
+    */
+
+    notification l2-node-event {
+      description "Notification event for L2 node";
+      leaf event-type {
+        type l2-network-event-type;
+        description "Event type";
+      }
+      uses node-ref;
+      uses l2-network-type;
+      uses l2-node-attributes;
+    }
+
+    notification l2-link-event {
+      description "Notification event for L2 link";
+      leaf event-type {
+        type l2-network-event-type;
+        description "Event type";
+      }
+      uses link-ref;
+      uses l2-network-type;
+      uses l2-link-attributes;
+    }
+
+    notification l2-termination-point-event {
+      description "Notification event for L2 termination point";
+      leaf event-type {
+        type l2-network-event-type;
+        description "Event type";
+      }
+      uses tp-ref;
+      uses l2-network-type;
+      uses l2-termination-point-attributes;
+    }
+
+}  // module ietf-l2-topology
diff --git a/models/plugins/yang/ietf-network-topology.yang b/models/plugins/yang/ietf-network-topology.yang
new file mode 100644 (file)
index 0000000..e8f7c79
--- /dev/null
@@ -0,0 +1,257 @@
+
+/*
+ * NO RW COPYRIGHT
+ *
+ */
+
+module ietf-network-topology {
+   yang-version 1;
+   namespace "urn:ietf:params:xml:ns:yang:ietf-network-topology";
+   prefix lnk;
+
+   import ietf-inet-types {
+     prefix inet;
+   }
+   import ietf-network {
+     prefix nd;
+   }
+
+   organization "TBD";
+   contact
+     "WILL-BE-DEFINED-LATER";
+   description
+     "This module defines a common base model for network topology,
+      augmenting the base network model with links to connect nodes,
+      as well as termination points to terminate links on nodes.";
+
+   revision 2015-06-08 {
+     description
+       "Initial revision.";
+     reference "draft-ietf-i2rs-yang-network-topo-01";
+   }
+
+   typedef link-id {
+     type inet:uri;
+     description
+       "An identifier for a link in a topology.
+        The identifier may be opaque.
+        The identifier SHOULD be chosen such that the same link in a
+        real network topology will always be identified through the
+        same identifier, even if the model is instantiated in
+        separate datastores. An implementation MAY choose to capture
+        semantics in the identifier, for example to indicate the type
+        of link and/or the type of topology that the link is a part
+        of.";
+   }
+
+   typedef tp-id {
+     type inet:uri;
+     description
+       "An identifier for termination points on a node.
+        The identifier may be opaque.
+        The identifier SHOULD be chosen such that the same TP in a
+        real network topology will always be identified through the
+        same identifier, even if the model is instantiated in
+        separate datastores. An implementation MAY choose to capture
+        semantics in the identifier, for example to indicate the type
+        of TP and/or the type of node and topology that the TP is a
+        part of.";
+   }
+
+   grouping link-ref {
+     description
+       "References a link in a specific network.";
+     leaf link-ref {
+       type leafref {
+         path "/nd:network[nd:network-id=current()/../"+
+           "nd:network-ref]/link/link-id";
+       }
+       description
+         "A type for an absolute reference a link instance.
+          (This type should not be used for relative references.
+          In such a case, a relative path should be used instead.)";
+     }
+     uses nd:network-ref;
+   }
+
+   grouping tp-ref {
+     description
+       "References a termination point in a specific node.";
+     leaf tp-ref {
+       type leafref {
+         path "/nd:network[nd:network-id=current()/../"+
+           "nd:network-ref]/nd:node[nd:node-id=current()/../"+
+           "nd:node-ref]/termination-point/tp-id";
+       }
+       description
+         "A type for an absolute reference to a termination point.
+          (This type should not be used for relative references.
+          In such a case, a relative path should be used instead.)";
+     }
+     uses nd:node-ref;
+   }
+
+   augment "/nd:network" {
+     description
+       "Add links to the network model.";
+     list link {
+       key "link-id";
+
+       description
+         "A Network Link connects a by Local (Source) node and
+          a Remote (Destination) Network Nodes via a set of the
+          nodes' termination points.
+          As it is possible to have several links between the same
+          source and destination nodes, and as a link could
+          potentially be re-homed between termination points, to
+          ensure that we would always know to distinguish between
+          links, every link is identified by a dedicated link
+          identifier.
+          Note that a link models a point-to-point link, not a
+          multipoint link.
+          Layering dependencies on links in underlay topologies are
+          not represented as the layering information of nodes and of
+          termination points is sufficient.";
+       container source {
+         description
+           "This container holds the logical source of a particular
+            link.";
+         leaf source-node {
+           type leafref {
+             // RIFT change: 
+             path "../../../../nd:network/nd:node/nd:node-id";
+           }
+           mandatory true;
+           description
+             "Source node identifier, must be in same topology.";
+         }
+         leaf source-tp {
+           type leafref {
+             // RIFT change: 
+             path "../../../../nd:network/nd:node[nd:node-id=current()/../"+
+               "source-node]/termination-point/tp-id";
+           }
+           description
+             "Termination point within source node that terminates
+              the link.";
+         }
+       }
+       container destination {
+         description
+           "This container holds the logical destination of a
+            particular link.";
+         leaf dest-node {
+           type leafref {
+             // RIFT change 
+             path "../../../../nd:network/nd:node/nd:node-id";
+           }
+           mandatory true;
+           description
+             "Destination node identifier, must be in the same
+              network.";
+         }
+         leaf dest-tp {
+           type leafref {
+             // RIFT change: 
+             path "../../../../nd:network/nd:node[nd:node-id=current()/../"+
+               "dest-node]/termination-point/tp-id";
+           }
+           description
+             "Termination point within destination node that
+              terminates the link.";
+         }
+       }
+       leaf link-id {
+         type link-id;
+         description
+           "The identifier of a link in the topology.
+            A link is specific to a topology to which it belongs.";
+       }
+       list supporting-link {
+         key "network-ref link-ref";
+         description
+           "Identifies the link, or links, that this link
+            is dependent on.";
+         leaf network-ref {
+           type leafref {
+             // RIFT change: 
+             path "../../../../nd:network/nd:supporting-network/nd:network-ref";
+           }
+           description
+             "This leaf identifies in which underlay topology
+              supporting link is present.";
+         }
+         leaf link-ref {
+           type leafref {
+             path "/nd:network[nd:network-id=current()/.."+
+               "/network-ref]/link/link-id";
+           }
+           description
+             "This leaf identifies a link which is a part
+              of this link's underlay. Reference loops, in which
+              a link identifies itself as its underlay, either
+              directly or transitively, are not allowed.";
+         }
+       }
+     }
+   }
+   augment "/nd:network/nd:node" {
+     description
+       "Augment termination points which terminate links.
+        Termination points can ultimately be mapped to interfaces.";
+     list termination-point {
+       key "tp-id";
+       description
+         "A termination point can terminate a link.
+          Depending on the type of topology, a termination point
+          could, for example, refer to a port or an interface.";
+       leaf tp-id {
+         type tp-id;
+         description
+           "Termination point identifier.";
+       }
+       list supporting-termination-point {
+         key "network-ref node-ref tp-ref";
+         description
+           "The leaf list identifies any termination points that
+            the termination point is dependent on, or maps onto.
+            Those termination points will themselves be contained
+            in a supporting node.
+            This dependency information can be inferred from
+            the dependencies between links.  For this reason,
+            this item is not separately configurable.  Hence no
+            corresponding constraint needs to be articulated.
+            The corresponding information is simply provided by the
+            implementing system.";
+         leaf network-ref {
+           type leafref {
+             // RIFT change:
+             path "/nd:network/nd:node/nd:supporting-node/nd:network-ref"; 
+           }
+           description
+             "This leaf identifies in which topology the
+              supporting termination point is present.";
+         }
+         leaf node-ref {
+           type leafref {
+             // RIFT change: 
+             path "/nd:network/nd:node/nd:supporting-node/nd:node-ref";
+           }
+           description
+             "This leaf identifies in which node the supporting
+              termination point is present.";
+         }
+         leaf tp-ref {
+           type leafref {
+             path "/nd:network[nd:network-id=current()/../"+
+               "network-ref]/nd:node[nd:node-id=current()/../"+
+               "node-ref]/termination-point/tp-id";
+           }
+           description
+             "Reference to the underlay node, must be in a
+              different topology";
+         }
+       }
+     }
+   }
+}
diff --git a/models/plugins/yang/ietf-network.tailf.yang b/models/plugins/yang/ietf-network.tailf.yang
new file mode 100644 (file)
index 0000000..f531f65
--- /dev/null
@@ -0,0 +1,27 @@
+
+/*
+ * NO RW COPYRIGHT
+ *
+ */
+
+module ietf-network-annotation
+{
+  namespace "urn:ietf:params:xml:ns:yang:ietf-network";
+  prefix "ietf-network-ann";
+
+  import tailf-common {
+    prefix tailf;
+  }
+
+  import ietf-network {
+    prefix nd;
+  }
+
+  tailf:annotate "/nd:network" {
+      tailf:callpoint rw_callpoint;
+  }
+
+  tailf:annotate "/nd:network/nd:server-provided" {
+      tailf:callpoint rw_callpoint;
+  }
+}
diff --git a/models/plugins/yang/ietf-network.yang b/models/plugins/yang/ietf-network.yang
new file mode 100644 (file)
index 0000000..a059e94
--- /dev/null
@@ -0,0 +1,157 @@
+
+/*
+ * NO RW COPYRIGHT
+ *
+ */
+
+module ietf-network {
+  yang-version 1;
+  namespace "urn:ietf:params:xml:ns:yang:ietf-network";
+  prefix nd;
+
+  import ietf-inet-types {
+    prefix inet;
+  }
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  organization "TBD";
+  contact
+    "WILL-BE-DEFINED-LATER";
+  description
+    "This module defines a common base model for a collection
+     of nodes in a network. Node definitions are further used
+     in network topologies and inventories.";
+
+  revision 2015-06-08 {
+    description
+      "Initial revision.";
+    reference "draft-ietf-i2rs-yang-network-topo-01";
+  }
+
+  typedef node-id {
+    type inet:uri;
+    description
+      "Identifier for a node.";
+  }
+
+  typedef network-id {
+    type inet:uri;
+    description
+      "Identifier for a network.";
+  }
+
+  grouping network-ref {
+    description
+      "Contains the information necessary to reference a network,
+       for example an underlay network.";
+    leaf network-ref {
+      type leafref {
+        path "/network/network-id";
+      }
+      description
+        "Used to reference a network, for example an underlay
+         network.";
+    }
+  }
+
+  grouping node-ref {
+    description
+      "Contains the information necessary to reference a node.";
+    leaf node-ref {
+      type leafref {
+        path "/network[network-id=current()/../network-ref]"+
+          "/node/node-id";
+      }
+      description
+        "Used to reference a node.
+         Nodes are identified relative to the network they are
+         contained in.";
+    }
+    uses network-ref;
+  }
+
+  list network {
+    config false;
+    key "network-id";
+    description
+      "Describes a network.
+       A network typically contains an inventory of nodes,
+       topological information (augmented through
+       network-topology model), as well as layering
+       information.";
+    container network-types {
+      description
+        "Serves as an augmentation target.
+         The network type is indicated through corresponding
+         presence containers augmented into this container.";
+    }
+    leaf network-id {
+      type network-id;
+      description
+        "Identifies a network.";
+    }
+    leaf server-provided {
+      type boolean;
+      config false;
+      description
+        "Indicates whether the information concerning this
+         particular network is populated by the server
+         (server-provided true, the general case for network
+         information discovered from the server),
+         or whether it is configured by a client
+         (server-provided false, possible e.g. for
+         service overlays managed through a controller).";
+    }
+    list supporting-network {
+      key "network-ref";
+      description
+        "An underlay network, used to represent layered network
+         topologies.";
+
+      leaf network-ref {
+        type leafref {
+          path "/network/network-id";
+        }
+        description
+          "References the underlay network.";
+      }
+    }
+    list node {
+      key "node-id";
+      description
+        "The inventory of nodes of this network.";
+      leaf node-id {
+        type node-id;
+        description
+          "Identifies a node uniquely within the containing
+           network.";
+      }
+      list supporting-node {
+        key "network-ref node-ref";
+        description
+          "Represents another node, in an underlay network, that
+           this node is supported by.  Used to represent layering
+           structure.";
+        leaf network-ref {
+          type leafref {
+            path "../../../supporting-network/network-ref";
+          }
+          description
+            "References the underlay network that the
+             underlay node is part of.";
+        }
+        leaf node-ref {
+          type leafref {
+            path "/network/node/node-id";
+          }
+          description
+            "References the underlay node itself.";
+        }
+      }
+    }
+  }
+}
+
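Editor's note: the node-ref grouping above pairs a network-ref with a leafref whose path carries a predicate, so a node is always resolved relative to its containing network. A minimal Python sketch of that resolution logic, assuming hypothetical instance data held in plain dicts:

    # Illustrative only: resolving a network-ref/node-ref pair against
    # hypothetical ietf-network instance data held in plain Python dicts.
    networks = {
        "net-a": {"nodes": {"node-1": {}, "node-2": {}}},
        "net-b": {"nodes": {"node-9": {}}},
    }

    def resolve_node_ref(network_ref, node_ref):
        # Mimics the leafref path /network[network-id=...]/node/node-id.
        net = networks.get(network_ref)
        if net is None or node_ref not in net["nodes"]:
            raise ValueError("dangling reference %s/%s" % (network_ref, node_ref))
        return net["nodes"][node_ref]

    resolve_node_ref("net-a", "node-2")  # resolves; "net-a"/"node-9" would not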
diff --git a/models/plugins/yang/mano-types.yang b/models/plugins/yang/mano-types.yang
new file mode 100644 (file)
index 0000000..cd5d446
--- /dev/null
@@ -0,0 +1,1991 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module mano-types
+{
+  namespace "urn:ietf:params:xml:ns:yang:nfvo:mano-types";
+  prefix "manotypes";
+
+  import ietf-inet-types {
+    prefix "inet";
+  }
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  revision 2015-04-23 {
+    description
+      "Initial revision. This YANG file defines
+       the reusable base types for VNF Management
+       and Orchestration (MANO).";
+    reference
+      "Derived from earlier versions of base YANG files";
+  }
+
+  typedef parameter-data-type {
+    type enumeration {
+      enum STRING;
+      enum INTEGER;
+      enum BOOLEAN;
+    }
+  }
+
+  grouping primitive-parameter-value {
+    list parameter {
+      description
+          "List of parameters to the configuration primitive.";
+      key "name";
+      leaf name {
+        description
+            "Name of the parameter.";
+        type string;
+      }
+
+      leaf value {
+        description
+            "Value associated with the name.";
+        type string;
+      }
+    }
+  }
+
+  grouping primitive-parameter {
+    leaf name {
+      description
+          "Name of the parameter.";
+      type string;
+    }
+
+    leaf data-type {
+      description
+          "Data type associated with the name.";
+      type manotypes:parameter-data-type;
+    }
+
+    leaf mandatory {
+      description "Is this field mandatory";
+      type boolean;
+      default false;
+    }
+
+    leaf default-value {
+      description "The default value for this field";
+      type string;
+    }
+
+    leaf parameter-pool {
+      description "NSD Parameter pool name to use for this paramter";
+      type string;
+    }
+
+    leaf read-only {
+      description
+        "The value should be greyed out by the UI.
+        Only applies to parameters with default values.";
+      type boolean;
+    }
+
+    leaf hidden {
+      description
+        "The value should be hidden by the UI.
+        Only applies to parameters with default values.";
+      type boolean;
+    }
+  }
+  
+
+  grouping vnf-configuration {
+    container vnf-configuration {
+      rwpb:msg-new VnfConfiguration;
+      description
+          "Information regarding the VNF configuration 
+           is captured here. Note that if the NS contains
+           multiple instances of the same VNF, each instance
+           of the VNF may have a different configuration";
+
+      choice config-method {
+        description
+            "Defines the configuration method for the VNF.";
+        case netconf {
+          description
+              "Use NETCONF for configuring the VNF.";
+          container netconf {
+            leaf target {
+              description
+                  "Netconf configuration target";
+              type enumeration {
+                enum running;
+                enum candidate;
+              }
+            }
+
+            leaf protocol {
+              description
+                  "Protocol to use for netconf (e.g. ssh)";
+              type enumeration {
+                enum None;
+                enum ssh;
+              }
+            }
+
+            leaf port {
+              description
+                  "Port for the netconf server.";
+              type inet:port-number;
+            }
+          }
+        }
+
+        case rest {
+          description
+              "Use REST for configuring the VNF.";
+          container rest {
+            leaf port {
+              description
+                  "Port for the REST server.";
+              type inet:port-number;
+            }
+          }
+        }
+
+        case script {
+          description
+              "Use custom script for configuring the VNF.
+               This script is executed in the context of
+               the Orchestrator.";
+          container script {
+            leaf script-type {
+              description
+                  "Script type - currently supported : bash, expect";
+              type enumeration {
+                enum bash;
+                enum expect;
+              }
+            }
+          }
+        }
+
+        case juju {
+          description
+            "Configure the VNF through Juju.";
+          container juju {
+            leaf charm {
+              description "Juju charm to use with the VNF.";
+              type string;
+            }
+          }
+        }
+      }
+
+      container config-access {
+        leaf mgmt-ip-address {
+          description
+              "IP address to be used to configure this VNF,
+               optional if it is possible to resolve dynamically.";
+          type inet:ip-address;
+        }
+
+        leaf username {
+          description 
+              "username for configuration.";
+          type string;
+        }
+
+        leaf password {
+          description 
+              "Password for configuration access authentication.";
+          type string;
+        }
+      }
+
+      container config-attributes {
+        description
+            "Miscelaneous input parameters to be considered
+             while processing the NSD to apply configuration";
+
+        leaf config-priority {
+          description
+              "Configuration priority - order of confgiration
+               to be applied to each VNF in this NS,
+               low number gets precedence over high number";
+          type uint64;
+        }
+
+        leaf config-delay {
+          description 
+              "Wait (seconds) before applying the configuration to VNF";
+          type uint64;
+        }
+      }
+
+      list service-primitive {
+        rwpb:msg-new ServicePrimitive;
+        description
+          "List of service primitives supported by the
+          configuration agent for this VNF.";
+        key "name";
+
+        leaf name {
+          description
+            "Name of the service primitive.";
+          type string;
+        }
+
+        list parameter {
+          description
+            "List of parameters to the service primitive.";
+          key "name";
+          uses primitive-parameter;
+        }
+      }
+
+      list initial-config-primitive {
+        rwpb:msg-new InitialConfigPrimitive;
+        description
+            "Initial set of configuration primitives.";
+        key "seq";
+        leaf seq {
+          description
+              "Sequence number for the configuration primitive.";
+          type uint64;
+        }
+
+        leaf name {
+          description 
+              "Name of the configuration primitive.";
+          type string;
+        }
+
+        list parameter {
+          key "name";
+          leaf name {
+            type string;
+          }
+
+          leaf value {
+            type string;
+          }
+        }
+      }
+
+      leaf config-template {
+        description
+            "Configuration template for each VNF";
+        type string;
+      }
+    }
+  } // END - grouping vnf-configuration
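Editor's note: for the netconf config-method above, the netconf and config-access containers together carry everything needed to open a session. A minimal sketch using the stock ncclient library (host, credentials, and payload are hypothetical; this is not the project's own wrapper):

    # A minimal sketch, assuming ncclient; host, port and credentials come
    # from the config-access and netconf containers (values hypothetical).
    from ncclient import manager

    CONFIG = """
    <config xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
      <!-- VNF-specific configuration, e.g. rendered from config-template -->
    </config>
    """

    with manager.connect(host="10.0.0.5",       # config-access/mgmt-ip-address
                         port=830,              # netconf/port
                         username="admin",      # config-access/username
                         password="secret",     # config-access/password
                         hostkey_verify=False) as m:
        m.edit_config(target="running", config=CONFIG)  # netconf/target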
+
+  typedef virtual-link-type {
+    description
+        "Type of virtual link
+         ELAN: A multipoint service connecting a set of VNFs
+         // ELINE: For a simple point to point connection
+         //        between a VNF and the existing network.
+         // ETREE: A multipoint service connecting one or
+         //        more roots and a set of leaves, but
+         //        preventing inter-leaf communication.";
+    type enumeration {
+      enum ELAN;
+      // enum ETREE;
+      // enum ELINE;
+    }
+  }
+
+  grouping named-value {
+    leaf name {
+      type string;
+    }
+
+    leaf value {
+      type string;
+    }
+  }
+
+  typedef http-method {
+    description
+      "Type of HTTP operation";
+
+    type enumeration {
+      enum POST;
+      enum PUT;
+      enum GET;
+      enum DELETE;
+      enum OPTIONS;
+      enum PATCH;
+    }
+  }
+
+  typedef api-type {
+    description
+      "Type of API to fetch monitoring params";
+
+    type enumeration {
+      enum HTTP;
+      enum NETCONF;
+      enum SOAP;
+    }
+  }
+
+  typedef json-query-method {
+    description
+      "The method to extract a value from a JSON response
+
+       NAMEKEY - Use the name as the key for a non-nested value.
+       JSONPATH - Use the jsonpath-rw implementation to extract a value.
+       OBJECTPATH - Use the objectpath implementation to extract a value.";
+      type enumeration {
+        enum NAMEKEY;
+        enum JSONPATH;
+        enum OBJECTPATH;
+      }
+  }
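Editor's note: the three extraction methods map onto the Python libraries the typedef names. A short sketch against a hypothetical monitoring response:

    # Sketch of the three extraction methods against a hypothetical
    # monitoring response.
    import json
    from jsonpath_rw import parse   # pip install jsonpath-rw
    from objectpath import Tree     # pip install objectpath

    response = json.loads('{"stats": {"sessions": 42}, "uptime": 3600}')

    # NAMEKEY: the parameter name is a plain top-level key.
    uptime = response["uptime"]

    # JSONPATH: jsonpath-rw expression for nested values.
    sessions = [m.value for m in parse("$.stats.sessions").find(response)][0]

    # OBJECTPATH: objectpath expression for nested values.
    sessions_too = Tree(response).execute("$.stats.sessions")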
+
+  typedef param-value-type {
+    description
+      "The type of the parameter value";
+    type enumeration {
+       enum INT;
+       enum DECIMAL;
+       enum STRING;
+    }
+  }
+
+  typedef connection-point-type {
+    description
+        "Type of connection point
+        VPORT: Virtual Port
+        // VNIC_ADDR: Virtual NIC Address
+        // PNIC_ADDR: Physical NIC Address
+        // PPORT: Physical Port.";
+
+    type enumeration {
+      enum VPORT;
+    }
+  }
+
+  typedef widget-type {
+    description
+        "Type of the widget, typically used by the UI.";
+    type enumeration {
+      enum HISTOGRAM;
+      enum BAR;
+      enum GAUGE;
+      enum SLIDER;
+      enum COUNTER;
+      enum TEXTBOX;
+    }
+  }
+
+  typedef cpu-feature-type {
+    description
+        "Enumeration for CPU features.
+
+         AES: CPU supports advanced instruction set for
+         AES (Advanced Encryption Standard).
+
+         CAT: Cache Allocation Technology (CAT) allows
+         an Operating System, Hypervisor, or similar
+         system management agent to specify the amount
+         of L3 cache (currently the last-level cache
+         in most server and client platforms) space an
+         application can fill (as a hint to hardware
+         functionality, certain features such as power
+         management may override CAT settings).
+
+         CMT: Cache Monitoring Technology (CMT) allows
+         an Operating System, Hypervisor, or similar
+         system management agent to determine the
+         usage of cache based on applications running
+         on the platform. The implementation is
+         directed at L3 cache monitoring (currently
+         the last-level cache in most server and
+         client platforms).
+
+         DDIO: Intel Data Direct I/O (DDIO) enables
+         Ethernet server NICs and controllers to talk
+         directly to the processor cache without a
+         detour via system memory. This enumeration
+         specifies if the VM requires a DDIO
+         capable host.";
+
+    type enumeration {
+      enum PREFER_AES;
+      enum REQUIRE_AES;
+      enum PREFER_CAT;
+      enum REQUIRE_CAT;
+      enum PREFER_CMT;
+      enum REQUIRE_CMT;
+      enum PREFER_DDIO;
+      enum REQUIRE_DDIO;
+      enum REQUIRE_VME;
+      enum PREFER_VME;
+      enum REQUIRE_DE;
+      enum PREFER_DE;
+      enum REQUIRE_PSE;
+      enum PREFER_PSE;
+      enum REQUIRE_TSC;
+      enum PREFER_TSC;
+      enum REQUIRE_MSR;
+      enum PREFER_MSR;
+      enum REQUIRE_PAE;
+      enum PREFER_PAE;
+      enum REQUIRE_MCE;
+      enum PREFER_MCE;
+      enum REQUIRE_CX8;
+      enum PREFER_CX8;
+      enum REQUIRE_APIC;
+      enum PREFER_APIC;
+      enum REQUIRE_SEP;
+      enum PREFER_SEP;
+      enum REQUIRE_MTRR;
+      enum PREFER_MTRR;
+      enum REQUIRE_PGE;
+      enum PREFER_PGE;
+      enum REQUIRE_MCA;
+      enum PREFER_MCA;
+      enum REQUIRE_CMOV;
+      enum PREFER_CMOV;
+      enum REQUIRE_PAT;
+      enum PREFER_PAT;
+      enum REQUIRE_PSE36;
+      enum PREFER_PSE36;
+      enum REQUIRE_CLFLUSH;
+      enum PREFER_CLFLUSH;
+      enum REQUIRE_DTS;
+      enum PREFER_DTS;
+      enum REQUIRE_ACPI;
+      enum PREFER_ACPI;
+      enum REQUIRE_MMX;
+      enum PREFER_MMX;
+      enum REQUIRE_FXSR;
+      enum PREFER_FXSR;
+      enum REQUIRE_SSE;
+      enum PREFER_SSE;
+      enum REQUIRE_SSE2;
+      enum PREFER_SSE2;
+      enum REQUIRE_SS;
+      enum PREFER_SS;
+      enum REQUIRE_HT;
+      enum PREFER_HT;
+      enum REQUIRE_TM;
+      enum PREFER_TM;
+      enum REQUIRE_IA64;
+      enum PREFER_IA64;
+      enum REQUIRE_PBE;
+      enum PREFER_PBE;
+      enum REQUIRE_RDTSCP;
+      enum PREFER_RDTSCP;
+      enum REQUIRE_PNI;
+      enum PREFER_PNI;
+      enum REQUIRE_PCLMULQDQ;
+      enum PREFER_PCLMULQDQ;
+      enum REQUIRE_DTES64;
+      enum PREFER_DTES64;
+      enum REQUIRE_MONITOR;
+      enum PREFER_MONITOR;
+      enum REQUIRE_DS_CPL;
+      enum PREFER_DS_CPL;
+      enum REQUIRE_VMX;
+      enum PREFER_VMX;
+      enum REQUIRE_SMX;
+      enum PREFER_SMX;
+      enum REQUIRE_EST;
+      enum PREFER_EST;
+      enum REQUIRE_TM2;
+      enum PREFER_TM2;
+      enum REQUIRE_SSSE3;
+      enum PREFER_SSSE3;
+      enum REQUIRE_CID;
+      enum PREFER_CID;
+      enum REQUIRE_FMA;
+      enum PREFER_FMA;
+      enum REQUIRE_CX16;
+      enum PREFER_CX16;
+      enum REQUIRE_XTPR;
+      enum PREFER_XTPR;
+      enum REQUIRE_PDCM;
+      enum PREFER_PDCM;
+      enum REQUIRE_PCID;
+      enum PREFER_PCID;
+      enum REQUIRE_DCA;
+      enum PREFER_DCA;
+      enum REQUIRE_SSE4_1;
+      enum PREFER_SSE4_1;
+      enum REQUIRE_SSE4_2;
+      enum PREFER_SSE4_2;
+      enum REQUIRE_X2APIC;
+      enum PREFER_X2APIC;
+      enum REQUIRE_MOVBE;
+      enum PREFER_MOVBE;
+      enum REQUIRE_POPCNT;
+      enum PREFER_POPCNT;
+      enum REQUIRE_TSC_DEADLINE_TIMER;
+      enum PREFER_TSC_DEADLINE_TIMER;
+      enum REQUIRE_XSAVE;
+      enum PREFER_XSAVE;
+      enum REQUIRE_AVX;
+      enum PREFER_AVX;
+      enum REQUIRE_F16C;
+      enum PREFER_F16C;
+      enum REQUIRE_RDRAND;
+      enum PREFER_RDRAND;
+      enum REQUIRE_FSGSBASE;
+      enum PREFER_FSGSBASE;
+      enum REQUIRE_BMI1;
+      enum PREFER_BMI1;
+      enum REQUIRE_HLE;
+      enum PREFER_HLE;
+      enum REQUIRE_AVX2;
+      enum PREFER_AVX2;
+      enum REQUIRE_SMEP;
+      enum PREFER_SMEP;
+      enum REQUIRE_BMI2;
+      enum PREFER_BMI2;
+      enum REQUIRE_ERMS;
+      enum PREFER_ERMS;
+      enum REQUIRE_INVPCID;
+      enum PREFER_INVPCID;
+      enum REQUIRE_RTM;
+      enum PREFER_RTM;
+      enum REQUIRE_MPX;
+      enum PREFER_MPX;
+      enum REQUIRE_RDSEED;
+      enum PREFER_RDSEED;
+      enum REQUIRE_ADX;
+      enum PREFER_ADX;
+      enum REQUIRE_SMAP;
+      enum PREFER_SMAP;
+    }
+  }
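Editor's note: every value in cpu-feature-type follows the same PREFER_/REQUIRE_ naming pattern, so a consumer can recover the policy and the raw CPU flag mechanically. A sketch of that split (the helper name is ours, not part of the model):

    # Each enum value encodes policy + CPU flag; a consumer can split
    # them mechanically (helper name is ours, sketch only).
    def parse_cpu_feature(value):
        policy, _, flag = value.partition("_")
        return policy, flag          # e.g. ("REQUIRE", "AVX2")

    assert parse_cpu_feature("REQUIRE_AVX2") == ("REQUIRE", "AVX2")
    assert parse_cpu_feature("PREFER_TSC_DEADLINE_TIMER") == ("PREFER", "TSC_DEADLINE_TIMER")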
+
+  grouping vm-flavor {
+    container vm-flavor {
+      leaf vcpu-count {
+        description
+            "Number of vcpus for the VM.";
+        type uint16;
+      }
+
+      leaf memory-mb {
+        description
+            "Amount of memory in MB.";
+        type uint64;
+      }
+
+      leaf storage-gb {
+        description
+            "Amount of disk space in GB.";
+        type uint64;
+      }
+    }
+  } //grouping vm-flavor
+
+  grouping vswitch-epa {
+    container vswitch-epa {
+      leaf ovs-acceleration {
+        description
+            "Specifies Open vSwitch acceleration mode.
+             MANDATORY: OVS acceleration is required
+             PREFERRED: OVS acceleration is preferred";
+        type enumeration {
+          enum MANDATORY;
+          enum PREFERRED;
+          enum DISABLED;
+        }
+      }
+
+      leaf ovs-offload {
+        description
+            "Specifies Open vSwitch hardware offload mode.
+             MANDATORY: OVS offload is required
+             PREFERRED: OVS offload is preferred";
+        type enumeration {
+          enum MANDATORY;
+          enum PREFERRED;
+          enum DISABLED;
+        }
+      }
+    }
+  }
+
+  grouping hypervisor-epa {
+    container hypervisor-epa {
+      leaf type {
+        description
+            "Specifies the type of hypervisor.
+             KVM: KVM
+             XEN: XEN";
+        type enumeration {
+          enum PREFER_KVM;
+          enum REQUIRE_KVM;
+        }
+      }
+      leaf version {
+        type string;
+      }
+    }
+  }
+
+  grouping host-epa {
+    container host-epa {
+      description "Specifies the host level EPA attributes.";
+      leaf cpu-model {
+        description
+            "Host CPU model. Examples include: SandyBridge,
+             IvyBridge";
+        type enumeration {
+          enum PREFER_WESTMERE;
+          enum REQUIRE_WESTMERE;
+          enum PREFER_SANDYBRIDGE;
+          enum REQUIRE_SANDYBRIDGE;
+          enum PREFER_IVYBRIDGE;
+          enum REQUIRE_IVYBRIDGE;
+          enum PREFER_HASWELL;
+          enum REQUIRE_HASWELL;
+          enum PREFER_BROADWELL;
+          enum REQUIRE_BROADWELL;
+          enum PREFER_NEHALEM;
+          enum REQUIRE_NEHALEM;
+          enum PREFER_PENRYN;
+          enum REQUIRE_PENRYN;
+          enum PREFER_CONROE;
+          enum REQUIRE_CONROE;
+          enum PREFER_CORE2DUO;
+          enum REQUIRE_CORE2DUO;
+        }
+      }
+
+      leaf cpu-arch {
+        description "Host CPU architecture.";
+        type enumeration {
+          enum PREFER_X86;
+          enum REQUIRE_X86;
+          enum PREFER_X86_64;
+          enum REQUIRE_X86_64;
+          enum PREFER_I686;
+          enum REQUIRE_I686;
+          enum PREFER_IA64;
+          enum REQUIRE_IA64;
+          enum PREFER_ARMV7;
+          enum REQUIRE_ARMV7;
+          enum PREFER_ARMV8;
+          enum REQUIRE_ARMV8;
+        }
+      }
+
+      leaf cpu-vendor {
+        description "Host CPU Vendor.";
+        type enumeration {
+          enum PREFER_INTEL;
+          enum REQUIRE_INTEL;
+          enum PREFER_AMD;
+          enum REQUIRE_AMD;
+        }
+      }
+
+      leaf cpu-socket-count {
+        description "Number of sockets on the host.";
+        type uint64;
+      }
+
+      leaf cpu-core-count {
+        description "Number of cores on the host.";
+        type uint64;
+      }
+
+      leaf cpu-core-thread-count {
+        description "Number of threads per cores on the host.";
+        type uint64;
+      }
+      
+      leaf-list cpu-feature {
+        description
+            "List of CPU features.";
+        type cpu-feature-type;
+      }
+
+      
+      leaf om-cpu-model-string {
+        description "Openmano CPU model string";
+        type string;
+      }
+
+      leaf-list om-cpu-feature {
+        description "Openmano CPU features";
+        type string;
+      }
+    }
+  }
+
+  grouping guest-epa {
+    description "EPA attributes for the guest";
+    container guest-epa {
+      leaf trusted-execution {
+        description "This VM should be allocated from trusted pool";
+        type boolean;
+      }
+
+      leaf mempage-size {
+        description
+            "Memory page allocation size. If a VM requires
+             hugepages, it should choose LARGE or SIZE_2MB
+             or SIZE_1GB. If the VM prefers hugepages it
+             should choose PREFER_LARGE.
+             LARGE        : Require hugepages (either 2MB or 1GB)
+             SMALL        : Doesn't require hugepages
+             SIZE_2MB     : Requires 2MB hugepages
+             SIZE_1GB     : Requires 1GB hugepages
+             PREFER_LARGE : Application prefers hugepages";
+        type enumeration {
+          enum LARGE;
+          enum SMALL;
+          enum SIZE_2MB;
+          enum SIZE_1GB;
+          enum PREFER_LARGE;
+        }
+      }
+
+      leaf cpu-pinning-policy {
+        description
+            "CPU pinning policy describes association
+             between virtual CPUs in guest and the
+             physical CPUs in the host.
+             DEDICATED : Virtual CPUs are pinned to
+                         physical CPUs
+             SHARED    : Multiple VMs may share the
+                         same physical CPUs.
+             ANY       : Any policy is acceptable for the VM";
+        type enumeration {
+          enum DEDICATED;
+          enum SHARED;
+          enum ANY;
+        }
+        default "ANY";
+      }
+
+      leaf cpu-thread-pinning-policy {
+          description
+            "CPU thread pinning policy describes how to
+             place the guest CPUs when the host supports
+             hyper threads:
+             AVOID   : Avoids placing a guest on a host
+                       with threads.
+             SEPARATE: Places vCPUs on separate cores,
+                       and avoids placing two vCPUs on
+                       two threads of same core.
+             ISOLATE : Places each vCPU on a different core,
+                       and places no vCPUs from a different
+                       guest on the same core.
+             PREFER  : Attempts to place vCPUs on threads
+                       of the same core.";
+        type enumeration {
+          enum AVOID;
+          enum SEPARATE;
+          enum ISOLATE;
+          enum PREFER;
+        }
+      }
+
+      list pcie-device {
+        description
+            "List of pcie passthrough devices.";
+        key device-id;
+        leaf device-id {
+          description
+              "Device identifier.";
+          type string;
+        }
+        leaf count {
+          description
+              "Number of devices to attach to the VM.";
+          type uint64;
+        }
+      }
+
+      choice numa-policy {
+        case numa-unaware {
+          leaf numa-unaware {
+            type empty;
+          }
+        }
+
+        case numa-aware {
+          container numa-node-policy {
+            description
+                "This policy defines numa topology of the
+                 guest. Specifically identifies if the guest
+                 should be run on a host with one numa
+                 node or multiple numa nodes. As an example
+                 a guest may want 8 vcpus and 4 GB of
+                 memory. But may want the vcpus and memory
+                 distributed across multiple numa nodes.
+                 The NUMA node 1 may run with 6 vcpus and
+                 3GB, and NUMA node 2 may run with 2 vcpus
+                 and 1GB.";
+
+            leaf node-cnt {
+              description
+                  "The number of numa nodes to expose to the VM.";
+              type uint16;
+            }
+
+            leaf mem-policy {
+              description
+                  "This policy specifies how the memory should
+                   be allocated in a multi-node scenario.
+                   STRICT    : The memory must be allocated
+                               strictly from the memory attached
+                               to the NUMA node.
+                   PREFERRED : The memory should be allocated
+                               preferentially from the memory
+                               attached to the NUMA node";
+              type enumeration {
+                enum STRICT;
+                enum PREFERRED;
+              }
+            }
+
+           list node {
+              key id;
+              leaf id {
+                description
+                    "NUMA node identification. Typically
+                     it's 0 or 1";
+                type uint64;
+              }
+
+              leaf-list vcpu {
+                description
+                    "List of vcpus to allocate on
+                     this numa node.";
+                type uint64;
+              }
+
+              leaf memory-mb {
+                description
+                    "Memory size expressed in MB
+                     for this NUMA node.";
+                type uint64;
+              }
+
+              choice om-numa-type {
+                description
+                    "Openmano Numa type selection";
+
+                case cores {
+                  leaf num-cores {
+                    type uint8;
+                  }
+                }
+
+                case paired-threads {
+                  container paired-threads {
+                    leaf num-paired-threads {
+                      type uint8;
+                    }
+
+                    list paired-thread-ids {
+                      description
+                          "List of thread pairs to use in case of paired-thread numa";
+                      max-elements 16;
+                      key thread-a;
+
+                      leaf thread-a {
+                          type uint8;
+                      }
+
+                      leaf thread-b {
+                          type uint8;
+                      }
+                    }
+                  }
+                }
+                case threads {
+                  leaf num-threads {
+                    type uint8;
+                  }
+                }
+              }
+            }
+
+          }
+        }
+      }
+    }
+  }
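Editor's note: to make the numa-node-policy example concrete, here is the 8-vcpu / 4 GB case from the description rendered as equivalent instance data (a plain-dict sketch, not produced by the tooling):

    # The 8-vcpu / 4 GB example from the description, as equivalent
    # instance data (plain-dict sketch, values hypothetical).
    guest_epa = {
        "mempage-size": "LARGE",
        "cpu-pinning-policy": "DEDICATED",
        "numa-node-policy": {
            "node-cnt": 2,
            "mem-policy": "STRICT",
            "node": [
                {"id": 0, "vcpu": [0, 1, 2, 3, 4, 5], "memory-mb": 3072},
                {"id": 1, "vcpu": [6, 7], "memory-mb": 1024},
            ],
        },
    }

    assert sum(len(n["vcpu"]) for n in guest_epa["numa-node-policy"]["node"]) == 8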
+
+  grouping provider-network {
+    container provider-network {
+      description "Container for the provider network.";
+      leaf physical-network {
+        description
+            "Name of the phsyical network on which the provider
+             network is built.";
+        type string;
+      }
+
+      leaf overlay-type {
+        description
+            "Type of the overlay network.";
+        type enumeration {
+          enum LOCAL;
+          enum FLAT;
+          enum VLAN;
+          enum VXLAN;
+          enum GRE;
+        }
+      }
+      leaf segmentation_id {
+        description
+            "Segmentation ID";
+            type uint32;
+      }
+    }
+  }
+
+  grouping ns-service-primitive {
+    list service-primitive {
+      description
+          "Network service level service primitives.";
+
+      key "name";
+
+      leaf name {
+        description
+            "Name of the service primitive.";
+        type string;
+      }
+
+      list parameter {
+        description
+            "List of parameters for the service primitive.";
+
+        key "name";
+        uses manotypes:primitive-parameter;
+      }
+
+      list parameter-group {
+        description
+            "Grouping of parameters which are logically grouped in UI";
+        key "name";
+
+        leaf name {
+          description
+              "Name of the parameter group";
+          type string;
+        }
+
+        list parameter {
+          description
+              "List of parameters for the service primitive.";
+          key "name";
+          uses manotypes:primitive-parameter;
+        }
+
+        leaf mandatory {
+          description "Is this parameter group mandatory";
+          type boolean;
+          default true;
+        }
+      }
+
+      list vnf-primitive-group {
+        description
+            "List of service primitives grouped by VNF.";
+
+        key "member-vnf-index-ref";
+        leaf member-vnf-index-ref {
+          description
+              "Reference to member-vnf within constituent-vnfds";
+          type uint64;
+        }
+
+        leaf vnfd-id-ref {
+          description
+              "A reference to a vnfd. This is a 
+               leafref to path:
+                   ../../../../nsd:constituent-vnfd
+                   + [nsd:id = current()/../nsd:id-ref]
+                   + /nsd:vnfd-id-ref
+               NOTE: An issue with confd is preventing the
+               use of xpath. Seems to be an issue with leafref
+               to leafref, whose target is in a different module.
+               Once that is resolved this will be switched to use
+               leafref";
+
+          type string;
+        }
+
+        leaf vnfd-name {
+          description
+              "Name of the VNFD";
+          type string;
+        }
+
+        list primitive {
+          key "index";
+
+          leaf index {
+            description "Index of this primitive";
+            type uint32;
+          }
+
+          leaf name {
+            description "Name of the primitive in the VNF primitive ";
+            type string;
+          }
+        }
+      }
+
+      leaf user-defined-script {
+        description
+            "A user defined script.";
+        type string;
+      }
+    }
+  }
+
+  grouping monitoring-param {
+    list http-endpoint {
+      description
+          "List of http endpoints to be used by monitoring params";
+      key path;
+
+      leaf path {
+        description "The HTTP path on the management server";
+        type string;
+      }
+
+      leaf https {
+        description "Pick HTTPS instead of HTTP , Default is false";
+        type boolean;
+        default "false";
+      }
+
+      leaf port {
+        description "The HTTP port to connect to";
+        type inet:port-number;
+      }
+
+      leaf username {
+        description "The HTTP basic auth username";
+        type string;
+      }
+
+      leaf password {
+        description "The HTTP basic auth password";
+        type string;
+      }
+
+      leaf polling_interval_secs {
+        description "The HTTP polling interval in seconds";
+        type uint8;
+        default 2;
+      }
+
+      leaf method {
+        description
+          "This is the method to be performed at the uri.
+           GET by default for action";
+
+        type manotypes:http-method;
+        default "GET";
+      }
+
+      list headers {
+        description "Custom HTTP headers to put on HTTP request";
+        key key;
+        leaf key {
+          description "HTTP header key";
+          type string;
+        }
+
+        leaf value {
+          description "HTTP header value";
+          type string;
+        }
+      }
+    }
+
+    list monitoring-param {
+      description
+          "List of monitoring parameters at the NS level";
+      key id;
+      leaf id {
+        type string;
+      }
+
+      leaf name {
+        type string;
+      }
+
+      leaf http-endpoint-ref {
+        type leafref {
+          path "../../http-endpoint/path";
+        }
+      }
+
+      leaf json-query-method {
+        type json-query-method;
+        default "NAMEKEY";
+      }
+
+      container json-query-params {
+        leaf json-path {
+          description
+            "The jsonpath to use to extract value from JSON structure";
+          type string;
+        }
+        leaf object-path {
+          description
+            "The objectpath to use to extract value from JSON structure";
+          type string;
+        }
+      }
+
+      uses monitoring-param-ui-data;
+      uses monitoring-param-value;
+
+    }
+  }
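Editor's note: putting the http-endpoint leaves together, a single poll for a monitoring parameter might look like the following sketch, using the requests library with hypothetical endpoint values:

    # One poll of an http-endpoint for a monitoring-param, using the
    # requests library (endpoint values hypothetical).
    import requests

    endpoint = {
        "path": "/api/v1/metrics", "https": False, "port": 8080,
        "username": "admin", "password": "secret", "method": "GET",
        "headers": {"Accept": "application/json"},
    }

    scheme = "https" if endpoint["https"] else "http"
    url = "%s://10.0.0.5:%d%s" % (scheme, endpoint["port"], endpoint["path"])

    resp = requests.request(endpoint["method"], url,
                            auth=(endpoint["username"], endpoint["password"]),
                            headers=endpoint["headers"], timeout=5)
    data = resp.json()   # then extract values per json-query-method above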
+
+  grouping monitoring-param-aggregation {
+    typedef aggregation-type {
+      description "aggregation-type";
+      type enumeration {
+        enum AVERAGE;
+        enum MINIMUM;
+        enum MAXIMUM;
+        enum COUNT;
+        enum SUM;
+      }
+    }
+
+    leaf aggregation-type {
+      type aggregation-type;
+    }
+  }
+
+  grouping monitoring-param-ui-data {
+      leaf description {
+        type string;
+      }
+
+      leaf group-tag {
+        description "A simple tag to group monitoring parameters";
+        type string;
+      }
+
+
+      leaf widget-type {
+        type manotypes:widget-type;
+      }
+
+      leaf units {
+        type string;
+      }
+  }
+
+  grouping monitoring-param-value {
+      leaf value-type {
+        type param-value-type;
+        default "INT";
+      }
+
+      container numeric-constraints {
+        leaf min-value {
+          description
+              "Minimum value for the parameter";
+          type uint64;
+        }
+        leaf max-value {
+          description
+              "Maxium value for the parameter";
+          type uint64;
+        }
+      }
+
+      container text-constraints {
+        leaf min-length {
+          description
+              "Minimum string length for the parameter";
+          type uint8;
+        }
+        leaf max-length {
+          description
+              "Maximum string length for the parameter";
+          type uint8;
+        }
+      }
+
+      leaf value-integer {
+        description
+            "Current value for an integer parameter";
+        type int64;
+      }
+
+      leaf value-decimal {
+        description
+            "Current value for a decimal parameter";
+        type decimal64 {
+          fraction-digits 4;
+        }
+      }
+
+      leaf value-string {
+        description
+            "Current value for a string parameter";
+        type string;
+      }
+  }
+
+  grouping control-param {
+    list control-param {
+      description
+          "List of control parameters to manage and
+           update the running configuration of the VNF";
+      key id;
+
+      leaf id {
+        type string;
+      }
+
+      leaf name {
+        type string;
+      }
+
+      leaf description {
+        type string;
+      }
+
+      leaf group-tag {
+        description "A simple tag to group control parameters";
+        type string;
+      }
+
+      leaf min-value {
+        description
+            "Minimum value for the parameter";
+        type uint64;
+      }
+
+      leaf max-value {
+        description
+            "Maxium value for the parameter";
+        type uint64;
+      }
+
+      leaf current-value {
+        description
+            "Current value for the parameter";
+        type uint64;
+      }
+
+      leaf step-value {
+        description
+            "Step value for the parameter";
+        type uint64;
+      }
+
+      leaf units {
+        type string;
+      }
+
+      leaf widget-type {
+        type manotypes:widget-type;
+      }
+
+      leaf url {
+        description
+          "This is the URL where to perform the operation";
+
+        type inet:uri;
+      }
+
+      leaf method {
+        description
+          "This is the method to be performed at the uri.
+           POST by default for action";
+
+        type manotypes:http-method;
+        default "POST";
+      }
+
+      leaf payload {
+        description
+          "This is the operation payload or payload template as stringified
+           JSON. This field provides the data to be sent for this operation
+           call";
+
+        type string;
+      }
+    }
+  }
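Editor's note: a control-param is applied by sending its payload to its url with the configured HTTP method. A minimal sketch with the requests library (URL and payload are hypothetical):

    # Applying a control-param: send the stringified-JSON payload to the
    # configured url with the configured method (values hypothetical).
    import json
    import requests

    control_param = {
        "url": "http://10.0.0.5:8080/api/v1/bandwidth",
        "method": "POST",                           # leaf method, default POST
        "payload": json.dumps({"rate-mbps": 500}),  # leaf payload
    }

    requests.request(control_param["method"], control_param["url"],
                     data=control_param["payload"],
                     headers={"Content-Type": "application/json"},
                     timeout=5)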
+
+  grouping action-param {
+    list action-param {
+      description
+          "List of action parameters to
+           control the VNF";
+      key id;
+      leaf id {
+        type string;
+      }
+
+      leaf name {
+        type string;
+      }
+
+      leaf description {
+        type string;
+      }
+
+      leaf group-tag {
+        description "A simple tag to group monitoring parameter";
+        type string;
+      }
+
+      leaf url {
+        description
+          "This is the URL where to perform the operation";
+        type inet:uri;
+      }
+
+      leaf method {
+        description
+          "This is the method to be performed at the uri.
+           POST by default for action";
+
+        type manotypes:http-method;
+        default "POST";
+      }
+
+      leaf payload {
+        description
+          "This is the operation payload or payload template to be sent in
+           the data for this operation call";
+
+        type string;
+      }
+    }
+  }
+
+  grouping input-parameter {
+    description "";
+
+    list input-parameter {
+      description
+          "List of input parameters";
+
+      key xpath;
+
+
+      leaf xpath {
+        description
+          "A an xpath that specfies which element in a descriptor is to be
+          modified.";
+        type string;
+      }
+
+      leaf value {
+        description
+          "The value that the element specified by the xpath should take when a
+          record is created.";
+        type string;
+      }
+    }
+  }
+
+  grouping input-parameter-xpath {
+    list input-parameter-xpath {
+      description
+          "List of xpaths to parameters inside the NSD
+           that can be customized during instantiation.";
+
+      key "xpath";
+      leaf xpath {
+        description
+            "An xpath that specifies the element in a descriptor.";
+        type string;
+      }
+
+      leaf label {
+        description "A descriptive string";
+        type string;
+      }
+
+      leaf default-value {
+        description " A default value for this input parameter";
+        type string;
+      }
+    }
+  }
+
+  grouping nfvi-metrics {
+    container vcpu {
+      leaf label {
+        description
+          "Label to show in UI";
+        type string;
+        default "VCPU";
+      }
+
+      leaf total {
+        description
+          "The total number of VCPUs available.";
+        type uint64;
+      }
+
+      leaf utilization {
+        description
+          "The VCPU utilization (percentage).";
+        type decimal64 {
+          fraction-digits 2;
+          range "0 .. 100";
+        }
+      }
+    }
+
+    container memory {
+      leaf label {
+        description
+          "Label to show in UI";
+        type string;
+        default "MEMORY";
+      }
+
+      leaf used {
+        description
+          "The amount of memory (bytes) currently in use.";
+        type uint64;
+      }
+
+      leaf total {
+        description
+          "The amount of memory (bytes) available.";
+        type uint64;
+      }
+
+      leaf utilization {
+        description
+          "The memory utilization (percentage).";
+        type decimal64 {
+          fraction-digits 2;
+          range "0 .. 100";
+        }
+      }
+    }
+
+    container storage {
+      leaf label {
+        description
+          "Label to show in UI";
+        type string;
+        default "STORAGE";
+      }
+
+      leaf used {
+        description
+          "The amount of storage (bytes) currently in use.";
+        type uint64;
+      }
+
+      leaf total {
+        description
+          "The amount of storage (bytes) available.";
+        type uint64;
+      }
+
+      leaf utilization {
+        description
+          "The storage utilization (percentage).";
+        type decimal64 {
+          fraction-digits 2;
+          range "0 .. 100";
+        }
+      }
+    }
+
+    container external-ports {
+      leaf label {
+        description
+          "Label to show in UI";
+        type string;
+        default "EXTERNAL PORTS";
+      }
+
+      leaf total {
+        description
+          "The total number of external ports.";
+        type uint64;
+      }
+    }
+
+    container internal-ports {
+      leaf label {
+        description
+          "Label to show in UI";
+        type string;
+        default "INTERNAL PORTS";
+      }
+
+      leaf total {
+        description
+          "The total number of internal ports.";
+        type uint64;
+      }
+    }
+
+    container network {
+      leaf label {
+        description
+          "Label to show in UI";
+        type string;
+        default "NETWORK TRAFFIC";
+      }
+
+      container incoming {
+        leaf label {
+          description
+            "Label to show in UI";
+          type string;
+          default "INCOMING NETWORK TRAFFIC";
+        }
+
+        leaf bytes {
+          description
+            "The cumulative number of incoming bytes.";
+          type uint64;
+        }
+
+        leaf packets {
+          description
+            "The cumulative number of incoming packets.";
+          type uint64;
+        }
+
+        leaf byte-rate {
+          description
+            "The current incoming byte-rate (bytes per second).";
+          type decimal64 {
+            fraction-digits 2;
+          }
+        }
+
+        leaf packet-rate {
+          description
+            "The current incoming packet (packets per second).";
+          type decimal64 {
+            fraction-digits 2;
+          }
+        }
+      }
+
+      container outgoing {
+        leaf label {
+          description
+            "Label to show in UI";
+          type string;
+          default "OUTGOING NETWORK TRAFFIC";
+        }
+
+        leaf bytes {
+          description
+            "The cumulative number of outgoing bytes.";
+          type uint64;
+        }
+
+        leaf packets {
+          description
+            "The cumulative number of outgoing packets.";
+          type uint64;
+        }
+
+        leaf byte-rate {
+          description
+            "The current outgoing byte-rate (bytes per second).";
+          type decimal64 {
+            fraction-digits 2;
+          }
+        }
+
+        leaf packet-rate {
+          description
+            "The current outgoing packet (packets per second).";
+          type decimal64 {
+            fraction-digits 2;
+          }
+        }
+      }
+    }
+  }
+
+  typedef alarm-severity-type {
+    description "An indication of the importance or ugency of the alarm";
+    type enumeration {
+      enum LOW;
+      enum MODERATE;
+      enum CRITICAL;
+    }
+  }
+
+  typedef alarm-metric-type {
+    description "The type of metrics to register the alarm for";
+    type enumeration {
+      enum CPU_UTILIZATION;
+      enum MEMORY_UTILIZATION;
+      enum STORAGE_UTILIZATION;
+    }
+  }
+
+  typedef alarm-statistic-type {
+    description
+        "The type of statistic to used to measure a metric to determine
+        threshold crossing for an alarm.";
+    type enumeration {
+      enum AVERAGE;
+      enum MINIMUM;
+      enum MAXIMUM;
+      enum COUNT;
+      enum SUM;
+    }
+  }
+
+  typedef alarm-operation-type {
+    description
+        "The relational operator used to define whether an alarm should be
+        triggered when, say, the metric statistic goes above or below a
+        specified value.";
+    type enumeration {
+      enum GE; // greater than or equal
+      enum LE; // less than or equal
+      enum GT; // greater than
+      enum LT; // less than
+      enum EQ; // equal
+    }
+  }
+
+  grouping alarm {
+    leaf alarm-id {
+      description
+          "This field is reserved for the identifier assigned by the cloud
+          provider";
+
+      type string;
+    }
+
+    leaf name {
+      description "A human readable string to identify the alarm";
+      type string;
+    }
+
+    leaf description {
+      description "A string containing a description of this alarm";
+      type string;
+    }
+
+    leaf vdur-id {
+      description
+          "The identifier of the VDUR that the alarm is associated with";
+      type string;
+    }
+
+    container actions {
+      list ok {
+        key "url";
+        leaf url {
+          type string;
+        }
+      }
+
+      list insufficient-data {
+        key "url";
+        leaf url {
+          type string;
+        }
+      }
+
+      list alarm {
+        key "url";
+        leaf url {
+          type string;
+        }
+      }
+    }
+
+    leaf repeat {
+      description
+          "This flag indicates whether the alarm should be repeatedly emitted
+          while the associated threshold has been crossed.";
+
+      type boolean;
+      default true;
+    }
+
+    leaf enabled {
+      description
+          "This flag indicates whether the alarm has been enabled or
+          disabled.";
+
+      type boolean;
+      default true;
+    }
+
+    leaf severity {
+      description "A measure of the important or urgency of the alarm";
+      type alarm-severity-type;
+    }
+
+    leaf metric {
+      description "The metric to be tracked by this alarm.";
+      type alarm-metric-type;
+    }
+
+    leaf statistic {
+      description "The type of metric statistic that is tracked by this alarm";
+      type alarm-statistic-type;
+    }
+
+    leaf operation {
+      description
+          "The relational operator that defines whether the alarm should be
+          triggered when the metric statistic is, say, above or below the
+          specified threshold value.";
+      type alarm-operation-type;
+    }
+
+    leaf value {
+      description
+          "This value defines the threshold that, if crossed, will trigger
+          the alarm.";
+      type decimal64 {
+        fraction-digits 4;
+      }
+    }
+
+    leaf period {
+      description
+          "The period defines the length of time (seconds) that the metric
+          data are collected over in order to evaluate the chosen
+          statistic.";
+      type uint32;
+    }
+
+    leaf evaluations {
+      description
+          "This is the number of samples of the metric statistic used to
+          evaluate threshold crossing. Each sample or evaluation is equal to
+          the metric statistic obtained for a given period. This can be used
+          to mitigate spikes in the metric that may skew the statistic of
+          interest.";
+      type uint32;
+    }
+  }
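Editor's note: the statistic, operation, value, period and evaluations leaves combine into a threshold-crossing test. A sketch of that evaluation, assuming one list of samples per collection period:

    # How statistic/operation/value/evaluations combine into a
    # threshold-crossing test (sketch; one sample list per period).
    import operator

    OPS = {"GE": operator.ge, "LE": operator.le, "GT": operator.gt,
           "LT": operator.lt, "EQ": operator.eq}
    STATS = {"AVERAGE": lambda xs: sum(xs) / len(xs), "MINIMUM": min,
             "MAXIMUM": max, "COUNT": len, "SUM": sum}

    def alarm_triggered(periods, statistic, operation, value, evaluations):
        recent = periods[-evaluations:]
        if len(recent) < evaluations:
            return False   # not enough data collected yet
        return all(OPS[operation](STATS[statistic](p), value) for p in recent)

    # CPU_UTILIZATION average >= 90.0 over 3 consecutive periods:
    alarm_triggered([[95, 92], [91, 93], [94, 96]], "AVERAGE", "GE", 90.0, 3)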
+
+  typedef cloud-account-type {
+    description "cloud account type";
+    type enumeration {
+      enum aws;
+      enum cloudsim;
+      enum cloudsim_proxy;
+      enum mock;
+      enum openmano;
+      enum openstack;
+      enum vsphere;
+      enum openvim;
+    }
+  }
+  
+  grouping host-aggregate {
+    list host-aggregate {
+      description "Name of the Host Aggregate";
+      key "metadata-key";
+      
+      leaf metadata-key {
+        type string;
+      }
+      leaf metadata-value {
+        type string;
+      }
+    }
+  }
+  
+  grouping placement-group-input {
+    leaf cloud-type {
+      type manotypes:cloud-account-type;
+    }
+    choice cloud-provider {
+      case openstack {           
+        container availability-zone {
+          description "Name of the Availability Zone";
+          leaf name {
+            type string;
+          }
+        }
+        container server-group {
+          description "Name of the Affinity/Anti-Affinity Server Group";
+          leaf name {
+            type string;
+          }
+        }
+        uses host-aggregate;
+      }
+      case aws {
+        leaf aws-construct {
+          type empty;
+        }
+      }
+      case openmano {
+        leaf openmano-construct {
+          type empty;
+        }        
+      }
+      case vsphere {
+        leaf vsphere-construct {
+          type empty;
+        }
+      }
+      case mock {
+        leaf mock-construct {
+          type empty;
+        }
+      }
+      case cloudsim {
+        leaf cloudsim-construct {
+          type empty;
+        }
+      }
+    }
+  }
+  
+  grouping placement-group-info {
+    description "";
+
+    leaf name {
+      description
+          "Place group construct to define the compute resource placement strategy
+           in cloud environment";
+      type string;
+    }
+
+    leaf requirement {
+      description "This is free text space used to describe the intent/rationale
+                   behind this placement group. This is for human consumption only";
+      type string;
+    }
+    
+    leaf strategy {
+      description
+          "Strategy associated with this placement group
+             The following values are possible:
+               - COLOCATION: Colocation strategy implies an intent to share the physical
+                             infrastructure (hypervisor/network) among all members
+                             of this group.
+               - ISOLATION: Isolation strategy implies an intent not to share the physical
+                            infrastructure (hypervisor/network) among the members
+                            of this group.
+             ";
+      type enumeration {
+        enum COLOCATION;
+        enum ISOLATION;
+      }
+      default "COLOCATION";
+    }
+  }
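Editor's note: on OpenStack the strategy leaf maps naturally onto server-group policies; one plausible mapping (an assumption on our part, not defined by this model):

    # One plausible mapping onto OpenStack server-group policies
    # (an assumption; the model itself does not define this).
    def strategy_to_server_group_policy(strategy):
        return {"COLOCATION": "affinity", "ISOLATION": "anti-affinity"}[strategy]

    assert strategy_to_server_group_policy("ISOLATION") == "anti-affinity"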
+
+  grouping ip-profile-info {
+    description "Grouping for IP-Profile";
+    container ip-profile-params {
+      
+      leaf ip-version {
+        type inet:ip-version;
+        default ipv4;
+      }
+
+      leaf subnet-address {
+        description "Subnet IP prefix associated with IP Profile";
+        type inet:ip-prefix;
+      }
+
+      leaf gateway-address {
+        description "IP Address of the default gateway associated with IP Profile";
+        type inet:ip-address;
+      }
+
+      leaf security-group {
+        description "Name of the security group";
+        type string;
+      }
+
+      leaf-list dns-server {
+        description "List of DNS Servers associated with IP Profile";
+        type inet:ip-address;
+      }
+
+      container dhcp-params {  
+        leaf enabled {
+          description "This flag indicates if DHCP is enabled or not";
+          type boolean;
+          default true;
+        }
+
+        leaf start-address {
+          description "Start IP address of the IP-Address range associated with DHCP domain";
+          type inet:ip-address;
+        }
+
+        leaf count {
+          description "Size of the DHCP pool associated with DHCP domain";
+          type uint32;
+        }
+      }
+
+      leaf subnet-prefix-pool {
+        description "VIM Specific reference to pre-created subnet prefix";
+        type string;
+      }
+    }
+  }
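Editor's note: the dhcp-params start-address and count leaves imply a concrete address range. A sketch of that expansion using the standard-library ipaddress module (addresses hypothetical):

    # Expanding dhcp-params (start-address + count) into the concrete
    # pool (standard-library ipaddress; addresses hypothetical).
    import ipaddress

    dhcp_params = {"enabled": True, "start-address": "10.10.1.50", "count": 20}

    start = ipaddress.ip_address(dhcp_params["start-address"])
    pool = [str(start + i) for i in range(dhcp_params["count"])]
    assert pool[0] == "10.10.1.50" and pool[-1] == "10.10.1.69"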
+
+  grouping ip-profile-list {
+    list ip-profiles {
+      description
+          "List of IP Profiles.
+             IP Profile describes the IP characteristics for the Virtual-Link";
+    
+      key "name";
+
+      leaf name {
+        description "Name of the IP-Profile";
+        type string;
+      }
+      
+      leaf description {
+        description "Description for IP profile";
+        type string;
+      }
+      
+      uses ip-profile-info;
+    }
+  }
+  
+}
diff --git a/models/plugins/yang/nsd.yang b/models/plugins/yang/nsd.yang
new file mode 100644 (file)
index 0000000..5fffa45
--- /dev/null
@@ -0,0 +1,917 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module nsd
+{
+  namespace "urn:ietf:params:xml:ns:yang:nfvo:nsd";
+  prefix "nsd";
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import vld {
+    prefix "vld";
+  }
+
+  import vnfd {
+    prefix "vnfd";
+  }
+
+  import ietf-inet-types {
+    prefix "inet";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  revision 2014-10-27 {
+    description
+      "Initial revision. This YANG file defines
+       the Network Service Descriptor (NSD)";
+    reference
+      "Derived from earlier versions of base YANG files";
+  }
+
+  typedef scaling-trigger {
+    type enumeration {
+      enum pre-scale-in {
+        value 1;
+      }
+      enum post-scale-in {
+        value 2;
+      }
+      enum pre-scale-out {
+        value 3;
+      }
+      enum post-scale-out {
+        value 4;
+      }
+    }
+  }
+
+  typedef scaling-policy-type {
+    type enumeration {
+      enum manual {
+        value 1;
+      }
+      enum automatic {
+        value 2;
+      }
+    }
+  }
+
+  typedef scaling-criteria-operation {
+    type enumeration {
+      enum AND {
+        value 1;
+      }
+      enum OR {
+        value 2;
+      }
+    }
+  }
+
+  grouping primitive-parameter {
+    leaf name {
+      description
+          "Name of the parameter.";
+      type string;
+    }
+
+    leaf data-type {
+      description
+          "Data type associated with the name.";
+      type manotypes:parameter-data-type;
+    }
+
+    leaf mandatory {
+      description "Is this field mandatory";
+      type boolean;
+      default false;
+    }
+
+    leaf default-value {
+      description "The default value for this field";
+      type string;
+    }
+
+    leaf parameter-pool {
+      description "NSD Parameter pool name to use for this paramter";
+      type string;
+    }
+  }
+
+  grouping ns-initial-config-primitive {
+    leaf seq {
+      description
+          "Sequence number for the configuration primitive.";
+      type uint64;
+    }
+
+    leaf name {
+      description
+          "Name of the configuration primitive.";
+      type string;
+      mandatory "true";
+    }
+
+    leaf user-defined-script {
+      description
+          "A user defined script.";
+      type string;
+    }
+
+    list parameter {
+      key "name";
+      leaf name {
+        type string;
+      }
+
+      leaf value {
+        type string;
+      }
+    }
+  }
+
+  grouping nsd-descriptor {
+    leaf id {
+      description "Identifier for the NSD.";
+      type string;
+    }
+
+    leaf name {
+      description "NSD name.";
+      mandatory true;
+      type string;
+    }
+
+    leaf short-name {
+      description "NSD short name.";
+      type string;
+    }
+
+    leaf vendor {
+      description "Vendor of the NSD.";
+      type string;
+    }
+
+    leaf logo {
+      description
+        "File path for  the vendor specific logo. For example icons/mylogo.png.
+         The logo  should be part of the network service";
+      type string;
+    }
+
+    leaf description {
+      description "Description of the NSD.";
+      type string;
+    }
+
+    leaf version {
+      description "Version of the NSD";
+      type string;
+    }
+
+    list connection-point {
+      description
+          "List for external connection points.
+          Each NS has one or more external connection
+          points. As the name implies that external
+          connection points are used for connecting
+          the NS to other NS or to external networks.
+          Each NS exposes these connection points to
+          the orchestrator. The orchestrator can
+          construct network service chains by
+          connecting the connection points between
+          different NS.";
+
+      key "name";
+      leaf name {
+        description
+            "Name of the NS connection point.";
+        type string;
+      }
+
+      leaf type {
+        description
+            "Type of the connection point.";
+        type manotypes:connection-point-type;
+      }
+    }
+
+    /* Still having issues modelling this,
+       see the comments under vnfd-connection-point-ref
+     */
+    list vld {
+      description
+          "List of Virtual Link Descriptors.";
+
+      key "id";
+
+      leaf id {
+        description
+            "Identifier for the VLD.";
+        type string;
+      }
+
+      leaf name {
+        description
+            "Virtual Link Descriptor (VLD) name.";
+        type string;
+      }
+
+      leaf short-name {
+        description
+            "Short name for VLD for UI";
+        type string;
+      }
+
+      leaf vendor {
+        description "Provider of the VLD.";
+        type string;
+      }
+
+      leaf description {
+        description "Description of the VLD.";
+        type string;
+      }
+
+      leaf version {
+        description "Version of the VLD";
+        type string;
+      }
+
+      leaf type {
+        type manotypes:virtual-link-type;
+      }
+
+      leaf root-bandwidth {
+        description
+            "For ELAN this is the aggregate bandwidth.";
+        type uint64;
+      }
+
+      leaf leaf-bandwidth {
+        description
+            "For ELAN this is the bandwidth of branches.";
+        type uint64;
+      }
+
+      list vnfd-connection-point-ref {
+        description
+            "A list of references to connection points.";
+        key "member-vnf-index-ref";
+
+        leaf member-vnf-index-ref {
+          description "Reference to member-vnf within constituent-vnfds";
+          type leafref {
+            path "../../../constituent-vnfd/member-vnf-index";
+          }
+        }
+
+        leaf vnfd-id-ref {
+          description
+              "A reference to a vnfd. This is a
+               leafref to path:
+                   ../../nsd:constituent-vnfd
+                   + [nsd:id = current()/../nsd:id-ref]
+                   + /nsd:vnfd-id-ref
+               NOTE: An issue with confd is preventing the
+               use of xpath. It seems to be an issue with a leafref
+               to a leafref whose target is in a different module.
+               Once that is resolved, this will be switched to use a
+               leafref.";
+          type string;
+        }
+
+        leaf vnfd-connection-point-ref {
+          description
+              "A reference to a connection point name
+               in a vnfd. This is a leafref to path:
+                   /vnfd:vnfd-catalog/vnfd:vnfd
+                   + [vnfd:id = current()/../nsd:vnfd-id-ref]
+                   + /vnfd:connection-point/vnfd:name
+               NOTE: An issue with confd is preventing the
+               use of xpath. It seems to be an issue with a leafref
+               to a leafref whose target is in a different module.
+               Once that is resolved, this will be switched to use a
+               leafref.";
+          type string;
+        }
+      }
+
+      // replicate for pnfd container here
+      uses manotypes:provider-network;
+
+      choice init-params {
+        description "Extra parameters for VLD instantiation";
+
+        case vim-network-ref {
+          leaf vim-network-name {
+            description
+                "Name of network in VIM account. This is used to indicate
+                   pre-provisioned network name in cloud account.";
+            type string;
+          }
+        }
+        case vim-network-profile {
+          leaf ip-profile-ref {
+            description "Named reference to IP-profile object";
+            type string;
+          }
+        }
+      }
+    }
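+
+    /*
+     * Illustrative sketch (not part of the model): a minimal VLD instance
+     * as XML instance data. All ids and names here are hypothetical, and
+     * the ELAN type value is assumed from manotypes:virtual-link-type:
+     *
+     *   <vld>
+     *     <id>vld-1</id>
+     *     <name>management</name>
+     *     <type>ELAN</type>
+     *     <vnfd-connection-point-ref>
+     *       <member-vnf-index-ref>1</member-vnf-index-ref>
+     *       <vnfd-id-ref>vnfd-1</vnfd-id-ref>
+     *       <vnfd-connection-point-ref>eth0</vnfd-connection-point-ref>
+     *     </vnfd-connection-point-ref>
+     *     <vim-network-name>mgmt-net</vim-network-name>
+     *   </vld>
+     */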
+
+    list constituent-vnfd {
+      description
+          "List of VNFDs that are part of this
+          network service.";
+
+      key "member-vnf-index";
+
+      leaf member-vnf-index {
+        description
+          "Identifier/index for the VNFD. This separate id
+           is required to ensure that multiple VNFs can be
+           part of single NS";
+        type uint64;
+      }
+
+      leaf vnfd-id-ref {
+        description
+          "Identifier for the VNFD.";
+        type leafref {
+          path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
+        }
+      }
+
+      leaf start-by-default {
+        description
+          "VNFD is started as part of the NS instantiation";
+        type boolean;
+        default true;
+      }
+    }
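+
+    /*
+     * Illustrative sketch (not part of the model): two constituent-vnfd
+     * entries showing how member-vnf-index lets the same (hypothetical)
+     * VNFD "firewall-vnfd" appear twice within one NS:
+     *
+     *   <constituent-vnfd>
+     *     <member-vnf-index>1</member-vnf-index>
+     *     <vnfd-id-ref>firewall-vnfd</vnfd-id-ref>
+     *   </constituent-vnfd>
+     *   <constituent-vnfd>
+     *     <member-vnf-index>2</member-vnf-index>
+     *     <vnfd-id-ref>firewall-vnfd</vnfd-id-ref>
+     *     <start-by-default>false</start-by-default>
+     *   </constituent-vnfd>
+     */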
+
+    list scaling-group-descriptor {
+      description
+          "scaling group descriptor within this network service.
+           The scaling group defines a group of VNFs,
+           and the ratio of VNFs in the network service
+           that is used as target for scaling action";
+
+      key "name";
+
+      leaf name {
+        description "Name of this scaling group.";
+        type string;
+      }
+
+      list scaling-policy {
+
+        key "name";
+
+        leaf name {
+          description
+              "Name of the scaling policy";
+          type string;
+        }
+
+        leaf scaling-type {
+          description
+              "Type of scaling";
+          type scaling-policy-type;
+        }
+
+        leaf enabled {
+          description
+            "Specifies if the scaling policy can be applied";
+          type boolean;
+          default true;
+        }
+
+        leaf scale-in-operation-type {
+          description
+              "Operation to be applied to check between scaling criterias to 
+               check if the scale in threshold condition has been met.
+               Defaults to AND";
+          type scaling-criteria-operation;
+          default AND;
+        }
+
+        leaf scale-out-operation-type {
+          description
+              "Operation to be applied to check between scaling criterias to 
+               check if the scale out threshold condition has been met.
+               Defauls to OR";
+          type scaling-criteria-operation;
+          default OR;
+        }
+
+        leaf threshold-time {
+          description
+            "The duration for which the criteria must hold true";
+          type uint32;
+          mandatory true;
+        }
+
+        leaf cooldown-time {
+          description
+            "The duration after a scaling-in/scaling-out action has been
+            triggered, for which there will be no further optional";
+          type uint32;
+          mandatory true;
+        }
+
+        list scaling-criteria {
+          description
+              "list of conditions to be met for generating scaling
+                 requests";
+          key "name";
+
+          leaf name {
+            type string;
+          }
+
+          leaf scale-in-threshold {
+            description
+                "Value below which scale-in requests are generated";
+            type uint64;
+          }
+
+          leaf scale-out-threshold {
+            description
+                "Value above which scale-out requests are generated";
+            type uint64;
+          }
+
+          leaf ns-monitoring-param-ref {
+            description 
+               "Reference to the NS level monitoring parameter
+                that is aggregated";
+            type leafref {
+              path "../../../../monitoring-param/id";
+            }
+          }
+        }
+      }
+
+      list vnfd-member {
+        description "List of VNFs in this scaling group";
+        key "member-vnf-index-ref";
+
+        leaf member-vnf-index-ref {
+          description "member VNF index of this member VNF";
+          type leafref {
+            path "../../../constituent-vnfd/member-vnf-index";
+          }
+        }
+
+        leaf count {
+          description
+            "count of this member VNF  within this scaling group.
+             The count allows to define  the number of instances
+             when a scaling action targets this scaling group";
+          type uint32;
+          default 1;
+        }
+      }
+
+      leaf min-instance-count {
+        description
+          "Minimum instances of the scaling group which are allowed.
+          These instances are created by default when the network service
+          is instantiated.";
+        type uint32;
+        default 0;
+      }
+
+      leaf max-instance-count {
+        description
+          "Maximum instances of this scaling group that are allowed
+           in a single network service. The network service scaling
+           will fail, when the number of service group instances
+           exceed the max-instance-count specified.";
+        type uint32;
+        default 10;
+      }
+
+      list scaling-config-action {
+        description "List of scaling config actions";
+        key "trigger";
+
+        leaf trigger {
+          description "scaling trigger";
+          type scaling-trigger;
+        }
+
+        leaf ns-config-primitive-name-ref {
+          description "Reference to the NS config name primitive";
+          type leafref {
+            path "../../../service-primitive/name";
+          }
+        }
+      }
+    }
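+
+    /*
+     * Illustrative sketch (not part of the model): a scaling group that
+     * adds one instance of member VNF 2 per scale-out. Names, thresholds,
+     * and the scaling-type value "automatic" are all assumptions here;
+     * scale-out fires when the referenced NS monitoring parameter stays
+     * above 80 for the threshold-time duration:
+     *
+     *   <scaling-group-descriptor>
+     *     <name>web-tier</name>
+     *     <scaling-policy>
+     *       <name>cpu-policy</name>
+     *       <scaling-type>automatic</scaling-type>
+     *       <threshold-time>60</threshold-time>
+     *       <cooldown-time>120</cooldown-time>
+     *       <scaling-criteria>
+     *         <name>cpu-util</name>
+     *         <scale-out-threshold>80</scale-out-threshold>
+     *         <scale-in-threshold>20</scale-in-threshold>
+     *         <ns-monitoring-param-ref>mp-1</ns-monitoring-param-ref>
+     *       </scaling-criteria>
+     *     </scaling-policy>
+     *     <vnfd-member>
+     *       <member-vnf-index-ref>2</member-vnf-index-ref>
+     *       <count>1</count>
+     *     </vnfd-member>
+     *     <max-instance-count>5</max-instance-count>
+     *   </scaling-group-descriptor>
+     */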
+
+    list placement-groups {
+      description "List of placement groups at NS level";
+
+      key "name";
+      uses manotypes:placement-group-info;
+
+      list member-vnfd {
+        description
+            "List of VNFDs that are part of this placement group";
+
+        key "member-vnf-index-ref";
+
+        leaf member-vnf-index-ref {
+          description "member VNF index of this member VNF";
+          type leafref {
+            path "../../../constituent-vnfd/member-vnf-index";
+          }
+        }
+
+        leaf vnfd-id-ref {
+          description
+              "Identifier for the VNFD.";
+          type leafref {
+            path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
+          }
+        }
+      }
+    }
+
+    uses manotypes:ip-profile-list;
+
+    list vnf-dependency {
+      description
+          "List of VNF dependencies.";
+      key vnf-source-ref;
+      leaf vnf-source-ref {
+        type leafref {
+          path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
+        }
+      }
+      leaf vnf-depends-on-ref {
+        description
+            "Reference to VNF that sorce VNF depends.";
+        type leafref {
+          path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
+        }
+      }
+    }
+
+    list vnffgd {
+      description
+          "List of VNF Forwarding Graph Descriptors (VNFFGD).";
+
+      key "id";
+
+      leaf id {
+        description
+            "Identifier for the VNFFGD.";
+        type string;
+      }
+
+      leaf name {
+        description
+            "VNFFGD name.";
+        type string;
+      }
+
+      leaf short-name {
+        description
+            "Short name for VNFFGD for UI";
+        type string;
+      }
+
+      leaf vendor {
+        description "Provider of the VNFFGD.";
+        type string;
+      }
+
+      leaf description {
+        description "Description of the VNFFGD.";
+        type string;
+      }
+
+      leaf version {
+        description "Version of the VNFFGD";
+        type string;
+      }
+
+      list rsp {
+        description
+            "List of Rendered Service Paths (RSP).";
+
+        key "id";
+
+        leaf id {
+          description
+              "Identifier for the RSP.";
+          type string;
+        }
+
+        leaf name {
+          description
+              "RSP name.";
+          type string;
+        }
+
+        list vnfd-connection-point-ref {
+          description
+                "A list of references to connection points.";
+          key "member-vnf-index-ref";
+
+          leaf member-vnf-index-ref {
+            description "Reference to member-vnf within constituent-vnfds";
+            type leafref {
+              path "../../../../constituent-vnfd/member-vnf-index";
+            }
+          }
+
+          leaf order {
+            type uint8;
+            description
+                "A number that denotes the order of a VNF in a chain";
+          }
+
+          leaf vnfd-id-ref {
+            description
+                "A reference to a vnfd. This is a
+                 leafref to path:
+                     ../../../../nsd:constituent-vnfd
+                     + [nsd:id = current()/../nsd:id-ref]
+                     + /nsd:vnfd-id-ref
+                 NOTE: An issue with confd is preventing the
+                 use of xpath. It seems to be an issue with a leafref
+                 to a leafref whose target is in a different module.
+                 Once that is resolved, this will be switched to use a
+                 leafref.";
+            type string;
+          }
+
+          leaf vnfd-connection-point-ref {
+            description
+                "A reference to a connection point name
+                 in a vnfd. This is a leafref to path:
+                     /vnfd:vnfd-catalog/vnfd:vnfd
+                     + [vnfd:id = current()/../nsd:vnfd-id-ref]
+                     + /vnfd:connection-point/vnfd:name
+                 NOTE: An issue with confd is preventing the
+                 use of xpath. It seems to be an issue with a leafref
+                 to a leafref whose target is in a different module.
+                 Once that is resolved, this will be switched to use a
+                 leafref.";
+            type string;
+          }
+        }
+      } //rsp
+
+      list classifier {
+        description
+            "List of classifier rules.";
+
+        key "id";
+
+        leaf id {
+          description
+              "Identifier for the classifier rule.";
+          type string;
+        }
+
+        leaf name {
+          description
+              "Name of the classifier.";
+          type string;
+        }
+
+        leaf rsp-id-ref {
+          description
+              "A reference to the RSP.";
+          type leafref {
+            path "../../rsp/id";
+          }
+        }
+
+        leaf member-vnf-index-ref {
+          description "Reference to member-vnf within constituent-vnfds";
+          type leafref {
+            path "../../../constituent-vnfd/member-vnf-index";
+          }
+        }
+
+        leaf vnfd-id-ref {
+          description
+              "A reference to a vnfd. This is a
+                  leafref to path:
+                      ../../../nsd:constituent-vnfd
+                      + [nsd:id = current()/../nsd:id-ref]
+                      + /nsd:vnfd-id-ref
+                  NOTE: An issue with confd is preventing the
+                  use of xpath. It seems to be an issue with a leafref
+                  to a leafref whose target is in a different module.
+                  Once that is resolved, this will be switched to use a
+                  leafref.";
+          type string;
+        }
+
+        leaf vnfd-connection-point-ref {
+          description
+              "A reference to a connection point name
+                  in a vnfd. This is a leafref to path:
+                      /vnfd:vnfd-catalog/vnfd:vnfd
+                      + [vnfd:id = current()/../nsd:vnfd-id-ref]
+                      + /vnfd:connection-point/vnfd:name
+                  NOTE: An issue with confd is preventing the
+                  use of xpath. It seems to be an issue with a leafref
+                  to a leafref whose target is in a different module.
+                  Once that is resolved, this will be switched to use a
+                  leafref.";
+          type string;
+        }
+
+        list match-attributes {
+          description
+              "List of match attributes.";
+
+          key "id";
+
+          leaf id {
+            description
+                "Identifier for the classifier match attribute rule.";
+            type string;
+          }
+
+          leaf ip-proto {
+            description
+                "IP Protocol.";
+            type uint8;
+          }
+
+          leaf source-ip-address {
+            description
+                "Source IP address.";
+            type inet:ip-address;
+          }
+
+          leaf destination-ip-address {
+            description
+                "Destination IP address.";
+            type inet:ip-address;
+          }
+
+          leaf source-port {
+            description
+                "Source port number.";
+            type inet:port-number;
+          }
+
+          leaf destination-port {
+            description
+                "Destination port number.";
+            type inet:port-number;
+          }
+          //TODO: Add more match criteria
+        } //match-attributes
+      } // classifier
+    } // vnffgd
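+
+    /*
+     * Illustrative sketch (not part of the model): a VNFFGD classifier
+     * that steers TCP (ip-proto 6) traffic destined to port 80 into a
+     * rendered service path; all ids and names are hypothetical:
+     *
+     *   <classifier>
+     *     <id>cls-1</id>
+     *     <name>http-to-chain</name>
+     *     <rsp-id-ref>rsp-1</rsp-id-ref>
+     *     <member-vnf-index-ref>1</member-vnf-index-ref>
+     *     <match-attributes>
+     *       <id>match-1</id>
+     *       <ip-proto>6</ip-proto>
+     *       <destination-port>80</destination-port>
+     *     </match-attributes>
+     *   </classifier>
+     */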
+
+    list monitoring-param {
+      description
+        "List of monitoring parameters from VNF's that should be
+        propogated up into NSR";
+      key "id";
+
+      leaf id {
+        type string;
+      }
+
+      leaf name {
+        type string;
+      }
+
+      uses manotypes:monitoring-param-value;
+      uses manotypes:monitoring-param-ui-data;
+      uses manotypes:monitoring-param-aggregation;
+
+      list vnfd-monitoring-param {
+        description "A list of VNFD monitoring params";
+        key "vnfd-id-ref vnfd-monitoring-param-ref";
+
+        leaf vnfd-id-ref {
+          description
+             "A reference to a vnfd. This is a
+              leafref to path:
+                  ../../../../nsd:constituent-vnfd
+                  + [nsd:id = current()/../nsd:id-ref]
+                  + /nsd:vnfd-id-ref
+              NOTE: An issue with confd is preventing the
+              use of xpath. It seems to be an issue with a leafref
+              to a leafref whose target is in a different module.
+              Once that is resolved, this will be switched to use a
+              leafref.";
+
+          type yang:uuid;
+        }
+
+        leaf vnfd-monitoring-param-ref {
+          description "A reference to the VNFD monitoring param";
+          type leafref {
+            path "/vnfd:vnfd-catalog/vnfd:vnfd"
+              + "[vnfd:id = current()/../vnfd-id-ref]"
+              + "/vnfd:monitoring-param/vnfd:id";
+          }
+        }
+
+        leaf-list member-vnf-index-ref {
+          description
+              "Optional reference to member-vnf within constituent-vnfds";
+          type uint64;
+        }
+      }
+    }
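+
+    /*
+     * Illustrative sketch (not part of the model): an NS-level monitoring
+     * parameter that aggregates a hypothetical per-VNF counter from two
+     * member VNFs; the aggregation type itself comes from
+     * manotypes:monitoring-param-aggregation:
+     *
+     *   <monitoring-param>
+     *     <id>mp-1</id>
+     *     <name>total-sessions</name>
+     *     <vnfd-monitoring-param>
+     *       <vnfd-id-ref>11111111-2222-3333-4444-555555555555</vnfd-id-ref>
+     *       <vnfd-monitoring-param-ref>sessions</vnfd-monitoring-param-ref>
+     *       <member-vnf-index-ref>1</member-vnf-index-ref>
+     *       <member-vnf-index-ref>2</member-vnf-index-ref>
+     *     </vnfd-monitoring-param>
+     *   </monitoring-param>
+     */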
+
+    uses manotypes:input-parameter-xpath;
+
+    list parameter-pool {
+      description
+        "Pool of parameter values which must be
+         pulled from during configuration";
+      key "name";
+
+      leaf name {
+        description
+            "Name of the configuration value pool";
+        type string;
+      }
+
+      container range {
+        description
+            "Create a range of values to populate the pool with";
+
+        leaf start-value {
+          description
+              "Generated pool values start at this value";
+          type uint32;
+          mandatory true;
+        }
+
+        leaf end-value {
+          description
+              "Generated pool values stop at this value";
+          type uint32;
+          mandatory true;
+        }
+      }
+    }
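+
+    /*
+     * Illustrative sketch (not part of the model): a pool of 100 values
+     * (for example port numbers) from which unique values can be drawn
+     * during configuration; the pool name is hypothetical:
+     *
+     *   <parameter-pool>
+     *     <name>port-pool</name>
+     *     <range>
+     *       <start-value>5000</start-value>
+     *       <end-value>5099</end-value>
+     *     </range>
+     *   </parameter-pool>
+     */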
+
+    uses manotypes:ns-service-primitive;
+
+    list initial-config-primitive {
+      rwpb:msg-new NsdInitialConfigPrimitive;
+      description
+        "Initial set of configuration primitives for NSD.";
+      key "seq";
+
+      uses ns-initial-config-primitive;
+    }
+  }
+
+  container nsd-catalog {
+
+    list nsd {
+      key "id";
+
+      uses nsd-descriptor;
+
+    }
+  }
+
+}
diff --git a/models/plugins/yang/nsr.tailf.yang b/models/plugins/yang/nsr.tailf.yang
new file mode 100644 (file)
index 0000000..b68872e
--- /dev/null
@@ -0,0 +1,52 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module nsr-annotation
+{
+  namespace "http://riftio.com/ns/riftware-1.0/nsr-annotation";
+  prefix "nsr-ann";
+
+  import tailf-common {
+    prefix tailf;
+  }
+
+  import nsr {
+    prefix nsr;
+  }
+
+  tailf:annotate "/nsr:ns-instance-opdata" {
+    tailf:callpoint rw_callpoint;
+  }
+  tailf:annotate "/nsr:exec-ns-service-primitive" {
+     tailf:actionpoint rw_actionpoint;
+  }
+  tailf:annotate "/nsr:exec-scale-out" {
+     tailf:actionpoint rw_actionpoint;
+  }
+  tailf:annotate "/nsr:exec-scale-in" {
+     tailf:actionpoint rw_actionpoint;
+  }
+  tailf:annotate "/nsr:get-ns-service-primitive-values" {
+     tailf:actionpoint rw_actionpoint;
+  }
+  tailf:annotate "/nsr:start-network-service" {
+     tailf:actionpoint rw_actionpoint;
+  }
+}
diff --git a/models/plugins/yang/nsr.yang b/models/plugins/yang/nsr.yang
new file mode 100644 (file)
index 0000000..2e12788
--- /dev/null
@@ -0,0 +1,1376 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module nsr
+{
+  namespace "urn:ietf:params:xml:ns:yang:nfvo:nsr";
+  prefix "nsr";
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import vlr {
+    prefix "vlr";
+  }
+
+  import vld {
+    prefix "vld";
+  }
+
+  import nsd {
+    prefix "nsd";
+  }
+
+  import vnfd {
+    prefix "vnfd";
+  }
+
+  import vnfr {
+    prefix "vnfr";
+  }
+
+  import ietf-inet-types {
+    prefix "inet";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  import rw-sdn {
+    prefix "rwsdn";
+  }
+
+  revision 2015-09-10 {
+    description
+      "Initial revision. This YANG file defines
+       the Network Service Record (NSR)";
+    reference
+      "Derived from earlier versions of base YANG files";
+  }
+
+  typedef config-states {
+    type enumeration {
+      enum init;
+      enum configuring;
+      enum config_not_needed;
+      enum configured;
+      enum failed;
+    }
+  }
+
+  typedef trigger-type {
+    type enumeration {
+      enum ns-primitive;
+      enum vnf-primitive;
+    }
+  }
+
+  grouping cloud-config {
+    description "List of cloud config parameters";
+
+    list ssh-authorized-key {
+      key "key-pair-ref";
+
+      description "List of authorized ssh keys as part of cloud-config";
+
+      leaf key-pair-ref {
+        description "A reference to the key pair entry in the global key pair table";
+        type leafref {
+          path "/nsr:key-pair/nsr:name";
+        }
+      }
+    }
+    list user {
+      key "name";
+
+      description "List of users to be added through cloud-config";
+      leaf name {
+        description "Name of the user ";
+        type string;
+      }
+      leaf gecos {
+        description "The user name's real name";
+        type string;
+      }
+      leaf passwd {
+        description "The user password";
+        type string;
+      }
+    }
+  }
+
+  list key-pair {
+    key "name";
+    description "Used to configure the list of public keys to be injected as part 
+                 of ns instantiation";
+    leaf name {
+      description "Name of this key pair";
+      type string;
+    }
+
+    leaf key {
+      description "Key associated with this key pair";
+      type string;
+    }
+  }
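+
+  /*
+   * Illustrative sketch (not part of the model): a key pair in the global
+   * table, plus a cloud-config block (from the grouping above) that injects
+   * it and one user at NS instantiation. The key material and user are
+   * placeholders:
+   *
+   *   <key-pair>
+   *     <name>ops-key</name>
+   *     <key>ssh-rsa AAAA... ops@example.com</key>
+   *   </key-pair>
+   *
+   *   <ssh-authorized-key>
+   *     <key-pair-ref>ops-key</key-pair-ref>
+   *   </ssh-authorized-key>
+   *   <user>
+   *     <name>ops</name>
+   *     <gecos>Operations User</gecos>
+   *   </user>
+   */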
+
+  rpc start-network-service {
+    description "Start the network service";
+    input {
+      leaf name {
+        mandatory true;
+        description "Name of the Network Service"; 
+        type string;
+      }
+      leaf nsd-ref {
+        description "Reference to NSR ID ref";
+        mandatory true;
+        type leafref {
+          path "/nsd:nsd-catalog/nsd:nsd/nsd:id";
+        }
+      }
+      uses ns-instance-config-params;
+    }
+
+    output {
+      leaf nsr-id {
+        description "Automatically generated parameter";
+        type yang:uuid;
+      }
+    }
+  }
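+
+  /*
+   * Illustrative sketch (not part of the model): invoking
+   * start-network-service over NETCONF. The NS name and NSD id are
+   * hypothetical; the reply carries the generated nsr-id:
+   *
+   *   <start-network-service xmlns="urn:ietf:params:xml:ns:yang:nfvo:nsr">
+   *     <name>my-ns</name>
+   *     <nsd-ref>nsd-1</nsd-ref>
+   *   </start-network-service>
+   */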
+
+  container ns-instance-config {
+
+    list nsr {
+      key "id";
+      unique "name";
+
+      leaf id {
+        description "Identifier for the NSR.";
+        type yang:uuid;
+      }
+
+      leaf name {
+        description "NSR name.";
+        type string;
+      }
+
+      leaf short-name {
+        description "NSR short name.";
+        type string;
+      }
+
+      leaf description {
+        description "NSR description.";
+        type string;
+      }
+
+      leaf admin-status {
+        description
+          "This is the administrative status of the NS instance";
+
+        type enumeration {
+          enum ENABLED;
+          enum DISABLED;
+        }
+      }
+
+      container nsd {
+        description "NS descriptor used to instantiate this NS";
+        uses nsd:nsd-descriptor;
+      }
+
+      uses ns-instance-config-params;
+    }
+  }
+
+  grouping ns-instance-config-params {
+    uses manotypes:input-parameter;
+
+    list scaling-group {
+      description "List of ns scaling group instances";
+      key "scaling-group-name-ref";
+
+      leaf scaling-group-name-ref {
+        description "name of the scaling group
+        leafref path ../../nsd/scaling-group-descriptor/name";
+        type string;
+      }
+
+      list instance {
+        description "The instance of the scaling group";
+        key "id";
+        leaf id {
+          description "Scaling group instance uuid";
+          type uint16;
+        }
+      }
+    }
+
+    list nsd-placement-group-maps {
+      description
+          "Mapping from mano-placement groups construct from NSD to cloud
+          platform placement group construct";
+
+      key "placement-group-ref";
+
+      leaf placement-group-ref {
+        description "Reference for NSD placement group
+            leafref path ../../nsd/placement-groups/name";
+        type string;
+      }
+      uses manotypes:placement-group-input;
+    }
+
+    list vnfd-placement-group-maps {
+      description
+          "Mapping from mano-placement groups construct from VNFD to cloud
+          platform placement group construct";
+
+      key "placement-group-ref vnfd-id-ref";
+
+      leaf vnfd-id-ref {
+        description
+            "A reference to a vnfd. This is a
+             leafref to path:
+                 ../../../../nsd:constituent-vnfd
+                 + [nsr:id = current()/../nsd:id-ref]
+                 + /nsd:vnfd-id-ref
+             NOTE: An issue with confd is preventing the
+             use of xpath. It seems to be an issue with a leafref
+             to a leafref whose target is in a different module.
+             Once that is resolved, this will be switched to use a
+             leafref.";
+        type yang:uuid;
+      }
+
+      leaf placement-group-ref {
+        description
+            "A reference to the VNFD placement group";
+        type leafref {
+          path "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id = current()/" +
+              "../nsr:vnfd-id-ref]/vnfd:placement-groups/vnfd:name";
+        }
+      }
+
+      uses manotypes:placement-group-input;
+    }
+
+    uses cloud-config;
+
+  grouping vnffgr {
+
+    list vnffgr {
+      key "id";
+
+      leaf id {
+        description "Identifier for the VNFFGR.";
+        type yang:uuid;
+      }
+
+      leaf vnffgd-id-ref {
+        description "VNFFG descriptor id reference";
+        type leafref {
+          path "/nsr:ns-instance-config/nsr:nsr"
+            + "[nsr:id=current()/../../ns-instance-config-ref]"
+            + "/nsr:nsd/nsr:vnffgd/nsr:id";
+        }
+      }
+
+      leaf vnffgd-name-ref {
+        description "VNFFG descriptor name reference";
+        type leafref {
+            path "/ns-instance-config/nsr"
+              + "[id=current()/../../ns-instance-config-ref]"
+              + "/nsd/vnffgd[nsr:id = current()/../vnffgd-id-ref]"
+              + "/name";
+        }
+      }
+
+      leaf sdn-account {
+        description
+            "The SDN account to use when requesting resources for
+            this vnffgr";
+        type leafref {
+          path "/rwsdn:sdn-account/rwsdn:name";
+        }
+      }
+
+      leaf operational-status {
+        description
+          "The operational status of the VNFFGR instance
+            init                : The VNFFGR has just started.
+            running             : The VNFFGR is in running state.
+            terminate           : The VNFFGR is being terminated.
+            terminated          : The VNFFGR is in the terminated state.
+            failed              : The VNFFGR instantiation failed
+          ";
+
+        type enumeration {
+          rwpb:enum-type "VnffgrOperationalStatus";
+          enum init;
+          enum running;
+          enum terminate;
+          enum terminated;
+          enum failed;
+        }
+      }
+
+      list rsp {
+        key "id";
+
+        leaf id {
+          description
+              "Identifier for the RSP.";
+          type yang:uuid;
+        }
+
+        leaf name {
+          description
+              "Name for the RSP";
+          type string;
+        }
+
+        leaf vnffgd-rsp-id-ref {
+          description
+              "Identifier for the VNFFG Descriptor RSP reference";
+          type leafref {
+            path "/ns-instance-config/nsr"
+              + "[id=current()/../../../ns-instance-config-ref]"
+              + "/nsd/vnffgd"
+              + "[id=current()/../../vnffgd-id-ref]"
+              + "/rsp/id";
+          }
+        }
+
+        leaf vnffgd-rsp-name-ref {
+          description
+              "Name for the VNFFG Descriptor RSP reference";
+          type leafref {
+            path "/ns-instance-config/nsr:nsr"
+              + "[id=current()/../../../ns-instance-config-ref]"
+              + "/nsd/vnffgd"
+              + "[id=current()/../../vnffgd-id-ref]"
+              + "/rsp[id=current()/../vnffgd-rsp-id-ref]"
+              + "/name";
+          }
+        }
+
+        leaf classifier-name {
+          type string;
+        }
+
+        leaf path-id {
+          description
+              "Unique Identifier for the service path";
+          type uint32;
+        }
+
+        list vnfr-connection-point-ref {
+          key "hop-number";
+          leaf hop-number {
+            description
+                "Monotonically increasing number to show service path hop
+                order";
+            type uint8;
+          }
+          leaf service-function-type {
+            description
+                "Type of Service Function.
+                NOTE: This needs to map to the Service Function Type in ODL to
+                support VNFFG. Service Function Type is a mandatory parameter
+                in ODL SFC. This is temporarily set to string for ease of use.";
+            type string;
+          }
+
+          leaf member-vnf-index-ref {
+            type uint64;
+          }
+          leaf vnfd-id-ref {
+            description
+                "Reference to VNF Descriptor Id";
+            type string;
+          }
+          leaf vnfr-id-ref {
+            description
+                "A reference to a vnfr id";
+                type leafref {
+                  path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id";
+                }
+          }
+          leaf vnfr-name-ref {
+            description
+                "A reference to a vnfr name";
+                type leafref {
+                  path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:name";
+                }
+          }
+          leaf vnfr-connection-point-ref {
+            description
+                "A reference to a vnfr connection point.";
+            type leafref {
+              path "/vnfr:vnfr-catalog/vnfr:vnfr"
+                 + "[vnfr:id = current()/../nsr:vnfr-id-ref]"
+                 + "/vnfr:connection-point/vnfr:name";
+            }
+          }
+          leaf service-index {
+            description
+                "Location within the service path";
+            type uint8;
+          }
+          container connection-point-params {
+            leaf mgmt-address {
+              type inet:ip-address;
+            }
+            leaf name {
+              type string;
+            }
+            leaf port-id {
+              rwpb:field-inline "true";
+              rwpb:field-string-max 64;
+              type string;
+            }
+            leaf vm-id {
+              rwpb:field-inline "true";
+              rwpb:field-string-max 64;
+              type string;
+            }
+            leaf address {
+              type inet:ip-address;
+            }
+            leaf port {
+              type inet:port-number;
+            }
+          }
+
+          container service-function-forwarder {
+            leaf name {
+              description
+                  "Service Function Forwarder name";
+              type string;
+            }
+            leaf ip-address {
+              description
+                  "Data Plane IP Address of the SFF";
+              type inet:ip-address;
+            }
+            leaf port {
+              description
+                  "Data Plane Port of the SFF";
+              type inet:port-number;
+            }
+          }
+        }
+      }
+
+      list classifier {
+          key "id";
+
+          leaf id {
+            description
+                "Identifier for the classifier rule.";
+            type yang:uuid;
+          }
+          leaf name {
+            description
+                "Name of the classifier.";
+            type string;
+          }
+          leaf rsp-id-ref {
+            description
+                "A reference to the RSP.";
+            type leafref {
+              path "../../nsr:rsp/nsr:id";
+            }
+          }
+          leaf rsp-name {
+            description
+              "Name for the RSP";
+            type string;
+          }
+          leaf vnfr-id-ref {
+            description
+                "A reference to a vnfr id";
+                type leafref {
+                  path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id";
+                }
+          }
+          leaf vnfr-name-ref {
+            description
+                "A reference to a vnfr name";
+                type leafref {
+                  path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:name";
+                }
+          }
+          leaf vnfr-connection-point-ref {
+            description
+                "A reference to a vnfr connection point.";
+            type leafref {
+              path "/vnfr:vnfr-catalog/vnfr:vnfr"
+                 + "[vnfr:id = current()/../nsr:vnfr-id-ref]"
+                 + "/vnfr:connection-point/vnfr:name";
+            }
+          }
+          leaf port-id {
+            rwpb:field-inline "true";
+            rwpb:field-string-max 64;
+            type string;
+          }
+          leaf vm-id {
+            rwpb:field-inline "true";
+            rwpb:field-string-max 64;
+            type string;
+          }
+          leaf ip-address {
+            type string;
+          }
+          leaf sff-name {
+            type string;
+          }
+      }
+    }
+  }
+
+  container ns-instance-opdata {
+    config false;
+
+    list nsr {
+      key "ns-instance-config-ref";
+
+      leaf ns-instance-config-ref {
+        type leafref {
+          path "/nsr:ns-instance-config/nsr:nsr/nsr:id";
+        }
+      }
+
+      leaf name-ref {
+        description "Network service name reference";
+        type leafref {
+          path "/nsr:ns-instance-config/nsr:nsr/nsr:name";
+        }
+      }
+
+      leaf nsd-ref {
+        description "Network service descriptor id reference";
+        type leafref {
+          path "/ns-instance-config/nsr"
+            + "[id=current()/../ns-instance-config-ref]"
+            + "/nsd/id";
+        }
+      }
+
+      leaf nsd-name-ref {
+        description "Network service descriptor name reference";
+        type leafref {
+          path "/ns-instance-config/nsr"
+            + "[id=current()/../ns-instance-config-ref]"
+            + "/nsd/name";
+        }
+      }
+
+      leaf create-time {
+        description
+          "Creation timestamp of this Network Service.
+          The timestamp is expressed as seconds
+          since unix epoch - 1970-01-01T00:00:00Z";
+
+        type uint32;
+      }
+
+      list connection-point {
+        description
+            "List for external connection points.
+            Each NS has one or more external connection points.
+            As the name implies that external connection points
+            are used for connecting the NS to other NS or to
+            external networks. Each NS exposes these connection
+            points to the orchestrator. The orchestrator can
+            construct network service chains by connecting the
+            connection points between different NS.";
+
+        key "name";
+        leaf name {
+          description
+              "Name of the NS connection point.";
+          type string;
+        }
+
+        leaf type {
+          description
+              "Type of the connection point.";
+          type manotypes:connection-point-type;
+        }
+      }
+
+      list vlr {
+        key "vlr-ref";
+        leaf vlr-ref {
+          description
+              "Reference to a VLR record in the VLR catalog";
+          type leafref {
+            path "/vlr:vlr-catalog/vlr:vlr/vlr:id";
+          }
+        }
+
+
+        list vnfr-connection-point-ref {
+          description
+            "A list of references to connection points.";
+          key "vnfr-id";
+
+          leaf vnfr-id {
+            description "A reference to a vnfr";
+            type leafref {
+              path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id";
+            }
+          }
+
+          leaf connection-point {
+            description
+                "A reference to a connection point name in a vnfr";
+            type leafref {
+              path "/vnfr:vnfr-catalog/vnfr:vnfr"
+                 + "[vnfr:id = current()/../nsr:vnfr-id]"
+                 + "/vnfr:connection-point/vnfr:name";
+            }
+          }
+        }
+      }
+
+      list constituent-vnfr-ref {
+        description
+            "List of VNFRs that are part of this
+             network service.";
+        key "vnfr-id";
+
+        leaf vnfr-id {
+          description
+            "Reference to the VNFR id
+             This should be a leafref to /vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id
+             But due to confd bug (RIFT-9451), changing to string.
+            ";
+          type string;
+        }
+      }
+
+      list scaling-group-record {
+        description "List of scaling group records";
+        key "scaling-group-name-ref";
+
+        leaf scaling-group-name-ref {
+          description "name of the scaling group";
+          type leafref {
+            path "/ns-instance-config/nsr"
+              + "[id=current()/../../ns-instance-config-ref]"
+              + "/nsd/scaling-group-descriptor/name";
+          }
+        }
+
+        list instance {
+          description "Reference to scaling group instance record";
+          key "instance-id";
+          leaf instance-id {
+            description "Scaling group instance id";
+            type uint16;
+          }
+
+          leaf is-default {
+            description "Flag indicating whether this instance was part of
+                default scaling group (and thus undeletable)";
+            type boolean;
+          }
+
+          leaf op-status {
+            description
+              "The operational status of the NS instance
+                init                : The scaling group has just started.
+                vnf-init-phase      : The VNFs in the scaling group are being instantiated.
+                running             : The scaling group is in running state.
+                terminate           : The scaling group is being terminated.
+                vnf-terminate-phase : The VNFs in the scaling group are being terminated.
+                terminated          : The scaling group is in the terminated state.
+                failed              : The scaling group instantiation failed.
+              ";
+
+            type enumeration {
+              enum init;
+              enum vnf-init-phase;
+              enum running;
+              enum terminate;
+              enum vnf-terminate-phase;
+              enum terminated;
+              enum failed;
+            }
+          }
+
+          leaf config-status {
+            description
+              "The configuration status of the scaling group instance
+               configuring : At least one of the VNFs in this scaling group instance
+                             is in configuring state
+               configured  : All the VNFs in this scaling group instance are
+                             in configured or config-not-needed state
+               failed      : Configuring this scaling group instance failed
+              ";
+            type config-states;
+          }
+
+          leaf error-msg {
+            description
+              "Reason for failure in configuration of this scaling instance";
+            type string;
+          }
+
+          leaf create-time {
+            description
+              "Creation timestamp of this scaling group record.
+              The timestamp is expressed as seconds
+              since unix epoch - 1970-01-01T00:00:00Z";
+
+              type uint32;
+          }
+
+          leaf-list vnfrs {
+            description "Reference to VNFR within the scale instance";
+            type leafref {
+              path "../../../constituent-vnfr-ref/vnfr-id";
+            }
+          }
+        }
+      }
+
+      uses vnffgr;
+
+      leaf operational-status {
+        description
+          "The operational status of the NS instance
+            init                : The network service has just started.
+            vl-init-phase       : The VLs in the NS are being instantiated.
+            vnf-init-phase      : The VNFs in the NS are being instantiated.
+            running             : The NS is in running state.
+            terminate           : The NS is being terminated.
+            vnf-terminate-phase : The NS is terminating the VNFs in the NS.
+            vl-terminate-phase  : The NS is terminating the VLs in the NS.
+            terminated          : The NS is in the terminated state.
+            failed              : The NS instantiation failed.
+            scaling-out         : The NS is scaling out
+            scaling-in          : The NS is scaling in
+            vl-instantiate      : The NS is initiating a new VL
+            vl-terminate        : The NS is terminating a VL
+          ";
+
+        type enumeration {
+          enum init;
+          enum vl-init-phase;
+          enum vnf-init-phase;
+          enum running;
+          enum terminate;
+          enum vnf-terminate-phase;
+          enum vl-terminate-phase;
+          enum terminated;
+          enum failed;
+          enum scaling-out;
+          enum scaling-in;
+          enum vl-instantiate;
+          enum vl-terminate;
+        }
+      }
+
+      leaf config-status {
+        description
+          "The configuration status of the NS instance
+            configuring: At least one of the VNFs in this instance is in configuring state
+            configured:  All the VNFs in this NS instance are in configured or config-not-needed state
+          ";
+        type config-states;
+      }
+
+      uses manotypes:ns-service-primitive;
+
+      list initial-config-primitive {
+        rwpb:msg-new NsrInitialConfigPrimitive;
+        description
+            "Initial set of configuration primitives for NSD.";
+        key "seq";
+        leaf seq {
+          description
+              "Sequence number for the configuration primitive.";
+          type uint64;
+        }
+
+        leaf name {
+          description
+              "Name of the configuration primitive.";
+          type string;
+          mandatory "true";
+        }
+
+        leaf user-defined-script {
+          description
+              "A user defined script.";
+          type string;
+        }
+
+        list parameter {
+          key "name";
+          leaf name {
+            type string;
+          }
+
+          leaf value {
+            type string;
+          }
+        }
+      }
+
+
+      list monitoring-param {
+        description
+          "List of NS level params.";
+        key "id";
+
+        uses manotypes:monitoring-param-value;
+        uses manotypes:monitoring-param-ui-data;
+        uses manotypes:monitoring-param-aggregation;
+
+        leaf id {
+          type string;
+        }
+
+        leaf name {
+          type string;
+        }
+
+        leaf nsd-mon-param-ref {
+          description "Reference to the NSD monitoring param descriptor
+                       that produced this result";
+          type leafref {
+            path "/nsd:nsd-catalog/nsd:nsd[nsd:id = current()/" +
+                 "../../nsr:nsd-ref]/nsd:monitoring-param/nsd:id";
+          }
+        }
+
+        list vnfr-mon-param-ref {
+          description "A list of VNFR monitoring params associated with this monp";
+          key "vnfr-id-ref vnfr-mon-param-ref";
+
+          leaf vnfr-id-ref {
+            description
+               "A reference to a vnfr. This is a
+                leafref to path:
+                    /vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id";
+
+            type yang:uuid;
+          }
+
+          leaf vnfr-mon-param-ref {
+            description "A reference to the VNFR monitoring param";
+            type leafref {
+              path "/vnfr:vnfr-catalog/vnfr:vnfr"
+                + "[vnfr:id = current()/../nsr:vnfr-id-ref]"
+                + "/vnfr:monitoring-param/vnfr:id";
+            }
+          }
+        }
+      }
+
+      list config-agent-job {
+        key "job-id";
+
+        leaf job-id {
+          description "config agent job Identifier for the NS.";
+          type uint64;
+        }
+
+        leaf job-name {
+          description "Config agent job name";
+          type string;
+        }
+
+        leaf job-status {
+          description
+              "Job status to be set based on each VNF primitive execution,
+               pending  - if at least one VNF is in pending state
+                          and remaining VNFs are in success state.
+               Success  - if all VNF executions are in success state
+               failure  - if one of the VNF executions is failure";
+          type enumeration {
+            enum pending;
+            enum success;
+            enum failure;
+          }
+        }
+
+        leaf triggered-by {
+          description "The primitive is triggered from NS or VNF level";
+          type trigger-type;
+        }
+
+        leaf create-time {
+          description
+            "Creation timestamp of this Config Agent Job.
+            The timestamp is expressed as seconds
+            since unix epoch - 1970-01-01T00:00:00Z";
+
+          type uint32;
+        }
+
+        leaf job-status-details {
+          description "Config agent job status details, in case of errors";
+          type string;
+        }
+
+        uses manotypes:primitive-parameter-value;
+
+        list parameter-group {
+          description
+              "List of NS Primitive parameter groups";
+          key "name";
+          leaf name {
+            description
+                "Name of the parameter.";
+            type string;
+          }
+
+          uses manotypes:primitive-parameter-value;
+        }
+
+        list vnfr {
+          key "id";
+          leaf id {
+            description "Identifier for the VNFR.";
+            type yang:uuid;
+          }
+          leaf vnf-job-status {
+            description
+                "Job status to be set based on each VNF primitive execution,
+                 pending  - if at least one primitive is in pending state
+                            and remaining primitives are in success state.
+                 Success  - if all primitive executions are in success state
+                 failure  - if one of the primitive executions is failure";
+            type enumeration {
+              enum pending;
+              enum success;
+              enum failure;
+            }
+          }
+
+          list primitive {
+            key "name";
+            leaf name {
+              description "the name of the primitive";
+              type string;
+            }
+
+            uses manotypes:primitive-parameter-value;
+
+            leaf execution-id {
+              description "Execution id of the primitive";
+              type string;
+            }
+            leaf execution-status {
+              description "status of the Execution";
+              type enumeration {
+                enum pending;
+                enum success;
+                enum failure;
+              }
+            }
+            leaf execution-error-details {
+              description "Error details if execution-status is failure";
+              type string;
+            }
+          }
+        }
+      }
+    }
+  }
+
+  rpc get-ns-service-primitive-values {
+    description "Get the service primitive parameter values";
+    input {
+      leaf nsr_id_ref {
+        description "Reference to NSR ID ref";
+        mandatory true;
+        type leafref {
+          path "/nsr:ns-instance-config/nsr:nsr/nsr:id";
+        }
+      }
+
+      leaf name {
+        description "Name of the NS service primitive group";
+        mandatory true;
+        type string;
+      }
+    }
+
+    output {
+      list ns-parameter {
+        description "Automatically generated parameter";
+        key "name";
+
+        leaf name {
+          description "Parameter name which should be pulled from a parameter pool";
+          type string;
+        }
+        leaf value {
+          description "Automatically generated value";
+          type string;
+        }
+      }
+
+      list ns-parameter-group {
+        description "Automatically generated parameters in parameter group";
+        key "name";
+        leaf name {
+          description "Parameter group name";
+          type string;
+        }
+        list parameter {
+          description "Automatically generated group parameter";
+          key "name";
+
+          leaf name {
+            description "Parameter name which should be pulled from a parameter pool";
+            type string;
+          }
+          leaf value {
+            description "Automatically generated value";
+            type string;
+          }
+        }
+      }
+
+      list vnf-primitive-group {
+        description
+            "List of service primitives grouped by VNF.";
+
+        key "member-vnf-index-ref";
+        leaf member-vnf-index-ref {
+          description
+              "Reference to member-vnf within constituent-vnfds";
+          type uint64;
+        }
+
+        leaf vnfd-id-ref {
+          description
+              "A reference to a vnfd. This is a
+               leafref to path:
+                   ../../../../nsd:constituent-vnfd
+                   + [nsd:id = current()/../nsd:id-ref]
+                   + /nsd:vnfd-id-ref
+               NOTE: An issue with confd is preventing the
+               use of xpath. It seems to be an issue with a leafref
+               to a leafref whose target is in a different module.
+               Once that is resolved, this will be switched to use a
+               leafref.";
+
+          type string;
+        }
+
+        list primitive {
+          key "index";
+          leaf index {
+            description "Index of this primitive";
+            type uint32;
+          }
+
+          leaf name {
+            description "Name of the primitive associated with a value pool";
+            type string;
+          }
+
+          list parameter {
+            description "Automatically generated parameter";
+            key "name";
+
+            leaf name {
+              description "Parameter name which should be pulled from a parameter pool";
+              type string;
+            }
+            leaf value {
+              description "Automatically generated value";
+              type string;
+            }
+          }
+        }
+      }
+    }
+  }
+
+  rpc exec-ns-service-primitive {
+    description "Executes a NS service primitive or script";
+
+    input {
+      leaf name {
+        description "Name of the primitive";
+        type string;
+      }
+
+      leaf nsr_id_ref {
+        description "Reference to NSR ID ref";
+        type leafref {
+          path "/nsr:ns-instance-config/nsr:nsr/nsr:id";
+        }
+      }
+
+      leaf triggered-by {
+        description "The primitive is triggered from NS or VNF level";
+        type trigger-type;
+        default ns-primitive;
+      }
+
+      uses manotypes:primitive-parameter-value;
+
+      list parameter-group {
+        description
+            "List of NS Primitive parameter groups";
+        key "name";
+        leaf name {
+          description
+              "Name of the parameter.";
+          type string;
+        }
+
+        uses manotypes:primitive-parameter-value;
+      }
+
+      list vnf-list {
+        description
+            "List of VNFs whose primitives are being set.";
+        key "member_vnf_index_ref";
+
+        leaf member_vnf_index_ref {
+          description "Member VNF index";
+          type uint64;
+        }
+
+        leaf vnfr-id-ref {
+          description
+              "A reference to a vnfr. This is a
+               leafref to path";
+          type yang:uuid;
+        }
+
+        list vnf-primitive {
+          description
+              "List of service primitives supported by the
+            configuration agent for this VNF.";
+          key "index";
+
+          leaf index {
+            description
+                "index of the service primitive.";
+            type uint32;
+          }
+          leaf name {
+            description
+                "Name of the service primitive.";
+            type string;
+          }
+
+          uses manotypes:primitive-parameter-value;
+        }
+      }
+      leaf user-defined-script {
+        description
+            "A user defined script.";
+        type string;
+      }
+    }
+    output {
+      leaf job-id {
+        description "Job identifier for this RPC";
+        type uint64;
+      }
+
+      leaf name {
+        description "Name of the service primitive";
+        type string;
+      }
+
+      leaf nsr_id_ref {
+        description "Reference to NSR ID ref";
+        type leafref {
+          path "/nsr:ns-instance-config/nsr:nsr/nsr:id";
+        }
+      }
+
+      leaf triggered-by {
+        description "The primitive is triggered from NS or VNF level";
+        type trigger-type;
+      }
+
+      leaf create-time {
+        description
+          "Creation timestamp of this config agent JOB.
+          The timestamp is expressed as seconds
+          since unix epoch - 1970-01-01T00:00:00Z";
+
+        type uint32;
+      }
+
+      leaf job-status-details {
+        description "Job status details, in case of any errors";
+        type string;
+      }
+
+      uses manotypes:primitive-parameter-value;
+
+      list parameter-group {
+        description
+            "List of NS Primitive parameter groups";
+        key "name";
+        leaf name {
+          description
+              "Name of the parameter.";
+          type string;
+        }
+
+        uses manotypes:primitive-parameter-value;
+      }
+
+      list vnf-out-list {
+        description
+            "List of VNFs whose primitives were set.";
+        key "member_vnf_index_ref";
+
+        leaf member_vnf_index_ref {
+          description "Member VNF index";
+          type uint64;
+        }
+        leaf vnfr-id-ref {
+          description
+              "A reference to a vnfr. This is a
+               leafref to path";
+          type yang:uuid;
+        }
+
+        list vnf-out-primitive {
+          description
+              "List of service primitives supported by the
+            configuration agent for this VNF.";
+          key "index";
+
+          leaf index {
+            description
+                "index of the service primitive.";
+            type uint32;
+          }
+
+          leaf name {
+            description
+                "Name of the service primitive.";
+            type string;
+          }
+
+          uses manotypes:primitive-parameter-value;
+
+          leaf execution-id {
+            description "Execution id of this primitive";
+            type string;
+          }
+
+          leaf execution-status {
+            description "Status of the execution of this primitive";
+            type string;
+          }
+
+          leaf execution-error-details {
+            description "Error details if execution-status is failed";
+            type string;
+          }
+        }
+      }
+    }
+  }
+
+  rpc exec-scale-in {
+    description "Executes scale out request";
+
+    input {
+
+      leaf nsr-id-ref {
+        description "Reference to NSR ID ref";
+        type leafref {
+          path "/nsr:ns-instance-config/nsr:nsr/nsr:id";
+        }
+      }
+
+      leaf scaling-group-name-ref {
+        description "name of the scaling group";
+        type string;
+      }
+
+      leaf instance-id {
+        description "id of the scaling group";
+        type uint64;
+      }
+
+
+    }
+    output {
+      leaf instance-id {
+        description "id of the scaling group";
+        type uint64;
+      }
+    }
+  }
+
+  rpc exec-scale-out {
+    description "Executes scale out request";
+
+    input {
+
+      leaf nsr-id-ref {
+        description "Reference to NSR ID ref";
+        type leafref {
+          path "/nsr:ns-instance-config/nsr:nsr/nsr:id";
+        }
+      }
+
+      leaf scaling-group-name-ref {
+        description "name of the scaling group";
+        type string;
+      }
+
+      leaf instance-id {
+        description "id of the scaling group";
+        type uint64;
+      }
+
+    }
+    output {
+      leaf instance-id {
+        description "ID of the scaling group instance.";
+        type uint64;
+      }
+    }
+  }
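+
+  // Informative example (a sketch, not part of the model): a NETCONF
+  // invocation of exec-scale-out might carry a payload like the
+  // following, assuming this module's namespace is
+  // urn:ietf:params:xml:ns:yang:nfvo:nsr (all values illustrative):
+  //
+  //   <exec-scale-out xmlns="urn:ietf:params:xml:ns:yang:nfvo:nsr">
+  //     <nsr-id-ref>f81d4fae-7dec-11d0-a765-00a0c91e6bf6</nsr-id-ref>
+  //     <scaling-group-name-ref>web-tier</scaling-group-name-ref>
+  //     <instance-id>1</instance-id>
+  //   </exec-scale-out>
+  //
+  // The rpc-reply would carry the instance-id of the affected scaling
+  // group instance; exec-scale-in takes the same input.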
+
+}
diff --git a/models/plugins/yang/odl-network-topology.yang b/models/plugins/yang/odl-network-topology.yang
new file mode 100644 (file)
index 0000000..9c7101d
--- /dev/null
@@ -0,0 +1,359 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module odl-network-topology  {
+    yang-version 1;
+    namespace "urn:TBD:params:xml:ns:yang:network-topology";
+    // replace with IANA namespace when assigned
+    prefix "nt";
+
+    import ietf-inet-types { prefix "inet";  }
+
+    organization "TBD";
+
+    contact "WILL-BE-DEFINED-LATER";
+
+    description
+        "This module defines a model for the topology of a network.
+        Key design decisions are as follows:
+        A topology consists of a set of nodes and links.
+        Links are point-to-point and unidirectional.
+        Bidirectional connections need to be represented through
+        two separate links.
+        Multipoint connections, broadcast domains, etc. can be represented
+        through a hierarchy of nodes, then connecting nodes at
+        upper layers of the hierarchy.";
+
+    revision 2013-10-21 {
+        description
+            "Initial revision.";
+    }
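+
+    // Informative example (a sketch, not part of the model): since
+    // links are unidirectional, a bidirectional connection between
+    // nodes n1 and n2 is modeled as two links. All identifiers below
+    // are illustrative:
+    //
+    //   <network-topology>
+    //     <topology>
+    //       <topology-id>example:topo1</topology-id>
+    //       <node><node-id>example:n1</node-id></node>
+    //       <node><node-id>example:n2</node-id></node>
+    //       <link>
+    //         <link-id>example:n1-to-n2</link-id>
+    //         <source><source-node>example:n1</source-node></source>
+    //         <destination><dest-node>example:n2</dest-node></destination>
+    //       </link>
+    //       <link>
+    //         <link-id>example:n2-to-n1</link-id>
+    //         <source><source-node>example:n2</source-node></source>
+    //         <destination><dest-node>example:n1</dest-node></destination>
+    //       </link>
+    //     </topology>
+    //   </network-topology>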
+
+    typedef topology-id {
+        type inet:uri;
+        description
+            "An identifier for a topology.";
+    }
+
+    typedef node-id {
+        type inet:uri;
+        description
+            "An identifier for a node in a topology.
+            The identifier may be opaque.
+            The identifier SHOULD be chosen such that the same node in a
+            real network topology will always be identified through the
+            same identifier, even if the model is instantiated in separate
+            datastores. An implementation MAY choose to capture semantics
+            in the identifier, for example to indicate the type of node
+            and/or the type of topology that the node is a part of.";
+    }
+
+
+    typedef link-id {
+        type inet:uri;
+        description
+            "An identifier for a link in a topology.
+            The identifier may be opaque.
+            The identifier SHOULD be chosen such that the same link in a
+            real network topology will always be identified through the
+            same identifier, even if the model is instantiated in separate
+            datastores. An implementation MAY choose to capture semantics
+            in the identifier, for example to indicate the type of link
+            and/or the type of topology that the link is a part of.";
+    }
+
+    typedef tp-id {
+        type inet:uri;
+        description
+            "An identifier for termination points on a node.
+            The identifier may be opaque.
+            The identifier SHOULD be chosen such that the same TP in a
+            real network topology will always be identified through the
+            same identifier, even if the model is instantiated in separate
+            datastores. An implementation MAY choose to capture semantics
+            in the identifier, for example to indicate the type of TP
+            and/or the type of node and topology that the TP is a part of.";
+    }
+
+    typedef tp-ref {
+        type leafref {
+            path "/network-topology/topology/node/termination-point/tp-id";
+        }
+        description
+            "A type for an absolute reference to a termination point.
+            (This type should not be used for relative references.
+            In such a case, a relative path should be used instead.)";
+    }
+    typedef topology-ref {
+        type leafref {
+            path "/network-topology/topology/topology-id";
+        }
+        description
+            "A type for an absolute reference a topology instance.";
+    }
+
+    typedef node-ref {
+        type leafref {
+            path "/network-topology/topology/node/node-id";
+        }
+        description
+            "A type for an absolute reference to a node instance.
+            (This type should not be used for relative references.
+            In such a case, a relative path should be used instead.)";
+    }
+
+    typedef link-ref {
+        type leafref {
+            path "/network-topology/topology/link/link-id";
+        }
+        description
+            "A type for an absolute reference a link instance.
+            (This type should not be used for relative references.
+            In such a case, a relative path should be used instead.)";
+    }
+
+    grouping tp-attributes {
+        description
+            "The data objects needed to define a termination point.
+            (This only includes a single leaf at this point, used
+            to identify the termination point.)
+            Provided in a grouping so that in addition to the datastore,
+            the data can also be included in notifications.";
+        leaf tp-id {
+            type tp-id;
+        }
+        leaf-list tp-ref {
+            type tp-ref;
+            config false;
+            description
+                "The leaf list identifies any termination points that the
+                termination point is dependent on, or maps onto.
+                Those termination points will themselves be contained
+                in a supporting node.
+                This dependency information can be inferred from
+                the dependencies between links.  For this reason,
+                this item is not separately configurable.  Hence no
+                corresponding constraint needs to be articulated.
+                The corresponding information is simply provided by the
+                implementing system.";
+        }
+    }
+
+    grouping node-attributes {
+        description
+            "The data objects needed to define a node.
+            The objects are provided in a grouping so that in addition to
+            the datastore, the data can also be included in notifications
+            as needed.";
+
+        leaf node-id {
+            type node-id;
+            description
+                "The identifier of a node in the topology.
+                A node is specific to a topology to which it belongs.";
+        }
+        list supporting-node {
+            description
+                "This list defines vertical layering information for nodes.
+                It allows capturing, for any given node, which node (or nodes)
+                in the corresponding underlay topology it maps onto.
+                A node can map to zero, one, or more nodes below it;
+                accordingly there can be zero, one, or more elements in the list.
+                If there are specific layering requirements, for example
+                specific to a particular type of topology that only allows
+                for certain layering relationships, the choice
+                below can be augmented with additional cases.
+                A list has been chosen rather than a leaf-list in order
+                to provide room for augmentations, e.g. for
+                statistics or prioritization information associated with
+                supporting nodes.";
+            // This is not what was published in the initial draft,
+            // added topology-ref leaf and added it to the key
+            key "topology-ref node-ref";
+            leaf topology-ref {
+                type topology-ref;
+            }
+            leaf node-ref {
+                type node-ref;
+            }
+        }
+    }
+
+    grouping link-attributes {
+        // This is a grouping, not defined inline with the link definition itself,
+        // so it can be included in a notification, if needed
+        leaf link-id {
+            type link-id;
+            description
+                "The identifier of a link in the topology.
+                A link is specific to a topology to which it belongs.";
+        }
+        container source {
+            leaf source-node {
+                mandatory true;
+                type node-ref;
+                description
+                    "Source node identifier, must be in same topology.";
+            }
+            leaf source-tp {
+                type tp-ref;
+                description
+                    "Termination point within source node that terminates the link.";
+
+            }
+        }
+        container destination {
+            leaf dest-node {
+                mandatory true;
+                type node-ref;
+                description
+                    "Destination node identifier, must be in same topology.";
+            }
+            leaf dest-tp {
+                type tp-ref;
+                description
+                    "Termination point within destination node that terminates the link.";
+            }
+        }
+        list supporting-link {
+            key "link-ref";
+            leaf link-ref {
+                type link-ref;
+            }
+        }
+    }
+
+
+    container network-topology {
+        list topology {
+            description "
+                This is the model of an abstract topology.
+                A topology contains nodes and links.
+                Each topology MUST be identified by
+                unique topology-id for reason that a network could contain many
+                topologies.
+            ";
+            key "topology-id";
+            leaf topology-id {
+                type topology-id;
+                description "
+                    It is presumed that a datastore will contain many topologies. To
+                    distinguish between topologies it is vital to have UNIQUE
+                    topology identifiers.
+                ";
+            }
+            leaf server-provided {
+                type boolean;
+                config false;
+                description "
+                    Indicates whether the topology is configurable by clients,
+                    or whether it is provided by the server.  This leaf is
+
+                    populated by the server implementing the model.
+                    It is set to false for topologies that are created by a client;
+                    it is set to true otherwise.  If it is set to true, any
+                    attempt to edit the topology MUST be rejected.
+                ";
+            }
+            container topology-types {
+                description
+                    "This container is used to identify the type, or types
+                    (as a topology can support several types simultaneously),
+                    of the topology.
+                    Topology types are the subject of several integrity constraints
+                    that an implementing server can validate in order to
+                    maintain integrity of the datastore.
+                    Topology types are indicated through separate data nodes;
+                    the set of topology types is expected to increase over time.
+                    To add support for a new topology, an augmenting module
+                    needs to augment this container with a new empty optional
+                    container to indicate the new topology type.
+                    The use of a container allows indicating a subcategorization
+                    of topology types.
+                    The container SHALL NOT be augmented with any data nodes
+                    that serve a purpose other than identifying a particular
+                    topology type.
+                ";
+            }
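+
+            // Informative example (a sketch, not part of the model):
+            // an augmenting module would indicate a new topology type
+            // roughly as follows (module name and namespace are
+            // hypothetical):
+            //
+            //   module example-l3-topology {
+            //     namespace "urn:example:l3-topology";
+            //     prefix "l3";
+            //     import odl-network-topology { prefix "nt"; }
+            //     augment "/nt:network-topology/nt:topology/nt:topology-types" {
+            //       container l3-topology {
+            //         presence "Indicates an L3 topology.";
+            //       }
+            //     }
+            //   }
+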
+            list underlay-topology {
+                key "topology-ref";
+                leaf topology-ref {
+                    type topology-ref;
+                }
+                // a list, not a leaf-list, to allow for potential augmentation
+                // with properties specific to the underlay topology,
+                // such as statistics, preferences, or cost.
+                description
+                    "Identifies the topology, or topologies, that this topology
+                    is dependent on.";
+            }
+
+            list node {
+                description "The list of network nodes defined for the topology.";
+                key "node-id";
+                uses node-attributes;
+                must "boolean(../underlay-topology[*]/node[./supporting-nodes/node-ref])";
+                    // This constraint is meant to ensure that a referenced node is in fact
+                    // a node in an underlay topology.
+                list termination-point {
+                    description
+                        "A termination point can terminate a link.
+                        Depending on the type of topology, a termination point could,
+                        for example, refer to a port or an interface.";
+                    key "tp-id";
+                    uses tp-attributes;
+                }
+            }
+
+            list link {
+                description "
+                    A Network Link connects a by Local (Source) node and
+                    a Remote (Destination) Network Nodes via a set of the
+                    nodes' termination points.
+                    As it is possible to have several links between the same
+                    source and destination nodes, and as a link could potentially
+                    be re-homed between termination points, to ensure that we
+                    would always know to distinguish between links, every link
+                    is identified by a dedicated link identifier.
+                    Note that a link models a point-to-point link, not a multipoint
+                    link.
+                    Layering dependencies on links in underlay topologies are
+                    not represented as the layering information of nodes and of
+                    termination points is sufficient.
+                ";
+                key "link-id";
+                uses link-attributes;
+                must "boolean(../underlay-topology/link[./supporting-link])";
+                    // Constraint: any supporting link must be part of an underlay topology
+                must "boolean(../node[./source/source-node])";
+                    // Constraint: A link must have as source a node of the same topology
+                must "boolean(../node[./destination/dest-node])";
+                    // Constraint: A link must have as destination a node of the same topology
+                must "boolean(../node/termination-point[./source/source-tp])";
+                    // Constraint: The source termination point must be contained in the source node
+                must "boolean(../node/termination-point[./destination/dest-tp])";
+                    // Constraint: The destination termination point must be contained
+                    // in the destination node
+            }
+        }
+    }
+}
diff --git a/models/plugins/yang/pnfd.yang b/models/plugins/yang/pnfd.yang
new file mode 100644 (file)
index 0000000..e1f39a7
--- /dev/null
@@ -0,0 +1,104 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module pnfd
+{
+  namespace "urn:ietf:params:xml:ns:yang:nfvo:pnfd";
+  prefix "pnfd";
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import ietf-inet-types {
+    prefix "inet";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  revision 2015-09-10 {
+    description
+      "Initial revision. This YANG file defines 
+       the Physical Network Function Descriptor (PNFD)";
+    reference
+      "Derived from earlier versions of base YANG files";
+  }
+
+  container pnfd-catalog {
+
+    list pnfd {
+      key "id";
+
+      leaf id {
+        description "Identifier for the PNFD.";
+        type yang:uuid;
+      }
+
+      leaf name {
+        description "PNFD name.";
+        type string;
+      }
+
+      leaf short-name {
+        description "PNFD short name.";
+        type string;
+      }
+
+      leaf vendor {
+        description "Vendor of the PNFD.";
+        type string;
+      }
+
+      leaf description {
+        description "Description of the PNFD.";
+        type string;
+      }
+
+      leaf version {
+        description "Version of the PNFD";
+        type string;
+      }
+
+      list connection-point {
+        description
+            "List for external connection points. Each PNF has one or more external
+            connection points.";
+        key "id";
+        leaf id {
+          description
+              "Identifier for the external connection points";
+          type uint64;
+        }
+
+        leaf cp-type {
+          description
+              "Type of the connection point.";
+          type manotypes:connection-point-type;
+        }
+      }
+    }
+  }
+}
diff --git a/models/plugins/yang/rw-nsd.yang b/models/plugins/yang/rw-nsd.yang
new file mode 100644 (file)
index 0000000..4475928
--- /dev/null
@@ -0,0 +1,57 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-nsd
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-nsd";
+  prefix "rw-nsd";
+
+  import nsd {
+    prefix "nsd";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  revision 2015-09-10 {
+    description
+      "Initial revision. This YANG file augments
+       the base MANO NSD";
+    reference
+      "Derived from earlier versions of base YANG files";
+  }
+
+  augment /nsd:nsd-catalog/nsd:nsd {
+    uses manotypes:control-param;
+    uses manotypes:action-param;
+    leaf meta {
+      description
+        "Any meta-data needed by the UI";
+      type string;
+    }
+  }
+}
+
+// vim: sw=2
diff --git a/models/plugins/yang/rw-nsr.tailf.yang b/models/plugins/yang/rw-nsr.tailf.yang
new file mode 100644 (file)
index 0000000..3b7588a
--- /dev/null
@@ -0,0 +1,45 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-nsr-annotation
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-nsr-annotation";
+  prefix "rw-nsr-ann";
+
+  import tailf-common {
+    prefix tailf;
+  }
+
+  import rw-nsr {
+    prefix rw-nsr;
+  }
+
+  import nsr {
+    prefix nsr;
+  }
+
+  tailf:annotate "/nsr:ns-instance-opdata/nsr:nsr/rw-nsr:operational-events" {
+    tailf:callpoint rw_callpoint;
+  }
+
+  tailf:annotate "/nsr:ns-instance-opdata/rw-nsr:nsd-ref-count" {
+    tailf:callpoint rw_callpoint;
+  }
+}
diff --git a/models/plugins/yang/rw-nsr.yang b/models/plugins/yang/rw-nsr.yang
new file mode 100644 (file)
index 0000000..d73e7fa
--- /dev/null
@@ -0,0 +1,461 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-nsr
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-nsr";
+  prefix "rw-nsr";
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  import nsr {
+    prefix "nsr";
+  }
+
+  import nsd {
+    prefix "nsd";
+  }
+
+  import rw-cloud {
+    prefix "rw-cloud";
+  }
+
+  import rw-config-agent {
+    prefix "rw-ca";
+  }
+
+  import rw-sdn {
+    prefix "rw-sdn";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  revision 2015-09-10 {
+    description
+      "Initial revision. This YANG file augments
+       the base MANO VNFD";
+    reference
+      "Derived from earlier versions of base YANG files";
+  }
+
+  grouping operational-events {
+    list operational-events {
+      key "id";
+      description
+        "Recent operational events for this network service.
+        Though the model does not impose any restrictions on the numbe of events, 
+        the max operational events will be limited to the most recent 10"; 
+
+      leaf id {
+        description "The id of the instance";
+        type uint64;
+      }
+
+      leaf timestamp {
+        description
+          "The timestamp of this event expressed as seconds since
+          unix epoch - 1970-01-01T00:00:00Z";
+        type uint32;
+      }
+      leaf event {
+        description "Short description of the event";
+        type string;
+      }
+      leaf description {
+        description
+          "The description of this event";
+        type string;
+      }
+      leaf details {
+        description
+          "The detailed description of this event (in case of errors)";
+        type string;
+      }
+    }
+  }
+
+  grouping nsd-ref-count {
+    list nsd-ref-count {
+      key "nsd-id-ref";
+      description "This table maintains the number of NSRs used by each NSD";
+
+      leaf nsd-id-ref {
+        description "Reference to NSD";
+        type leafref {
+          path "/nsd:nsd-catalog/nsd:nsd/nsd:id";
+        }
+      }
+      leaf instance-ref-count {
+        description
+          "Reference count for the number of NSRs refering this NSD.
+           Every NS record instantiated using this descriptor takes
+           a reference on the NSD and releases the reference when the
+           network service is terminated. This desciptor cannot be
+           deleted when this counter is non zero";
+        type uint64;
+      }
+    }
+  }
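+
+  // Informative example (a sketch, not part of the model): once this
+  // grouping is used to augment /nsr:ns-instance-opdata (see below),
+  // the operational data might render as follows, assuming the nsr
+  // namespace is urn:ietf:params:xml:ns:yang:nfvo:nsr (values
+  // illustrative):
+  //
+  //   <ns-instance-opdata xmlns="urn:ietf:params:xml:ns:yang:nfvo:nsr">
+  //     <nsd-ref-count xmlns="http://riftio.com/ns/riftware-1.0/rw-nsr">
+  //       <nsd-id-ref>f81d4fae-7dec-11d0-a765-00a0c91e6bf6</nsd-id-ref>
+  //       <instance-ref-count>2</instance-ref-count>
+  //     </nsd-ref-count>
+  //   </ns-instance-opdata>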
+
+  grouping rw-ns-instance-config {
+    leaf cloud-account {
+      description
+        "The configured cloud account which the NSR is instantiated within.
+         All VDU's, Virtual Links, and provider networks will be requested
+         using the cloud-account's associated CAL instance";
+      type leafref {
+        path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+      }
+    }
+
+    leaf om-datacenter {
+      description
+        "Openmano datacenter name to use when instantiating
+         the network service.  This is only used when openmano
+         is selected as the cloud account.  This should be superseded
+         by multiple cloud accounts when that becomes available.";
+      type string;
+    }
+      
+    list vnf-cloud-account-map {
+      description 
+          "Mapping VNF to Cloud Account where VNF will be instantiated";
+
+      key "member-vnf-index-ref";
+      leaf member-vnf-index-ref {
+        type uint64;
+      }
+
+      leaf cloud-account {
+        description
+            "The configured cloud account where VNF is instantiated within.
+            All VDU's, Virtual Links, and provider networks will be requested
+            using the cloud-account's associated CAL instance";
+        type leafref {
+          path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+        }
+      }
+
+      leaf om-datacenter {
+        description
+            "Openmano datacenter name to use when instantiating
+            the network service.  This is only used when openmano
+            is selected as the cloud account.  This should be superseded
+            by multiple cloud accounts when that becomes available.";
+        type string;
+      }
+
+      leaf config-agent-account {
+        description
+          "The configured config agent account to use for instantiating this VNF.
+          The configuration for this VNF will be driven using the specified config
+          agent account";
+        type leafref {
+          path "/rw-ca:config-agent/rw-ca:account/rw-ca:name";
+        }
+      }
+    }
+
+    list vl-cloud-account-map {
+      description 
+          "Mapping VL to Cloud Account where VL will be instantiated";
+
+      key "vld-id-ref";
+
+      leaf vld-id-ref {
+        description 
+            "A reference to a vld.
+            leafref path ../../nsd/vld/id";
+        type string;
+      }
+
+      leaf-list cloud-accounts {
+        description
+            "The configured list of cloud accounts where VL is instantiated.
+            All VDU's, Virtual Links, and provider networks will be requested
+            using the cloud-account's associated CAL instance";
+        type leafref {
+          path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+        }
+      }
+
+      leaf-list om-datacenters {
+        description
+            "Openmano datacenter names to use when instantiating
+            the VLs. This is only used when openmano
+            is selected as the cloud account.  This should be superseded
+            by multiple cloud accounts when that becomes available.";
+        type string;
+      }
+    }
+  }
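+
+  // Informative example (a sketch, not part of the model): an
+  // ns-instance-config entry using these leafs might look like the
+  // following; all account, datacenter, and VLD names are
+  // illustrative:
+  //
+  //   <nsr>
+  //     <cloud-account>openstack-east</cloud-account>
+  //     <vnf-cloud-account-map>
+  //       <member-vnf-index-ref>1</member-vnf-index-ref>
+  //       <cloud-account>openstack-west</cloud-account>
+  //       <config-agent-account>juju-1</config-agent-account>
+  //     </vnf-cloud-account-map>
+  //     <vl-cloud-account-map>
+  //       <vld-id-ref>mgmt-vl</vld-id-ref>
+  //       <cloud-accounts>openstack-east</cloud-accounts>
+  //     </vl-cloud-account-map>
+  //   </nsr>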
+
+
+  augment /nsr:ns-instance-config/nsr:nsr {
+    uses rw-ns-instance-config;
+  }
+
+  augment /nsr:start-network-service/nsr:input{
+    uses rw-ns-instance-config;
+  } 
+
+  augment /nsr:ns-instance-opdata/nsr:nsr {
+    uses manotypes:action-param;
+    uses manotypes:control-param;
+
+    leaf sdn-account {
+      description
+        "The SDN account associted with the cloud account using which an
+         NS was instantiated.";
+      type leafref {
+        path "/rw-sdn:sdn-account/rw-sdn:name";
+      }
+    }
+
+    leaf config-status-details {
+      description
+        "The configuration status error details of the NS instance, in case of any errors";
+      type string;
+    }
+
+    container nfvi-metrics {
+      container vm {
+        leaf label {
+          description
+            "Label to show in UI";
+          type string;
+          default "VM";
+        }
+
+        leaf active-vm {
+          description
+            "The number of active VMs.";
+          type uint64;
+        }
+
+        leaf inactive-vm {
+          description
+            "The number of inactive VMs.";
+          type uint64;
+        }
+      }
+
+      uses manotypes:nfvi-metrics;
+    }
+
+    container epa-param {
+      container ovs-acceleration {
+        leaf label {
+          description
+            "Label to show in UI for the param";
+          type string;
+          default "OVS ACCELERATION";
+        }
+
+        leaf vm {
+          description
+            "Number of VMs with the EPA attribute";
+          type uint64;
+        }
+
+        leaf unit {
+          description
+            "Unit label to show in the UI";
+          type string;
+          default "vms";
+        }
+      }
+
+      container ovs-offload {
+        leaf label {
+          description
+            "Label to show in UI for the param";
+          type string;
+          default "OVS OFFLOAD";
+        }
+
+        leaf vm {
+          description
+            "Number of VMs with the EPA attribute";
+          type uint64;
+        }
+
+        leaf unit {
+          description
+            "Unit label to show in the UI";
+          type string;
+          default "vms";
+        }
+
+      }
+
+      container ddio {
+        leaf label {
+          description
+            "Label to show in UI for the param";
+          type string;
+          default "DDIO";
+        }
+
+        leaf vm {
+          description
+            "Number of VMs with the EPA attribute";
+          type uint64;
+        }
+
+        leaf unit {
+          description
+            "Unit label to show in the UI";
+          type string;
+          default "vms";
+        }
+
+      }
+
+      container cat {
+        leaf label {
+          description
+            "Label to show in UI for the param";
+          type string;
+          default "CAT";
+        }
+
+        leaf vm {
+          description
+            "Number of VMs with the EPA attribute";
+          type uint64;
+        }
+
+        leaf unit {
+          description
+            "Unit label to show in the UI";
+          type string;
+          default "vms";
+        }
+      }
+
+      container cmt {
+        leaf label {
+          description
+            "Label to show in UI for the param";
+          type string;
+          default "CMT";
+        }
+
+        leaf vm {
+          description
+            "Number of VMs with the EPA attribute";
+          type uint64;
+        }
+
+        leaf unit {
+          description
+            "Unit label to show in the UI";
+          type string;
+          default "vms";
+        }
+
+      }
+    }
+    uses operational-events;
+  }
+
+  augment /nsr:ns-instance-opdata {
+    uses nsd-ref-count;
+  }
+
+  augment /nsr:ns-instance-opdata/nsr:nsr/nsr:vlr {
+    leaf assigned-subnet {
+      description "Subnet added for the VL";
+      type string;
+    }
+    leaf cloud-account {
+      description
+        "The configured cloud account in which the VL is instantiated within.";
+      type leafref {
+        path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+      }
+    }
+    leaf om-datacenter {
+      description
+        "Openmano datacenter name to use when instantiating
+         the network service.  This is only used when openmano
+         is selected as the cloud account.  This should be superseded
+         by multiple cloud accounts when that becomes available.";
+      type string;
+    }
+  }
+
+  augment /nsr:ns-instance-opdata/nsr:nsr/nsr:constituent-vnfr-ref {
+    leaf cloud-account {
+      description
+        "The configured cloud account in which the VNF is instantiated within.
+         All VDU's, Virtual Links, and provider networks will be requested
+         using the cloud-account's associated CAL instance";
+      type leafref {
+        path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+      }
+    }
+    leaf om-datacenter {
+      description
+        "Openmano datacenter name to use when instantiating
+         the network service.  This is only used when openmano
+         is selected as the cloud account.  This should be superseded
+         by multiple cloud accounts when that becomes available.";
+      type string;
+    }
+  }
+
+  augment /nsr:ns-instance-config {
+    leaf nfvi-polling-period {
+      description
+        "Defines the period (in seconds) at which the NFVI metrics are polled.";
+      type uint64;
+      default 4;
+    }
+  }
+
+  notification nsm-notification {
+    description "Notification for NSM Events.
+        The timestamp of this event is automatically expressed 
+        in human readble format - 1970-01-01T00:00:00Z";
+
+    leaf event {
+      description "Short name of the event";
+      type string;
+    }
+
+    leaf description {
+      description "The description of this event";
+      type string;
+    }
+
+    leaf details {
+      description "The detailed description of this event, in case of errors";
+      type string;
+    }
+  }
+}
+
+// vim: sw=2
diff --git a/models/plugins/yang/rw-topology.yang b/models/plugins/yang/rw-topology.yang
new file mode 100644 (file)
index 0000000..ff1f2ba
--- /dev/null
@@ -0,0 +1,126 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-topology {
+    namespace "http://riftio.com/ns/riftware-1.0/rw-topology";
+    prefix rw-topology;
+
+    import ietf-inet-types {prefix inet;}
+    import ietf-network {prefix nw;}
+    import ietf-network-topology {prefix nt;}
+    import ietf-l2-topology {prefix l2t;}
+
+    revision "2015-10-20" {
+        description "Initial revision of IP level addressing for L2 host topology";
+    }
+
+    grouping ip-address-group {
+      description "IP addresses if present for L2 termination points";
+      container ip-attributes {
+        description "L2 termination points containing IP addresses";
+        list ip-addresses {
+          key ip;
+          leaf ip {
+            type inet:ip-address;
+            description "IPv4 or IPv6 address";
+          }
+        }
+      }
+    } // grouping ip-address-group
+
+
+    grouping rw-network-attributes {
+      description "RW Topology scope attributes";
+      container rw-network-attributes {
+        description "Containing RW network attributes";
+        leaf name {
+          type string;
+          description "Name of the RW Topology network";
+        }
+        leaf sdn-account-name {
+          type string;
+          description "Name of the SDN account from which topology is got"; 
+        }
+      }
+    }
+
+    grouping rw-node-attributes {
+      description "RW node attributes";
+      container rw-node-attributes {
+        description "Containing RW node attributes";
+        leaf name {
+          type string;
+          description "Node name";
+        }
+        leaf ovs-bridge-name {
+          type string;
+          description "Name of OVS bridge";
+        }
+      }
+    }
+
+    grouping rw-link-attributes {
+      description "RW link attributes";
+      container rw-link-attributes {
+        description "Containing RW link attributes";
+        leaf name {
+          type string;
+          description "Link name";
+        }
+      }
+    }
+
+    grouping rw-termination-point-attributes {
+      description "RW termination point attributes";
+      container rw-termination-point-attributes {
+        description "Containing RW TP attributes";
+        leaf description {
+          type string;
+          description "Port description";
+        }
+        uses ip-address-group;
+      }
+    }
+
+    augment "/nw:network" {
+      description
+        "Configuration parameters for the RW network
+         as a whole";
+      uses rw-network-attributes;
+    }
+
+    augment "/nw:network/nw:node" {
+      description
+        "Configuration parameters for RW at the node
+         level";
+      uses rw-node-attributes;
+    }
+
+    augment "/nw:network/nt:link" {
+      description "Augment RW topology link information";
+      uses rw-link-attributes;
+    }
+
+    augment "/nw:network/nw:node/nt:termination-point" {
+      description
+        "Augment RW topology termination point configuration";
+      uses rw-termination-point-attributes;
+    }
+}
diff --git a/models/plugins/yang/rw-vld.yang b/models/plugins/yang/rw-vld.yang
new file mode 100644 (file)
index 0000000..5027480
--- /dev/null
@@ -0,0 +1,39 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-vld
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-vld";
+  prefix "rw-vld";
+
+  import vld {
+    prefix "vld";
+  }
+
+  revision 2015-09-10 {
+    description
+      "Initial revision. This YANG file augments
+       the base MANO VLD";
+    reference
+      "Derived from earlier versions of base YANG files";
+  }
+}
+
+// vim: sw=2
diff --git a/models/plugins/yang/rw-vlr.yang b/models/plugins/yang/rw-vlr.yang
new file mode 100644 (file)
index 0000000..739dbec
--- /dev/null
@@ -0,0 +1,78 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-vlr
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-vlr";
+  prefix "rw-vlr";
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  import vlr {
+    prefix "vlr";
+  }
+
+  import rw-cloud {
+    prefix "rwcloud";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  revision 2015-09-30 {
+    description
+      "Initial revision. This YANG file augments
+       the base MANO VNFD";
+    reference
+      "Derived from earlier versions of base YANG files";
+  }
+
+  augment /vlr:vlr-catalog/vlr:vlr {
+    leaf cloud-account {
+      description
+        "The cloud account to use when requesting resources for
+         this vlr";
+      type leafref {
+        path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+      }
+    }
+
+    leaf network_pool {
+      description "The network pool the resource was allocated from.";
+      type string;
+    }
+
+    leaf assigned-subnet {
+      description "Subnet added for the VL";
+      type string;
+    }
+
+    leaf operational-status-details {
+      description
+        "The error message in case of a failed VLR operational status";
+      type string;
+    }
+  }
+}
+
+// vim: sw=2
diff --git a/models/plugins/yang/rw-vnfd.yang b/models/plugins/yang/rw-vnfd.yang
new file mode 100644 (file)
index 0000000..29eb852
--- /dev/null
@@ -0,0 +1,117 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-vnfd
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-vnfd";
+  prefix "rw-vnfd";
+
+  import vnfd {
+    prefix "vnfd";
+  }
+
+  import rwvcs-types {
+    prefix "rwvcstypes";
+  }
+
+  import rw-pb-ext { prefix "rwpb"; }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  revision 2015-09-10 {
+    description
+      "Initial revision. This YANG file augments
+       the base MANO VNFD";
+    reference
+      "Derived from earlier versions of base YANG files";
+  }
+
+  augment /vnfd:vnfd-catalog/vnfd:vnfd {
+    uses manotypes:control-param;
+    uses manotypes:action-param;
+    leaf meta {
+      description
+        "Any meta-data needed by the UI";
+      type string;
+    }
+    list component {
+      description
+          "This section defines the RIFT.ware
+           virtual components";
+      key "component-name";
+      rwpb:msg-new VcsComponent;
+      rwpb:application-request-point;
+
+      leaf component-name {
+        description "";
+        type string;
+      }
+
+      leaf component-type {
+        description "";
+        type rwvcstypes:component_type;
+        mandatory true;
+      }
+
+      choice component {
+        case rwvcs-rwcollection {
+          uses rwvcstypes:rwvcs-rwcollection;
+        }
+        case rwvcs-rwvm {
+          uses rwvcstypes:rwvcs-rwvm;
+        }
+        case rwvcs-rwproc {
+          uses rwvcstypes:rwvcs-rwproc;
+        }
+        case native-proc {
+          uses rwvcstypes:native-proc;
+        }
+        case rwvcs-rwtasklet {
+          uses rwvcstypes:rwvcs-rwtasklet;
+        }
+      }
+    } // list component
+  }
+
+  augment /vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vdu {
+    leaf vcs-component-ref {
+      description
+          "This defines the software components using the
+           RIFT.ware Virtual Component System (VCS). This
+           also allows specifying a state machine during
+           the VM startup.
+           NOTE: This is a significant addition to MANO,
+           since MANO doesn't clearly specify a method to
+           identify various software components in a VM.
+           Also using a state machine is not something that
+           is well described in MANO.";
+      type leafref {
+        path "/vnfd:vnfd-catalog/vnfd:vnfd/rw-vnfd:component/rw-vnfd:component-name";
+      }
+    }
+  }
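+
+  // Informative example (a sketch, not part of the model): a VNFD
+  // using these augments might declare a component and reference it
+  // from a VDU roughly as follows. The component name and the
+  // component-type value are hypothetical:
+  //
+  //   <vnfd>
+  //     <component>
+  //       <component-name>trafgen-tasklet</component-name>
+  //       <component-type>RWTASKLET</component-type>
+  //     </component>
+  //     <vdu>
+  //       <vcs-component-ref>trafgen-tasklet</vcs-component-ref>
+  //     </vdu>
+  //   </vnfd>
+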
+}
+// vim: sw=2
diff --git a/models/plugins/yang/rw-vnfr.tailf.yang b/models/plugins/yang/rw-vnfr.tailf.yang
new file mode 100644 (file)
index 0000000..6090fcf
--- /dev/null
@@ -0,0 +1,50 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-vnfr-annotation
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-vnfr-annotation";
+  prefix "rw-vnfr-ann";
+
+  import tailf-common {
+    prefix tailf;
+  }
+
+  import rw-vnfr {
+    prefix rw-vnfr;
+  }
+
+  import vnfr {
+    prefix vnfr;
+  }
+
+  tailf:annotate "/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count" {
+    tailf:callpoint rw_callpoint;
+  }
+
+  tailf:annotate "/vnfr:vnfr-catalog/vnfr:vnfr/rw-vnfr:operational-events" {
+    tailf:callpoint rw_callpoint;
+  }
+
+  tailf:annotate "/rw-vnfr:vnfr-console" {
+    tailf:callpoint rw_callpoint;
+  }
+
+}
diff --git a/models/plugins/yang/rw-vnfr.yang b/models/plugins/yang/rw-vnfr.yang
new file mode 100644 (file)
index 0000000..eb027e0
--- /dev/null
@@ -0,0 +1,324 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-vnfr
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-vnfr";
+  prefix "rw-vnfr";
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  import rw-pb-ext { prefix "rwpb"; }
+
+  import vnfr {
+    prefix "vnfr";
+  }
+
+  import vnfd {
+    prefix "vnfd";
+  }
+
+  import rw-cloud {
+    prefix "rwcloud";
+  }
+
+  import rwvcs-types {
+    prefix "rwvcstypes";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import ietf-inet-types {
+    prefix "inet";
+  }
+
+  revision 2015-09-10 {
+    description
+      "Initial revision. This YANG file augments
+       the base MANO VNFD";
+    reference
+      "Derived from earlier versions of base YANG files";
+  }
+
+  grouping vnfr-operational-events {
+    list operational-events {
+      key "id";
+      description
+        "Recent operational events for VNFR
+        Though the model does not impose any restrictions on the numbe of events, 
+        the max operational events will be limited to the most recent 10"; 
+
+      leaf id {
+        description "The id of the instance";
+        type uint64;
+      }
+
+      leaf timestamp {
+        description
+          "The timestamp of this event expressed as seconds since
+          unix epoch - 1970-01-01T00:00:00Z";
+        type uint32;
+      }
+      leaf event {
+        description "The event";
+        type enumeration {
+          rwpb:enum-type "VnfrOperationalEvent";
+          enum instantiate-rcvd;
+          enum vl-inited;
+          enum vnf-inited;
+          enum running;
+          enum terminate-rcvd;
+          enum vnf-terminated;
+          enum vl-terminated;
+          enum terminated;
+        }
+      }
+      leaf description {
+        description
+          "The description of this event";
+        type string;
+      }
+    }
+  }
+
+  grouping vdur-operational-events {
+    list operational-events {
+      key "id";
+      description
+        "Recent operational events for VDUR
+        Though the model does not impose any restrictions on the numbe of events, 
+        the max operational events will be limited to the most recent 10"; 
+
+      leaf id {
+        description "The id of the instance";
+        type uint64;
+      }
+
+      leaf timestamp {
+        description
+          "The timestamp of this event expressed as seconds since
+          unix epoch - 1970-01-01T00:00:00Z";
+        type uint32;
+      }
+      leaf event {
+        description "The event";
+        type enumeration {
+          rwpb:enum-type "VdurOperationalEvent";
+          enum instantiate-rcvd;
+          enum vm-allocation-requested;
+          enum running;
+          enum terminate-rcvd;
+          enum vm-terminate-requested;
+          enum terminated;
+        }
+      }
+      leaf description {
+        description
+          "The description of this event";
+        type string;
+      }
+    }
+  }
+
+  augment /vnfr:vnfr-catalog/vnfr:vnfr {
+    uses manotypes:action-param;
+    uses manotypes:control-param;
+
+    leaf cloud-account {
+      description
+        "The cloud account to use when requesting resources for
+         this vnf";
+      type leafref {
+        path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+      }
+    }
+
+    leaf om-datacenter {
+      description
+          "Openmano datacenter name to use when instantiating
+          the network service.  This is only used when openmano
+          is selected as the cloud account.  This should be superseded
+          by multiple cloud accounts when that becomes available.";
+      type string;
+    }
+
+    container nfvi-metrics {
+      container vm {
+        leaf label {
+          description
+            "Label to show in UI";
+          type string;
+          default "VM";
+        }
+
+        leaf active-vm {
+          description
+            "The number of active VMs.";
+          type uint64;
+        }
+
+        leaf inactive-vm {
+          description
+            "The number of inactive VMs.";
+          type uint64;
+        }
+      }
+      
+      uses manotypes:nfvi-metrics;
+    }
+
+    list component {
+      description
+          "This section defines the RIFT.ware
+           virtual components";
+      key "component-name";
+      rwpb:msg-new VcsComponentOp;
+      rwpb:application-request-point;
+
+      leaf component-name {
+        description "";
+        type string;
+      }
+
+      leaf component-type {
+        description "";
+        type rwvcstypes:component_type;
+        mandatory true;
+      }
+
+      choice component {
+        case rwvcs-rwcollection {
+          uses rwvcstypes:rwvcs-rwcollection;
+        }
+        case rwvcs-rwvm {
+          uses rwvcstypes:rwvcs-rwvm;
+        }
+        case rwvcs-rwproc {
+          uses rwvcstypes:rwvcs-rwproc;
+        }
+        case native-proc {
+          uses rwvcstypes:native-proc;
+        }
+        case rwvcs-rwtasklet {
+          uses rwvcstypes:rwvcs-rwtasklet;
+        }
+      }
+    } // list component
+
+    uses vnfr-operational-events;
+
+    leaf operational-status-details {
+      description
+        "The error message in case of a failed VNFR operational status";
+      type string;
+    }
+  }
+
+  augment /vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vdur {
+    leaf vm-pool {
+      description
+        "The pool from which this vm was allocated from";
+      type string;
+    }
+
+    container nfvi-metrics {
+      uses manotypes:nfvi-metrics;
+    }
+
+    leaf vcs-component-ref {
+      description
+          "This defines the software components using the
+           RIFT.ware Virtual Component System (VCS). This
+           also allows specifying a state machine during
+           the VM startup.
+           NOTE: This is a significant addition to MANO,
+           since MANO doesn't clearly specify a method to
+           identify various software components in a VM.
+           Also using a state machine is not something that
+           is well described in MANO.";
+      type leafref {
+        path "/vnfr:vnfr-catalog/vnfr:vnfr/rw-vnfr:component/rw-vnfr:component-name";
+      }
+    }
+
+    uses vdur-operational-events;
+
+    leaf operational-status-details {
+      description
+        "The error message in case of a failed VDU operational status";
+      type string;
+    }
+  }
+  grouping vnfd-ref-count {
+    list vnfd-ref-count {
+      key "vnfd-id-ref";
+      description "This table maintains the number of VNFRs used by each VNFD";
+
+      leaf vnfd-id-ref {
+        description "Reference to VNFD";
+        type leafref {
+          path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
+        }
+      }
+      leaf instance-ref-count {
+        description
+          "Reference count for the number of VNFRs refering this VNFD.
+           Every VNF Record instantiated using this descriptor takes
+           a reference on the VNFD and releases the reference when the
+           virtual network service is terminated. This desciptor cannot
+           be deleted when this counter is non zero";
+        type uint64;
+      }
+    }
+  }
+  augment /vnfr:vnfr-catalog {
+    uses vnfd-ref-count;
+  }
+
+  container vnfr-console {
+    config false;
+    list vnfr {
+      key "id";
+      leaf id {
+        description "Identifier for the VNFR.";
+        type yang:uuid;
+      }
+      list vdur {
+        description "List of Virtual Deployment Units";
+        key "id";
+        leaf id {
+          description "Unique id for the VDU";
+          type yang:uuid;
+        }
+        leaf console-url {
+          description "Console URL for this VDU, if available";
+          type inet:uri;
+        }
+      }
+    }
+  }
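+
+  // Informative example (a sketch, not part of the model): reading
+  // this container might yield operational data such as the
+  // following (identifiers and URL illustrative):
+  //
+  //   <vnfr-console xmlns="http://riftio.com/ns/riftware-1.0/rw-vnfr">
+  //     <vnfr>
+  //       <id>f81d4fae-7dec-11d0-a765-00a0c91e6bf6</id>
+  //       <vdur>
+  //         <id>0ed2c4f8-7dec-11d0-a765-00a0c91e6bf6</id>
+  //         <console-url>http://10.66.4.25:6080/vnc_auto.html</console-url>
+  //       </vdur>
+  //     </vnfr>
+  //   </vnfr-console>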
+
+}
+
+// vim: sw=2
diff --git a/models/plugins/yang/vld.yang b/models/plugins/yang/vld.yang
new file mode 100644 (file)
index 0000000..2747887
--- /dev/null
@@ -0,0 +1,141 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module vld
+{
+  namespace "urn:ietf:params:xml:ns:yang:nfvo:vld";
+  prefix "vld";
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import vnfd {
+    prefix "vnfd";
+  }
+
+  import ietf-inet-types {
+    prefix "inet";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  revision 2015-09-10 {
+    description
+      "Initial revision. This YANG file defines
+       the Virtual Link Descriptor (VLD)";
+    reference
+      "Derived from earlier versions of base YANG files";
+  }
+
+  container vld-catalog {
+
+    list vld {
+      key "id";
+
+      leaf id {
+        description "Identifier for the VLD.";
+        type yang:uuid;
+      }
+
+      leaf name {
+        description "Virtual Link Descriptor (VLD) name.";
+        type string;
+      }
+
+      leaf short-name {
+        description "Short name for VLD for UI";
+        type string;
+      }
+
+      leaf vendor {
+        description "Provider of the VLD.";
+        type string;
+      }
+
+      leaf description {
+        description "Description of the VLD.";
+        type string;
+      }
+
+      leaf version {
+        description "Version of the VLD";
+        type string;
+      }
+
+      leaf type {
+        type manotypes:virtual-link-type;
+      }
+
+      leaf root-bandwidth {
+        description
+            "For ELAN this is the aggregate bandwidth.";
+        type uint64;
+      }
+
+      leaf leaf-bandwidth {
+        description
+            "For ELAN this is the bandwidth of branches.";
+        type uint64;
+      }
+
+      list vnfd-connection-point-ref {
+        description
+            "A list of references to connection points.";
+        key "vnfd-ref member-vnf-index-ref";
+
+        leaf vnfd-ref {
+          description "A reference to a vnfd";
+          type leafref {
+            path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
+          }
+        }
+
+        leaf member-vnf-index-ref {
+          description 
+              "A reference to the consituent-vnfd id in nsd. 
+              Should have been a leafref to:
+                '/nsd:nsd-catalog:/nsd:nsd/constituent-vnfd/member-vnf-index-ref'. 
+              Instead using direct leaf to avoid circular reference.";
+          type uint64; 
+        }
+
+        leaf vnfd-connection-point-ref {
+          description 
+              "A reference to a connection point name in a vnfd";
+          type leafref {
+            path "/vnfd:vnfd-catalog/vnfd:vnfd" 
+               + "[vnfd:id = current()/../vld:vnfd-ref]"
+               + "/vnfd:connection-point/vnfd:name";
+          }
+        }
+      }
+
+      // replicate for pnfd container here
+      uses manotypes:provider-network;
+    }
+  }
+}
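To make the composite key above concrete, here is an illustrative vld-catalog
entry expressed as plain Python data (a sketch only: the uuid, the names, the
index, and the ELAN type value are hypothetical):

    # Illustrative VLD instance mirroring the YANG tree above.
    vld_entry = {
        "id": "11111111-2222-3333-4444-555555555555",
        "name": "mgmt-vl",
        "type": "ELAN",
        "vnfd-connection-point-ref": [
            {
                # Composite key: (vnfd-ref, member-vnf-index-ref).
                "vnfd-ref": "ping-vnfd-id",
                "member-vnf-index-ref": 1,
                "vnfd-connection-point-ref": "ping_vnfd/cp0",
            },
        ],
    }

Because member-vnf-index-ref is a plain uint64 rather than a leafref (per the
note in its description), the same vnfd-ref can appear once per constituent
index.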
diff --git a/models/plugins/yang/vlr.tailf.yang b/models/plugins/yang/vlr.tailf.yang
new file mode 100644 (file)
index 0000000..4bed1d2
--- /dev/null
@@ -0,0 +1,37 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module vlr-annotation
+{
+  namespace "http://riftio.com/ns/riftware-1.0/vlr-annotation";
+  prefix "vlr-ann";
+
+  import tailf-common {
+    prefix tailf;
+  }
+
+  import vlr {
+    prefix vlr;
+  }
+
+  tailf:annotate "/vlr:vlr-catalog" {
+    tailf:callpoint rw_callpoint;
+  }
+}
diff --git a/models/plugins/yang/vlr.yang b/models/plugins/yang/vlr.yang
new file mode 100644 (file)
index 0000000..ef3d603
--- /dev/null
@@ -0,0 +1,186 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module vlr
+{
+  namespace "urn:ietf:params:xml:ns:yang:nfvo:vlr";
+  prefix "vlr";
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import ietf-inet-types {
+    prefix "inet";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  import vld {
+    prefix "vld";
+  }
+
+  revision 2015-09-10 {
+    description
+      "Initial revision. This YANG file defines
+       the Virtual Link Record (VLR)";
+    reference
+      "Derived from earlier versions of base YANG files";
+  }
+
+  container vlr-catalog {
+    config false;
+
+    list vlr {
+      key "id";
+      unique "name";
+
+      leaf id {
+        description "Identifier for the VLR.";
+        type yang:uuid;
+      }
+
+      leaf name {
+        description "VLR name.";
+        type string;
+      }
+
+      leaf nsr-id-ref {
+        description 
+            "NS instance identifier. 
+             This is effectively a leafref to /nsr:ns-instance-config/nsr:nsr/nsr:id";
+        type yang:uuid;
+      }
+
+      leaf vld-ref {
+        description
+          "Reference to VLD
+           /nsr:ns-instance-config/nsr:nsr[nsr:id=../nsr-id-ref]/nsd/vld:vld/vld:id";
+        type string;
+      }
+
+      leaf res-id {
+        description "Identifier for resmgr id mapping";
+        type yang:uuid;
+      }
+
+      leaf short-name {
+        description "Short name for VLR for UI";
+        type string;
+      }
+
+      leaf vendor {
+        description "Provider of the VLR.";
+        type string;
+      }
+
+      leaf description {
+        description "Description of the VLR.";
+        type string;
+      }
+
+      leaf version {
+        description "Version of the VLR";
+        type string;
+      }
+
+      leaf type {
+        type manotypes:virtual-link-type;
+      }
+
+      leaf root-bandwidth {
+        description
+            "For ELAN this is the aggregate bandwidth.";
+        type uint64;
+      }
+
+      leaf leaf-bandwidth {
+        description
+            "For ELAN this is the bandwidth of branches.";
+        type uint64;
+      }
+
+      leaf create-time {
+        description
+          "Creation timestamp of this Virtual Link.
+          The timestamp is expressed as seconds 
+          since unix epoch - 1970-01-01T00:00:00Z";
+
+        type uint32;
+      }
+
+      leaf network-id {
+        description 
+            "Identifier for the allocated network resource.";
+        type string;
+      }
+
+      leaf vim-network-name {
+        description
+            "Name of network in VIM account. This is used to indicate
+            pre-provisioned network name in cloud account.";
+        type string;
+      }
+
+      // replicate for pnfd container here
+
+      uses manotypes:provider-network;
+      uses manotypes:ip-profile-info;
+      
+      leaf status {
+        description
+            "Status of the virtual link record.";
+        type enumeration {
+          enum LINK_UP;
+          enum DEGRADED;
+          enum LINK_DOWN;
+        }
+      }
+      leaf operational-status {
+        description
+          "The operational status of the Virtual Link
+            init                 : The VL is in init state.
+            vl-alloc-pending     : The VL allocation is pending in the VIM.
+            running              : The VL is up and running in the VIM.
+            vl-terminate-pending : The VL is being terminated in the VIM.
+            terminated           : The VL is terminated in the VIM.
+            failed               : The VL instantiation failed in VIM.
+          ";
+
+        type enumeration {
+          rwpb:enum-type "VlOperationalStatus";
+          enum init;
+          enum vl-alloc-pending;
+          enum running;
+          enum vl-terminate-pending;
+          enum terminated;
+          enum failed;
+        }
+      }
+    }
+  }
+}
+
diff --git a/models/plugins/yang/vnfd.yang b/models/plugins/yang/vnfd.yang
new file mode 100644 (file)
index 0000000..c035bd3
--- /dev/null
@@ -0,0 +1,515 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module vnfd
+{
+  namespace "urn:ietf:params:xml:ns:yang:nfvo:vnfd";
+  prefix "vnfd";
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import ietf-inet-types {
+    prefix "inet";
+  }
+
+  revision 2015-09-10 {
+    description
+      "Initial revision. This YANG file defines
+       the Virtual Network Function (VNF)";
+    reference
+      "Derived from earlier versions of base YANG files";
+  }
+
+  grouping common-connection-point {
+    leaf name {
+      description "Name of the connection point";
+      type string;
+    }
+
+    leaf id {
+      description "Identifier for the internal connection points";
+      type string;
+    }
+
+    leaf short-name {
+      description "Short name of the connection point";
+      type string;
+    }
+
+    leaf type {
+      description "Type of the connection point.";
+      type manotypes:connection-point-type;
+    }
+  }
+
+  grouping virtual-interface {
+    container virtual-interface {
+      description
+          "Container for the virtual interface properties";
+
+      leaf type {
+        description
+            "Specifies the type of virtual interface
+             between VM and host.
+             VIRTIO          : Use the traditional VIRTIO interface.
+             PCI-PASSTHROUGH : Use PCI-PASSTHROUGH interface.
+             SR-IOV          : Use SR-IOV interface.
+             E1000           : Emulate E1000 interface.
+             RTL8139         : Emulate RTL8139 interface.
+             PCNET           : Emulate PCNET interface.
+             OM-MGMT         : Used to specify openmano mgmt external-connection type";
+
+        type enumeration {
+          enum OM-MGMT;
+          enum PCI-PASSTHROUGH;
+          enum SR-IOV;
+          enum VIRTIO;
+          enum E1000;
+          enum RTL8139;
+          enum PCNET;
+        }
+        default "VIRTIO";
+      }
+
+      leaf vpci {
+        description
+            "Specifies the virtual PCI address. Expressed in
+             the following format dddd:dd:dd.d. For example
+             0000:00:12.0. This information can be passed
+             as metadata during VM creation.";
+        type string;
+      }
+
+      leaf bandwidth {
+        description
+            "Aggregate bandwidth of the NIC.";
+        type uint64;
+      }
+    }
+  }
+
+  container vnfd-catalog {
+
+    description
+        "Virtual Network Function Descriptor (VNFD).";
+
+    list vnfd {
+      key "id";
+
+      leaf id {
+        description "Identifier for the VNFD.";
+        type string;
+      }
+
+      leaf name {
+        description "VNFD name.";
+        mandatory true;
+        type string;
+      }
+
+      leaf short-name {
+        description "VNFD short name.";
+        type string;
+      }
+
+      leaf vendor {
+        description "Vendor of the VNFD.";
+        type string;
+      }
+
+      leaf logo {
+        description
+            "Vendor logo for the Virtual Network Function";
+        type string;
+      }
+
+      leaf description {
+        description "Description of the VNFD.";
+        type string;
+      }
+
+      leaf version {
+        description "Version of the VNFD";
+        type string;
+      }
+
+      uses manotypes:vnf-configuration;
+
+      container mgmt-interface {
+        description
+            "Interface over which the VNF is managed.";
+
+        choice endpoint-type {
+          description
+              "Indicates the type of management endpoint.";
+
+          case ip {
+            description
+                "Specifies the static IP address for managing the VNF.";
+            leaf ip-address {
+              type inet:ip-address;
+            }
+          }
+
+          case vdu-id {
+            description
+                "Use the default management interface on this VDU.";
+            leaf vdu-id {
+              type leafref {
+                path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vdu/vnfd:id";
+              }
+            }
+          }
+
+          case cp {
+            description
+                "Use the ip address associated with this connection point.";
+            leaf cp {
+              type leafref {
+                path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:connection-point/vnfd:name";
+              }
+            }
+          }
+        }
+
+        leaf port {
+          description
+              "Port for the management interface.";
+          type inet:port-number;
+        }
+
+        container dashboard-params {
+          description "Parameters for the VNF dashboard";
+
+          leaf path {
+            description "The HTTP path for the dashboard";
+            type string;
+          }
+
+          leaf https {
+            description "Pick HTTPS instead of HTTP , Default is false";
+            type boolean;
+          }
+
+          leaf port {
+            description "The HTTP port for the dashboard";
+            type inet:port-number;
+          }
+        }
+      }
+
+      list internal-vld {
+        key "id";
+        description
+            "List of Internal Virtual Link Descriptors (VLD).
+            The internal VLD describes the basic topology of
+            the connectivity (e.g. E-LAN, E-Line, E-Tree)
+            between internal VNF components of the system.";
+
+        leaf id {
+          description "Identifier for the VLD";
+          type string;
+        }
+
+        leaf name {
+          description "Name of the internal VLD";
+          type string;
+        }
+
+        leaf short-name {
+          description "Short name of the internal VLD";
+          type string;
+        }
+
+        leaf description {
+          type string;
+        }
+
+        leaf type {
+          type manotypes:virtual-link-type;
+        }
+
+        leaf root-bandwidth {
+          description
+              "For ELAN this is the aggregate bandwidth.";
+          type uint64;
+        }
+
+        leaf leaf-bandwidth {
+          description
+              "For ELAN this is the bandwidth of branches.";
+          type uint64;
+        }
+
+        leaf-list internal-connection-point-ref {
+          type leafref {
+            path "../../vdu/internal-connection-point/id";
+          }
+        }
+
+        uses manotypes:provider-network;
+      }
+
+      list connection-point {
+        key "name";
+        description
+            "List for external connection points. Each VNF has one
+            or more external connection points. As the name
+            implies, external connection points are used for
+            connecting the VNF to other VNFs or to external networks.
+            Each VNF exposes these connection points to the
+            orchestrator. The orchestrator can construct network
+            services by connecting the connection points between
+            different VNFs. The NFVO will use VLDs and VNFFGs at
+            the network service level to construct network services.";
+
+        uses common-connection-point;
+      }
+
+      list vdu {
+        description "List of Virtual Deployment Units";
+        key "id";
+
+        leaf id {
+          description "Unique id for the VDU";
+          type string;
+        }
+
+        leaf name {
+          description "Unique name for the VDU";
+          type string;
+        }
+
+        leaf description {
+            description "Description of the VDU.";
+            type string;
+        }
+
+        leaf count {
+          description "Number of instances of VDU";
+          type uint64;
+        }
+
+        leaf mgmt-vpci {
+          description
+              "Specifies the virtual PCI address. Expressed in
+             the following format dddd:dd:dd.d. For example
+             0000:00:12.0. This information can be passed
+             as metadata during VM creation.";
+          type string;
+        }
+
+        uses manotypes:vm-flavor;
+        uses manotypes:guest-epa;
+        uses manotypes:vswitch-epa;
+        uses manotypes:hypervisor-epa;
+        uses manotypes:host-epa;
+
+        list alarm {
+          key "alarm-id";
+
+          uses manotypes:alarm;
+        }
+
+        leaf image {
+          description
+            "Image name for the software image.
+             If the image name is found within the VNF package, it will
+             be uploaded to all cloud accounts during the onboarding process.
+             Otherwise, the image must be added to the cloud account with
+             the same name as entered here.
+            ";
+          mandatory true;
+          type string;
+        }
+
+        leaf image-checksum {
+          description
+            "Image md5sum for the software image.
+            The md5sum, if provided, along with the image name uniquely
+            identifies an image uploaded to the CAL.
+            ";
+          type string;
+        }
+
+        choice cloud-init-input {
+          description
+            "Indicates how the contents of cloud-init script are provided.
+             There are 2 choices - inline or in a file";
+
+          case inline {
+            leaf cloud-init {
+              description
+                "Contents of cloud-init script, provided inline, in cloud-config format";
+              type string;
+            }
+          }
+
+          case filename {
+            leaf cloud-init-file {
+              description
+                "Name of file with contents of cloud-init script in cloud-config format";
+              type string;
+            }
+          }
+        }
+
+        list internal-connection-point {
+          key "id";
+          description
+              "List for internal connection points. Each VNFC
+              has zero or more internal connection points.
+              Internal connection points are used for connecting
+              the VNF components internal to the VNF. If a VNF
+              has only one VNFC, it may not have any internal
+              connection points.";
+
+          uses common-connection-point;
+
+          leaf internal-vld-ref {
+            type leafref {
+              path "../../../internal-vld/id";
+            }
+          }
+        }
+
+        list internal-interface {
+          description
+              "List of internal interfaces for the VNF";
+          key name;
+
+          leaf name {
+            description
+                "Name of internal interface. Note that this
+                name has only local significance to the VDU.";
+            type string;
+          }
+
+          leaf vdu-internal-connection-point-ref {
+            type leafref {
+              path "../../internal-connection-point/id";
+            }
+          }
+          uses virtual-interface;
+        }
+
+        list external-interface {
+          description
+              "List of external interfaces for the VNF.
+              The external interfaces enable sending
+              traffic to and from VNF.";
+          key name;
+
+          leaf name {
+            description
+                "Name of the external interface. Note that
+                this name has only local significance.";
+            type string;
+          }
+
+          leaf vnfd-connection-point-ref {
+            description
+              "Name of the external connection point.";
+            type leafref {
+              path "../../../connection-point/name";
+            }
+          }
+          uses virtual-interface;
+        }
+      }
+
+      list vdu-dependency {
+        description
+            "List of VDU dependencies.";
+
+        key vdu-source-ref;
+        leaf vdu-source-ref {
+          type leafref {
+            path "../../vdu/id";
+          }
+        }
+
+        leaf vdu-depends-on-ref {
+          description
+              "Reference to the VDU that
+              source VDU depends.";
+          type leafref {
+            path "../../vdu/id";
+          }
+        }
+      }
+
+      leaf service-function-chain {
+        description "Type of node in Service Function Chaining Architecture";
+
+        type enumeration {
+          enum UNAWARE;
+          enum CLASSIFIER;
+          enum SF;
+          enum SFF;
+        }
+        default "UNAWARE";
+      }
+
+      leaf service-function-type {
+        description
+            "Type of Service Function.
+             NOTE: This needs to map with Service Function Type in ODL to
+             support VNFFG. Service Function Type is a mandatory param in ODL
+             SFC. This is temporarily set to string for ease of use.";
+        type string;
+      }
+
+      uses manotypes:monitoring-param;
+
+      list placement-groups {
+        description "List of placement groups at VNF level";
+
+        key "name";
+        uses manotypes:placement-group-info;
+        
+        list member-vdus {
+
+          description
+              "List of VDUs that are part of this placement group";
+          key "member-vdu-ref";
+
+          leaf member-vdu-ref {
+            type leafref {
+              path "../../../vdu/id";
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+// vim: sw=2
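A minimal VNFD instance tying the pieces above together, sketched as plain
Python data (all ids, names, the image, and the VPORT connection-point type
value are illustrative assumptions, not taken from the models):

    # One-VDU VNFD, managed via one external connection point.
    vnfd_entry = {
        "id": "ping-vnfd-id",
        "name": "ping_vnfd",
        # mgmt-interface uses the "cp" case of the endpoint-type choice.
        "mgmt-interface": {"cp": "ping_vnfd/cp0", "port": 18888},
        "connection-point": [{"name": "ping_vnfd/cp0", "type": "VPORT"}],
        "vdu": [
            {
                "id": "ping-vdu-id",
                "name": "ping-vdu",
                "count": 1,
                "image": "Fedora-x86_64-20-20131211.1-sda.qcow2",
                # external-interface binds a VDU NIC to the VNF-level CP.
                "external-interface": [
                    {
                        "name": "eth0",
                        "vnfd-connection-point-ref": "ping_vnfd/cp0",
                        "virtual-interface": {"type": "VIRTIO"},
                    },
                ],
            },
        ],
    }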
diff --git a/models/plugins/yang/vnffgd.yang b/models/plugins/yang/vnffgd.yang
new file mode 100644 (file)
index 0000000..99347ae
--- /dev/null
@@ -0,0 +1,83 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module vnffgd
+{
+  namespace "urn:ietf:params:xml:ns:yang:nfvo:vnffgd";
+  prefix "vnffgd";
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import ietf-inet-types {
+    prefix "inet";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  revision 2014-10-27 {
+    description
+      "Initial revision. This YANG file defines 
+       the VNF Forwarding Graph Descriptor (VNFFGD)";
+    reference
+      "Derived from earlier versions of base YANG files";
+  }
+
+  container vnffgd-catalog {
+
+    list vnffgd {
+      key "id";
+
+      leaf name {
+        description "VNF Forwarding Graph Descriptor name.";
+        type string;
+      }
+
+      leaf id {
+        description "Identifier for the VNFFGD.";
+        type yang:uuid;
+      }
+
+      leaf provider {
+        description "Provider of the VNFFGD.";
+        type string;
+      }
+
+      leaf description {
+        description "Description of the VNFFGD.";
+        type string;
+      }
+
+      leaf version {
+        description "Version of the VNFFGD";
+        type string;
+      }
+
+      //TODO: Add more content here
+    }
+  }
+}
diff --git a/models/plugins/yang/vnfr.tailf.yang b/models/plugins/yang/vnfr.tailf.yang
new file mode 100644 (file)
index 0000000..150dc9a
--- /dev/null
@@ -0,0 +1,45 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module vnfr-annotation
+{
+  namespace "http://riftio.com/ns/riftware-1.0/vnfr-annotation";
+  prefix "vnfr-ann";
+
+  import tailf-common {
+    prefix tailf;
+  }
+
+  import vnfr {
+    prefix vnfr;
+  }
+
+  tailf:annotate "/vnfr:vnfr-catalog" {
+    tailf:callpoint rw_callpoint;
+  }
+
+  tailf:annotate "/vnfr:create-alarm" {
+    tailf:actionpoint rw_actionpoint;
+  }
+
+  tailf:annotate "/vnfr:destroy-alarm" {
+    tailf:actionpoint rw_actionpoint;
+  }
+}
diff --git a/models/plugins/yang/vnfr.yang b/models/plugins/yang/vnfr.yang
new file mode 100644 (file)
index 0000000..4a427d4
--- /dev/null
@@ -0,0 +1,526 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module vnfr
+{
+  namespace "urn:ietf:params:xml:ns:yang:nfvo:vnfr";
+  prefix "vnfr";
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import vnfd {
+    prefix "vnfd";
+  }
+
+  import nsd {
+    prefix "nsd";
+  }
+
+  import vlr {
+    prefix "vlr";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import ietf-inet-types {
+    prefix "inet";
+  }
+
+  revision 2015-09-10 {
+    description
+      "Initial revision. This YANG file defines
+       the Virtual Network Function Record (VNFR)";
+    reference
+      "Derived from earlier versions of base YANG files";
+  }
+
+  grouping placement-group-info {
+    list placement-groups-info {
+      description
+          "
+          Placement groups to which this VDU belongs and its
+          cloud construct
+          ";
+      key "name";
+      uses manotypes:placement-group-info;
+      uses manotypes:placement-group-input;
+    }  
+  }
+  
+  
+  grouping virtual-interface {
+    container virtual-interface {
+      description
+          "Container for the virtual interface properties";
+
+      leaf type {
+        description
+            "Specifies the type of virtual interface
+             between VM and host.
+             VIRTIO          : Use the traditional VIRTIO interface.
+             PCI-PASSTHROUGH : Use PCI-PASSTHROUGH interface.
+             SR-IOV          : Use SR-IOV interface.";
+        type enumeration {
+          enum VIRTIO;
+          enum PCI-PASSTHROUGH;
+          enum SR-IOV;
+        }
+      }
+
+      leaf bandwidth {
+        description
+            "Aggregate bandwidth of the NIC.";
+        type uint64;
+      }
+
+      leaf ovs-offload {
+        description
+            "Defines if the NIC supports OVS offload.
+             MANDATORY : OVS offload support in the NIC is mandatory.
+             PREFERRED : OVS offload support in the NIC is preferred.";
+        type enumeration {
+          enum MANDATORY;
+          enum PREFERRED;
+        }
+      }
+
+      leaf vendor-id {
+        description
+            "Specifies the vendor specific id for
+             the device. This is used when a NIC from
+             specific HW vendor is required.";
+        type string;
+      }
+
+      leaf datapath-library {
+        description
+            "Specifies the name and version of the datapath
+             library the NIC is expected to support.";
+        type string;
+      }
+
+      leaf provider-network-name {
+        description
+            "Name of the provider network to which this
+             NIC is attached.";
+        type string;
+      }
+    }
+  }
+
+  container vnfr-catalog {
+    config false;
+    list vnfr {
+      description
+          "Virtual Network Function Record (VNFR).";
+      key "id";
+      unique "name";
+
+      leaf id {
+        description "Identifier for the VNFR.";
+        type yang:uuid;
+      }
+
+      leaf nsr-id-ref {
+        description
+            "NS instance identifier.
+             This is effectively a leafref to /nsr:ns-instance-config/nsr:nsr/nsr:id";
+        type yang:uuid;
+      }
+
+      leaf member-vnf-index-ref {
+        description "Reference to member VNF index in Network service.";
+        type leafref {
+          path "/nsd:nsd-catalog/nsd:nsd/nsd:constituent-vnfd/nsd:member-vnf-index";
+        }
+      }
+
+      leaf dashboard-url {
+        description "Dashboard URL";
+        type inet:uri;
+      }
+
+      leaf name {
+        description "VNFR name.";
+        type string;
+      }
+
+      leaf short-name {
+        description "VNFR short name.";
+        type string;
+      }
+
+      leaf vendor {
+        description "Vendor of the VNFR.";
+        type string;
+      }
+
+      leaf description {
+        description "Description of the VNFR.";
+        type string;
+      }
+
+      leaf version {
+        description "Version of the VNFR";
+        type string;
+      }
+
+      leaf create-time {
+        description 
+          "Creation timestamp of this Virtual Network 
+          Function.  The timestamp is expressed as 
+          seconds since unix epoch - 1970-01-01T00:00:00Z";
+
+        type uint32;
+      }
+
+      leaf vnfd-ref {
+        description "Reference to VNFD";
+        type leafref {
+          path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
+        }
+      }
+
+      // Use parameters provided here to configure this VNF
+      uses manotypes:vnf-configuration;
+
+      // Mainly used by Mon-params & dashboard url
+      container mgmt-interface {
+        leaf ip-address {
+          type inet:ip-address;
+        }
+        leaf port {
+          type inet:port-number;
+        }
+      }
+
+      list internal-vlr {
+        key "vlr-ref";
+
+        leaf vlr-ref {
+          description "Reference to a VLR record in the VLR catalog";
+          type leafref {
+            path "/vlr:vlr-catalog/vlr:vlr/vlr:id";
+          }
+        }
+
+        leaf-list internal-connection-point-ref {
+          type leafref {
+            path "../../vdur/internal-connection-point/id";
+          }
+        }
+      }
+
+      list connection-point {
+        key "name";
+        description
+            "List for external connection points. Each VNF has one
+             or more external connection points. As the name
+             implies, external connection points are used for
+             connecting the VNF to other VNFs or to external networks.
+             Each VNF exposes these connection points to the
+             orchestrator. The orchestrator can construct network
+             services by connecting the connection points between
+             different VNFs. The NFVO will use VLDs and VNFFGs at
+             the network service level to construct network services.";
+
+        uses vnfd:common-connection-point;
+
+        leaf vlr-ref {
+          description 
+              "Reference to the VLR associated with this connection point";
+          type  leafref {
+            path "/vlr:vlr-catalog/vlr:vlr/vlr:id";
+          }
+        }
+
+        leaf ip-address {
+          description 
+              "IP address assigned to the external connection point";
+          type inet:ip-address;
+        }
+        leaf connection-point-id {
+          rwpb:field-inline "true";
+          rwpb:field-string-max 64;
+          type string;
+        }
+      }
+
+      list vdur {
+        description "List of Virtual Deployment Units";
+        key "id";
+        unique "name";
+
+        leaf id {
+          description "Unique id for the VDU";
+          type yang:uuid;
+        }
+
+        leaf name {
+          description "name of the instantiated VDUR";
+          type string;
+        }
+
+        leaf vdu-id-ref {
+          type leafref {
+            path "/vnfd:vnfd-catalog/vnfd:vnfd" 
+               + "[vnfd:id = current()/../../vnfr:vnfd-ref]"
+               + "/vnfd:vdu/vnfd:id";
+          }
+        }
+
+        leaf vim-id {
+          description "Allocated VM resource id";
+          type string;
+        }
+
+        leaf flavor-id {
+          description "VIM assigned flavor id";
+          type string;
+        }
+
+        leaf image-id {
+          description "VIM assigned image id";
+          type string;
+        }
+
+        leaf management-ip {
+          description "Management IP address";
+          type inet:ip-address;
+        }
+
+        leaf vm-management-ip {
+          description "VM Private Management IP address";
+          type inet:ip-address;
+        }
+
+        leaf console-url {
+          description "Console URL for this VDU, if available";
+          type inet:uri;
+        }
+
+        uses manotypes:vm-flavor;
+        uses manotypes:guest-epa;
+        uses manotypes:vswitch-epa;
+        uses manotypes:hypervisor-epa;
+        uses manotypes:host-epa;
+
+        list alarms {
+          description
+              "A list of the alarms that have been created for this VDU";
+
+          key "alarm-id";
+          uses manotypes:alarm;
+        }
+
+        list internal-connection-point {
+          key "id";
+          description
+              "List for internal connection points. Each VNFC
+               has zero or more internal connection points.
+               Internal connection points are used for connecting
+               the VNF components internal to the VNF. If a VNF
+               has only one VNFC, it may not have any internal
+               connection points.";
+
+          uses vnfd:common-connection-point;
+
+          leaf ip-address {
+            description 
+                "IP address assigned to the external connection point";
+            type inet:ip-address;
+          }
+        }
+
+        list internal-interface {
+          description
+              "List of internal interfaces for the VNF";
+          key name;
+
+          leaf name {
+            description
+                "Name of internal interface. Note that this
+                 name has only local significance to the VDU.";
+            type string;
+          }
+
+          leaf vdur-internal-connection-point-ref {
+            type leafref {
+              path "../../internal-connection-point/id";
+            }
+          }
+          uses virtual-interface;
+        }
+
+        list external-interface {
+          description
+              "List of external interfaces for the VNF.
+               The external interfaces enable sending
+               traffic to and from the VNF.";
+          key name;
+
+          leaf name {
+            description
+                "Name of the external interface. Note that
+                 this name has only local significance.";
+            type string;
+          }
+
+          leaf vnfd-connection-point-ref {
+            description
+              "Name of the external connection point.";
+            type leafref {
+              path "../../../connection-point/name";
+            }
+          }
+          uses virtual-interface;
+        }
+        leaf operational-status {
+          description
+            "The operational status of the VDU 
+              init                : The VDU has just started.
+              vm-init-phase       : The VDU's VM is being created in the VIM.
+              vm-alloc-pending    : The VM allocation is pending in the VIM.
+              running             : The VDU is active in the VM.
+              terminate           : The VDU is being terminated.
+              vm-terminate-phase  : The VDU's VM is being terminated in the VIM.
+              terminated          : The VDU is in the terminated state.
+              failed              : The VDU instantiation failed.
+            ";
+
+          type enumeration {
+            rwpb:enum-type "VduOperationalStatus";
+            enum init;
+            enum vm-init-phase;
+            enum vm-alloc-pending;
+            enum running;
+            enum terminate;
+            enum vm-terminate-phase;
+            enum terminated;
+            enum failed;
+          }
+        }
+        uses placement-group-info;
+      }
+
+      uses manotypes:monitoring-param;
+
+      leaf operational-status {
+        description
+          "The operational status of the VNFR instance
+            init                : The VNF has just started.
+            vl-init-phase       : The internal VLs in the VNF are being instantiated.
+            vm-init-phase       : The VMs for VDUs in the VNF are being instantiated.
+            running             : The VNF is in running state.
+            terminate           : The VNF is being terminated.
+            vm-terminate-phase  : The VMs in the VNF are being terminated.
+            vl-terminate-phase  : The internal VLs in the VNF are being terminated.
+            terminated          : The VNF is in the terminated state.
+            failed              : The VNF instantiation failed
+          ";
+
+        type enumeration {
+          rwpb:enum-type "VnfrOperationalStatus";
+          enum init;
+          enum vl-init-phase;
+          enum vm-init-phase;
+          enum running;
+          enum terminate;
+          enum vm-terminate-phase;
+          enum vl-terminate-phase;
+          enum terminated;
+          enum failed;
+        }
+      }
+      leaf config-status {
+        description
+          "The configuration status of the NS instance
+            configuring: At least one of the VNFs in this instance is in configuring state
+            configured:  All the VNFs in this NS instance are configured or config-not-needed state
+          ";
+
+        type enumeration {
+          enum configuring {
+            value 1;
+          }
+          enum configured {
+            value 2;
+          }
+          enum failed {
+            value 3;
+          }
+          enum config-not-needed {
+            value 4;
+          }
+        }
+      }
+      uses placement-group-info;
+    }
+  }
+
+  rpc create-alarm {
+    description "Create an alert for a running VDU";
+    input {
+      leaf cloud-account {
+        mandatory true;
+        type string;
+      }
+
+      leaf vdur-id {
+        mandatory true;
+        type string;
+      }
+
+      container alarm {
+        uses manotypes:alarm;
+      }
+    }
+
+    output {
+      leaf alarm-id {
+        type string;
+      }
+    }
+  }
+
+  rpc destroy-alarm {
+    description "Destroy an alert that is associated with a running VDU";
+    input {
+      leaf cloud-account {
+        mandatory true;
+        type string;
+      }
+
+      leaf alarm-id {
+        mandatory true;
+        type string;
+      }
+    }
+  }
+}
+
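A sketch of the create-alarm RPC input defined above (the account name, the
VDUR id, and the alarm fields are hypothetical, and the manotypes:alarm body
is abridged since its full field set is defined elsewhere):

    # Illustrative create-alarm input as plain Python data.
    create_alarm_input = {
        "cloud-account": "openstack-1",
        "vdur-id": "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee",
        "alarm": {
            "name": "cpu-high",
            # ... remaining manotypes:alarm fields (thresholds, actions) ...
        },
    }
    # On success, the RPC output carries the VIM-assigned "alarm-id",
    # which is later passed to destroy-alarm.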
diff --git a/rwcal/CMakeLists.txt b/rwcal/CMakeLists.txt
new file mode 100644 (file)
index 0000000..8eba04a
--- /dev/null
@@ -0,0 +1,60 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 2014/05/22
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(PKG_NAME rwcal)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+set(subdirs src plugins test)
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
+
+install(FILES include/riftware/rwcal-api.h
+  DESTINATION usr/include/riftware
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+    PROGRAMS
+    etc/userdata-template
+  DESTINATION etc
+  COMPONENT ${PKG_LONG_NAME}
+  )
+
+
+rift_python_install_tree(
+  FILES
+    rift/cal/client.py
+    rift/cal/server/__init__.py
+    rift/cal/server/app.py
+    rift/cal/server/operations.py
+    rift/cal/server/server.py
+    rift/cal/utils.py    
+    rift/cal/rwcal_status.py
+  PYTHON3_ONLY
+  COMPONENT rwcal-1.0)
+
+install(
+  PROGRAMS
+    rift/cal/cloudsim
+  DESTINATION usr/bin
+  COMPONENT rwcal-1.0
+  )
+
diff --git a/rwcal/Makefile b/rwcal/Makefile
new file mode 100644 (file)
index 0000000..14f3400
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 05/22/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/etc/userdata-template b/rwcal/etc/userdata-template
new file mode 100644 (file)
index 0000000..3195864
--- /dev/null
@@ -0,0 +1,33 @@
+#cloud-config
+
+# run commands
+# default: none
+# runcmd contains a list of either lists or a string
+# each item will be executed in order at rc.local like level with
+# output to the console
+# - if the item is a list, the items will be properly executed as if
+#   passed to execve(3) (with the first arg as the command).
+# - if the item is a string, it will be simply written to the file and
+#   will be interpreted by 'sh'
+#
+# Note, that the list has to be proper yaml, so you have to escape
+# any characters yaml would eat (':' can be problematic)
+runcmd:
+ - [ ls, -l, / ]
+ # For some unknown reason, the minion sometimes does not start
+ # (and doesn't even leave a log file).  Force a start just in case.
+ - echo Sleeping for 5 seconds and attempting to start minion
+ - sleep 5
+ - /bin/systemctl start salt-minion.service
+
+salt_minion:
+  conf:
+    master: {master_ip}
+    id: {lxcname}
+    acceptance_wait_time: 1
+    recon_default: 100
+    recon_max: 1000
+    recon_randomize: False
+    log_level: debug
+
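The {master_ip} and {lxcname} placeholders follow Python str.format syntax,
so the template can be rendered before being handed to cloud-init. A minimal
sketch (the file path and the substituted values are illustrative):

    # Fill in the salt-minion master address and minion id.
    with open("rwcal/etc/userdata-template") as f:
        template = f.read()
    userdata = template.format(master_ip="192.0.2.10", lxcname="rift-lxc-1")

Note the template previously declared runcmd twice; YAML duplicate keys are
last-wins, so the two lists are merged into one above.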
diff --git a/rwcal/include/riftware/rwcal-api.h b/rwcal/include/riftware/rwcal-api.h
new file mode 100644 (file)
index 0000000..6ef5f6a
--- /dev/null
@@ -0,0 +1,561 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ */
+
+
+/**
+ * @file rwcal-api.h
+ * @author Justin Bronder (justin.bronder@riftio.com)
+ * @date 09/29/2014
+ * @brief Top level API include for rwcal submodule
+ */
+
+#ifndef __RWCAL_API_H__
+#define __RWCAL_API_H__
+
+#include <stdbool.h>
+
+#include <libpeas/peas.h>
+
+#include <rwcal.h>
+#include <rwlib.h>
+#include <rw-manifest.pb-c.h>
+#include <rw_vx_plugin.h>
+
+#include "rwlog.h"
+
+__BEGIN_DECLS
+
+struct rwcal_module_s {
+  rw_vx_framework_t * framework;
+  rw_vx_modinst_common_t *mip;
+
+  PeasExtension * cloud;
+  RwCalCloud * cloud_cls;
+  RwCalCloudIface * cloud_iface;
+
+  rwlog_ctx_t *rwlog_instance;
+};
+typedef struct rwcal_module_s * rwcal_module_ptr_t;
+
+// Redefine yang autonames
+typedef RWPB_E(RwManifest_RwcalCloudType) rwcal_cloud_type;
+
+/*
+ * Allocate a rwcal module.  Once allocated, the clients within
+ * the module still need to be initialized.  For rwzk, see
+ * rwcal_rwzk_{kazoo,zake}_init().  For rwcloud, see
+ * rwcal_cloud_init().  It is a fatal error to attempt to use any
+ * client before it has been initialized.  However, it is
+ * perfectly fine to not initialize a client that will remain
+ * unused.  Note that every function contains the client that it
+ * will use as part of the name, just after the rwcal_ prefix.
+ *
+ * @return - rwcal module handle or NULL on failure.
+ */
+rwcal_module_ptr_t rwcal_module_alloc();
+
+/*
+ * Deallocate a rwcal module.
+ *
+ * @param - pointer to the rwcal module to be deallocated.
+ */
+void rwcal_module_free(rwcal_module_ptr_t * rwcal);
+
+
+/*
+ * Initialize the rwcal cloud controller.
+ *
+ * key/secret for various cloud types:
+ *  EC2: ACCESS_ID/SECRET_KEY
+ *
+ * @param rwcal       - module handle.
+ * @return        - RW_STATUS_SUCCESS,
+ *                  RW_STATUS_NOTFOUND if the type is unknown,
+ *                  RW_STATUS_FAILURE otherwise.
+ */
+rw_status_t rwcal_cloud_init(rwcal_module_ptr_t rwcal);
+
+/*
+ * Get a list of the names of the available images that can be
+ * used to start a new VM.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param images      - on success, contains a NULL-terminated
+ *                      list of images.
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_image_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **images);
+
+/*
+ * Delete Image.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param image_id    - id of image to be deleted
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_image(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * image_id);
+
+/*
+ * Create a flavor.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param flavor      - rwpb_gi_Rwcal_FlavorInfoItem object describing the
+ *                      flavor to be created
+ * @param flavor_id   - on success, contains a NULL-terminated string containing the new flavor_id
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_create_flavor(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_FlavorInfoItem *flavor,
+    char *flavor_id);
+
+
+/*
+ * Delete flavor.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param flavor_id   - id of flavor to be deleted
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_flavor(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * flavor_id);
+
+/*
+ * Get a specific flavor
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param flavor_id   - id of the flavor to return
+ * @param flavor      - rwpb_gi_Rwcal_FlavorInfoItem object containing the
+ *                      details of the requested flavor
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_flavor(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * flavor_id,
+    rwpb_gi_Rwcal_FlavorInfoItem **flavor);
+
+/*
+ * Get a list of the details for all flavors
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param flavors     - on success, contains a list of flavor info objects
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_flavor_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **flavors);
+
+/*
+ * Create a virtual machine.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param vm          - the information that defines what kind of VM will be
+ *                      created
+ * @param vm_id       - on success, contains a NULL-terminated string
+ *                      containing the new vm id
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_create_vm(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VMInfoItem *vm,
+    char **vm_id);
+
+/*
+ * Delete VM.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param vm_id       - id of vm to be deleted
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_vm(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * vm_id);
+
+/*
+ * Reboot VM.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param vm_id       - id of vm to be rebooted
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_reboot_vm(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * vm_id);
+
+/*
+ * Start VM.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param vm_id       - id of a vm to start
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_start_vm(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * vm_id);
+
+/*
+ * Stop VM.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param vm_id       - id of a vm to stop
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_stop_vm(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * vm_id);
+
+/*
+ * Get a list of the names of the available vms
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param vms         - on success, contains a NULL-terminated
+ *                      list of vms.
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_vm_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources** vms);
+
+/*
+ * Create a tenant.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param tenant_name - name to assign to the tenant.
+ * @param tenant_info - on success, contains a NULL-terminated list of tenant_info
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_create_tenant(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * tenant_name,
+    char *** tenant_info);
+
+/*
+ * Delete tenant.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param tenant_id   - id of tenant to be deleted
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_tenant(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * tenant_id);
+
+/*
+ * Get a list of the available tenants
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param tenants     - on success, contains a NULL-terminated
+ *                      list of tenants.
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_tenant_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **tenants);
+
+/*
+ * Create a role.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param role_name   - name to assign to the role.
+ * @param role_info   - on success, contains a NULL-terminated list of role_info
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_create_role(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * role_name,
+    char *** role_info);
+
+/*
+ * Delete role.
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param role_id     - id of role to be deleted
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_role(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char * role_id);
+
+/*
+ * Get a list of the available roles
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param roles       - on success, contains a NULL-terminated
+ *                      list of roles.
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_role_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **roles);
+
+/*
+ * Add a new host
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param host        - host info
+ * @param host_id     - on success, contains a NULL-terminated string
+ *                      containing the new host_id
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_add_host(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_HostInfoItem *host,
+    char **host_id);
+
+/*
+ * Remove a host
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param host_id     - the id of the host to remove
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_remove_host(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char *host_id);
+
+/*
+ * Get a specific host
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param host_id     - the id of the host to return
+ * @param host        - the requested host info
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_host(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char *host_id,
+    rwpb_gi_Rwcal_HostInfoItem **host);
+
+/*
+ * Get a list of hosts
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param hosts       - on success, contains a NULL-terminated list of hosts.
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_host_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **hosts);
+
+/*
+ * Create a new port
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param port        - port info
+ * @param port_id     - on success, contains a NULL-terminated string
+ *                      containing the new port id
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_create_port(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_PortInfoItem *port,
+    char **port_id);
+
+/*
+ * Delete a port
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param port_id     - the id of the port to remove
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_port(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char *port_id);
+
+/*
+ * Get a specific port
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param port_id     - the id of the port to return
+ * @param port        - the requested port info
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_port(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char *port_id,
+    rwpb_gi_Rwcal_PortInfoItem **port);
+
+/*
+ * Get a list of ports
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param ports       - on success, contains a NULL-terminated list of ports.
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_port_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **ports);
+
+/*
+ * Create a new network
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param network     - network info
+ * @param network_id  - on success, contains a NULL-terminated string
+ *                      containing the new network id
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_create_network(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_NetworkInfoItem *network,
+    char **network_id);
+
+/*
+ * Delete a network
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param network_id  - the id of the network to remove
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_delete_network(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char *network_id);
+
+/*
+ * Get a specific network
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param network_id  - the id of the network to return
+ * @param network     - the requested network info
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_network(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    const char *network_id,
+    rwpb_gi_Rwcal_NetworkInfoItem **network);
+
+/*
+ * Get the management network
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param network     - the management network info
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_management_network(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_NetworkInfoItem **network);
+
+/*
+ * Get a list of networks
+ *
+ * @param rwcal       - module handle.
+ * @param account     - cloud account information.
+ * @param networks    - on success, contains a NULL-terminated list of networks.
+ *
+ * @return            - rw_status_t
+ */
+rw_status_t rwcal_get_network_list(
+    rwcal_module_ptr_t rwcal,
+    rwpb_gi_Rwcal_CloudAccount *account,
+    rwpb_gi_Rwcal_VimResources **networks);
+
+/*
+ * Get an RwLog context so that log messages can go to rwlog
+ *
+ * @param rwcal       - module handle.
+ *
+ * @return            - rwlog_ctx_t
+ */
+rwlog_ctx_t *rwcal_get_rwlog_ctx(rwcal_module_ptr_t rwcal);
+
+__END_DECLS
+
+#endif
+
+
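These C declarations are exported through GObject Introspection, so the same
CAL surface is callable from Python. A minimal sketch, assuming the
rwcal_cloudsim plugin added later in this change, with log_hdl supplied by a
running tasklet (as in rwcalproxytasklet.py below):

    import rw_peas
    import gi
    gi.require_version('RwcalYang', '1.0')
    gi.require_version('RwCal', '1.0')
    from gi.repository import RwcalYang

    plugin = rw_peas.PeasPlugin('rwcal_cloudsim', 'RwCal-1.0')
    cal = plugin.get_interface("Cloud")
    cal.init(log_hdl)  # log_hdl: an rwlog handle owned by the tasklet
    account = RwcalYang.CloudAccount(account_type="cloudsim")
    status, networks = cal.get_network_list(account)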
diff --git a/rwcal/plugins/CMakeLists.txt b/rwcal/plugins/CMakeLists.txt
new file mode 100644 (file)
index 0000000..28c67ce
--- /dev/null
@@ -0,0 +1,23 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 2014/05/22
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(subdirs vala yang rwcalproxytasklet)
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwcal/plugins/rwcalproxytasklet/CMakeLists.txt b/rwcal/plugins/rwcalproxytasklet/CMakeLists.txt
new file mode 100644 (file)
index 0000000..b700ca6
--- /dev/null
@@ -0,0 +1,31 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwcalproxytasklet rwcalproxytasklet.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/rwcalproxytasklet/__init__.py
+    rift/tasklets/rwcalproxytasklet/rwcalproxytasklet.py
+  COMPONENT rwcalproxytasklet-1.0
+  PYTHON3_ONLY)
diff --git a/rwcal/plugins/rwcalproxytasklet/rift/tasklets/rwcalproxytasklet/__init__.py b/rwcal/plugins/rwcalproxytasklet/rift/tasklets/rwcalproxytasklet/__init__.py
new file mode 100644 (file)
index 0000000..94af0b3
--- /dev/null
@@ -0,0 +1 @@
+from .rwcalproxytasklet import RwCalProxyTasklet
diff --git a/rwcal/plugins/rwcalproxytasklet/rift/tasklets/rwcalproxytasklet/rwcalproxytasklet.py b/rwcal/plugins/rwcalproxytasklet/rift/tasklets/rwcalproxytasklet/rwcalproxytasklet.py
new file mode 100644 (file)
index 0000000..bb2c355
--- /dev/null
@@ -0,0 +1,633 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file rwcalproxytasklet.py
+@author Austin Cormier(austin.cormier@riftio.com)
+@date 2015-10-20
+"""
+
+import asyncio
+import collections
+import collections.abc
+import concurrent.futures
+import logging
+import os
+import sys
+
+import tornado
+import tornado.escape
+import tornado.gen
+import tornado.httpserver
+import tornado.web
+import tornado.platform.asyncio
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwCal', '1.0')
+
+from gi.repository import (
+    RwDts as rwdts,
+    RwcalYang,
+    RwTypes,
+)
+
+import rw_peas
+import rift.tasklets
+
+if sys.version_info < (3, 4, 4):
+    # getattr avoids a hard reference to asyncio.async, which no longer even
+    # parses on Python 3.7+ where "async" is a keyword.
+    asyncio.ensure_future = getattr(asyncio, "async")
+
+
+class CalCallFailure(Exception):
+    pass
+
+
+class RPCParam(object):
+    def __init__(self, key, proto_type=None):
+        self.key = key
+        self.proto_type = proto_type
+
+
+class CalRequestHandler(tornado.web.RequestHandler):
+    def initialize(self, log, loop, cal, account, executor, cal_method,
+                   input_params=None, output_params=None):
+        self.log = log
+        self.loop = loop
+        self.cal = cal
+        self.account = account
+        self.executor = executor
+        self.cal_method = cal_method
+        self.input_params = input_params
+        self.output_params = output_params
+
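+    # CAL methods follow the GI convention of returning their status first,
+    # e.g. (RwStatus, VimResources) from get_image_list().  wrap_status_fn()
+    # checks that leading RwStatus and returns only the remaining output
+    # values, raising CalCallFailure on any non-SUCCESS status.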
+    def wrap_status_fn(self, fn, *args, **kwargs):
+        ret = fn(*args, **kwargs)
+        if not isinstance(ret, collections.abc.Iterable):
+            ret = [ret]
+
+        rw_status = ret[0]
+        if type(rw_status) != RwTypes.RwStatus:
+            raise ValueError("First return value of %s function was not a RwStatus" %
+                             fn.__name__)
+
+        if rw_status != RwTypes.RwStatus.SUCCESS:
+            msg = "%s returned %s" % (fn.__name__, str(rw_status))
+            self.log.error(msg)
+            raise CalCallFailure(msg)
+
+        return ret[1:]
+
+    @tornado.gen.coroutine
+    def post(self):
+        def body_to_cal_args():
+            cal_args = []
+            if self.input_params is None:
+                return cal_args
+
+            input_dict = tornado.escape.json_decode(self.request.body)
+            if len(input_dict) != len(self.input_params):
+                raise ValueError("Got %s parameters, expected %s" %
+                                 (len(input_dict), len(self.input_params)))
+
+            for input_param in self.input_params:
+                key = input_param.key
+                value = input_dict[key]
+                proto_type = input_param.proto_type
+
+                if proto_type is not None:
+                    proto_cls = getattr(RwcalYang, proto_type)
+                    self.log.debug("Deserializing into %s type", proto_cls)
+                    value = proto_cls.from_dict(value)
+
+                cal_args.append(value)
+
+            return cal_args
+
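+        # Illustrative wire format (hypothetical ids): for the get_image
+        # route below, body_to_cal_args() unpacks a POST body such as
+        #   {"image_id": "img-1234"}
+        # and cal_return_vals() answers with
+        #   {"return_vals": [{"key": "image", "value": {...},
+        #                     "proto_type": "ImageInfoItem"}]}
+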
+        def cal_return_vals(return_vals):
+            output_params = self.output_params
+            if output_params is None:
+                output_params = []
+
+            if len(return_vals) != len(output_params):
+                raise ValueError("Got %s return values.  Expected %s",
+                                 len(return_vals), len(output_params))
+
+            write_dict = {"return_vals": []}
+            for i, output_param in enumerate(output_params):
+                key = output_param.key
+                proto_type = output_param.proto_type
+                output_value = return_vals[i]
+
+                if proto_type is not None:
+                    output_value = output_value.as_dict()
+
+                return_val = {
+                        "key": key,
+                        "value": output_value,
+                        "proto_type": proto_type,
+                        }
+
+                write_dict["return_vals"].append(return_val)
+
+            return write_dict
+
+        @asyncio.coroutine
+        def handle_request():
+            self.log.debug("Got cloudsimproxy POST request: %s", self.request.body)
+            cal_args = body_to_cal_args()
+
+            # Execute the CAL request in a separate thread to prevent
+            # blocking the main loop.
+            return_vals = yield from self.loop.run_in_executor(
+                    self.executor,
+                    self.wrap_status_fn,
+                    getattr(self.cal, self.cal_method),
+                    self.account,
+                    *cal_args
+                    )
+
+            return cal_return_vals(return_vals)
+
+        f = asyncio.ensure_future(handle_request(), loop=self.loop)
+        return_dict = yield tornado.platform.asyncio.to_tornado_future(f)
+
+        self.log.debug("Responding to %s RPC with %s", self.cal_method, return_dict)
+
+        self.clear()
+        self.set_status(200)
+        self.write(return_dict)
+
+
+class CalProxyApp(tornado.web.Application):
+    def __init__(self, log, loop, cal_interface, cal_account):
+        self.log = log
+        self.loop = loop
+        self.cal = cal_interface
+        self.account = cal_account
+
+        attrs = dict(
+            log=self.log,
+            loop=self.loop,
+            cal=cal_interface,
+            account=cal_account,
+            # Create an executor with a single worker to prevent
+            # having multiple simultaneous calls into CAL (which is not thread-safe)
+            executor=concurrent.futures.ThreadPoolExecutor(1)
+            )
+
+        def mk_attrs(cal_method, input_params=None, output_params=None):
+            new_attrs = {
+                    "cal_method": cal_method,
+                    "input_params": input_params,
+                    "output_params": output_params
+                    }
+            new_attrs.update(attrs)
+
+            return new_attrs
+
+        super(CalProxyApp, self).__init__([
+            (r"/api/get_image_list", CalRequestHandler,
+                mk_attrs(
+                    cal_method="get_image_list",
+                    output_params=[
+                        RPCParam("images", "VimResources"),
+                        ]
+                    ),
+                ),
+
+            (r"/api/create_image", CalRequestHandler,
+                mk_attrs(
+                    cal_method="create_image",
+                    input_params=[
+                        RPCParam("image", "ImageInfoItem"),
+                        ],
+                    output_params=[
+                        RPCParam("image_id"),
+                        ]
+                    ),
+                ),
+
+            (r"/api/delete_image", CalRequestHandler,
+                mk_attrs(
+                    cal_method="delete_image",
+                    input_params=[
+                        RPCParam("image_id"),
+                        ],
+                    ),
+                ),
+
+            (r"/api/get_image", CalRequestHandler,
+                mk_attrs(
+                    cal_method="get_image",
+                    input_params=[
+                        RPCParam("image_id"),
+                        ],
+                    output_params=[
+                        RPCParam("image", "ImageInfoItem"),
+                        ],
+                    ),
+                ),
+
+            (r"/api/create_vm", CalRequestHandler,
+                mk_attrs(
+                    cal_method="create_vm",
+                    input_params=[
+                        RPCParam("vm", "VMInfoItem"),
+                        ],
+                    output_params=[
+                        RPCParam("vm_id"),
+                        ],
+                    ),
+                ),
+
+            (r"/api/start_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="start_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/stop_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="stop_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/reboot_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="reboot_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_vm_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_vm_list",
+                        output_params=[
+                            RPCParam("vms", "VimResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        output_params=[
+                            RPCParam("vms", "VMInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_flavor", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_flavor",
+                        input_params=[
+                            RPCParam("flavor", "FlavorInfoItem"),
+                            ],
+                        output_params=[
+                            RPCParam("flavor_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_flavor", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_flavor",
+                        input_params=[
+                            RPCParam("flavor_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_flavor_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_flavor_list",
+                        output_params=[
+                            RPCParam("flavors", "VimResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_flavor", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_flavor",
+                        input_params=[
+                            RPCParam("flavor_id"),
+                            ],
+                        output_params=[
+                            RPCParam("flavor", "FlavorInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_network", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_network",
+                        input_params=[
+                            RPCParam("network", "NetworkInfoItem"),
+                            ],
+                        output_params=[
+                            RPCParam("network_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_network", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_network",
+                        input_params=[
+                            RPCParam("network_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_network", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_network",
+                        input_params=[
+                            RPCParam("network_id"),
+                            ],
+                        output_params=[
+                            RPCParam("network", "NetworkInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_network_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_network_list",
+                        output_params=[
+                            RPCParam("networks", "VimResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_management_network", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_management_network",
+                        output_params=[
+                            RPCParam("network", "NetworkInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_port", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_port",
+                        input_params=[
+                            RPCParam("port", "PortInfoItem"),
+                            ],
+                        output_params=[
+                            RPCParam("port_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_port", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_port",
+                        input_params=[
+                            RPCParam("port_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_port", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_port",
+                        input_params=[
+                            RPCParam("port_id"),
+                            ],
+                        output_params=[
+                            RPCParam("port", "PortInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_port_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_port_list",
+                        output_params=[
+                            RPCParam("ports", "VimResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_virtual_link", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_virtual_link",
+                        input_params=[
+                            RPCParam("link_params", "VirtualLinkReqParams"),
+                            ],
+                        output_params=[
+                            RPCParam("link_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_virtual_link", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_virtual_link",
+                        input_params=[
+                            RPCParam("link_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_virtual_link", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_virtual_link",
+                        input_params=[
+                            RPCParam("link_id"),
+                            ],
+                        output_params=[
+                            RPCParam("response", "VirtualLinkInfoParams"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_virtual_link_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_virtual_link_list",
+                        output_params=[
+                            RPCParam("resources", "VNFResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_vdu", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_vdu",
+                        input_params=[
+                            RPCParam("vdu_params", "VDUInitParams"),
+                            ],
+                        output_params=[
+                            RPCParam("vdu_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/modify_vdu", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="modify_vdu",
+                        input_params=[
+                            RPCParam("vdu_params", "VDUModifyParams"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_vdu", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_vdu",
+                        input_params=[
+                            RPCParam("vdu_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_vdu", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_vdu",
+                        input_params=[
+                            RPCParam("vdu_id"),
+                            ],
+                        output_params=[
+                            RPCParam("response", "VDUInfoParams"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_vdu_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_vdu_list",
+                        output_params=[
+                            RPCParam("resources", "VNFResources"),
+                            ],
+                        ),
+                    )
+            ])
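+
+# Illustrative client sketch (hypothetical values; not part of the handlers
+# above): with the proxy listening on RwCalProxyTasklet.HTTP_PORT (9002),
+# an endpoint can be exercised with plain urllib:
+#
+#   import json, urllib.request
+#   body = json.dumps({"vm_id": "vm-1234"}).encode("utf-8")
+#   req = urllib.request.Request("http://127.0.0.1:9002/api/get_vm", data=body,
+#                                headers={"Content-Type": "application/json"})
+#   with urllib.request.urlopen(req) as resp:
+#       print(json.load(resp)["return_vals"])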
+
+
+class RwCalProxyTasklet(rift.tasklets.Tasklet):
+    HTTP_PORT = 9002
+    cal_interface = None
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        self.app = None
+        self.server = None
+
+    def get_cal_interface(self):
+        if RwCalProxyTasklet.cal_interface is None:
+            plugin = rw_peas.PeasPlugin('rwcal_cloudsim', 'RwCal-1.0')
+            engine, info, extension = plugin()
+
+            RwCalProxyTasklet.cal_interface = plugin.get_interface("Cloud")
+            RwCalProxyTasklet.cal_interface.init(self.log_hdl)
+
+        return RwCalProxyTasklet.cal_interface
+
+    def start(self):
+        """Tasklet entry point"""
+        self.log.setLevel(logging.DEBUG)
+
+        super().start()
+
+        cal = self.get_cal_interface()
+        account = RwcalYang.CloudAccount(account_type="cloudsim")
+
+        self.app = CalProxyApp(self.log, self.loop, cal, account)
+        self._dts = rift.tasklets.DTS(
+                self.tasklet_info,
+                RwcalYang.get_schema(),
+                self.loop,
+                self.on_dts_state_change
+                )
+
+        io_loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
+        self.server = tornado.httpserver.HTTPServer(
+                self.app,
+                io_loop=io_loop,
+                )
+
+        self.log.info("Starting Cal Proxy Http Server on port %s",
+                      RwCalProxyTasklet.HTTP_PORT)
+        self.server.listen(RwCalProxyTasklet.HTTP_PORT)
+
+    def stop(self):
+        try:
+            self.server.stop()
+            self._dts.deinit()
+        except Exception:
+            print("Caught Exception in LP stop:", sys.exc_info()[0])
+            raise
+
+    @asyncio.coroutine
+    def init(self):
+        pass
+
+    @asyncio.coroutine
+    def run(self):
+        pass
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Take action according to current dts state to transition
+        application into the corresponding application state
+
+        Arguments
+            state - current dts state
+        """
+
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self._dts.handle.set_state(next_state)
diff --git a/rwcal/plugins/rwcalproxytasklet/rwcalproxytasklet.py b/rwcal/plugins/rwcalproxytasklet/rwcalproxytasklet.py
new file mode 100644 (file)
index 0000000..c0a9c3f
--- /dev/null
@@ -0,0 +1,29 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwcalproxytasklet
+
+
+class Tasklet(rift.tasklets.rwcalproxytasklet.RwCalProxyTasklet):
+    pass
+
+# vim: sw=4
diff --git a/rwcal/plugins/vala/CMakeLists.txt b/rwcal/plugins/vala/CMakeLists.txt
new file mode 100644 (file)
index 0000000..3482277
--- /dev/null
@@ -0,0 +1,75 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf / Anil Gunturu
+# Creation Date: 05/22/2014
+# 
+
+##
+# Allow specific compiler warnings
+##
+rift_allow_compiler_warning(unused-but-set-variable)
+
+set(VALA_NAME rwcal)
+set(VALA_FILES ${VALA_NAME}.vala)
+set(VALA_VERSION 1.0)
+set(VALA_RELEASE 1)
+set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION})
+set(VALA_TYPELIB_PREFIX RwCal-${VALA_VERSION})
+
+rift_add_vala(
+  ${VALA_LONG_NAME}
+  VALA_FILES ${VALA_FILES}
+  VALA_PACKAGES
+    rw_types-1.0 rw_yang-1.0 rw_keyspec-1.0 rw_yang_pb-1.0 rw_schema_proto-1.0
+    rw_log_yang-1.0 rw_base_yang-1.0 rwcal_yang-1.0 rw_manifest_yang-1.0 protobuf_c-1.0 ietf_netconf_yang-1.0
+    rw_log-1.0
+  VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwvcs/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwlog/src
+  GIR_PATHS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwvcs/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwlog/src
+  GENERATE_HEADER_FILE ${VALA_NAME}.h
+  GENERATE_SO_FILE lib${VALA_LONG_NAME}.so
+  GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
+  GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
+  GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
+  DEPENDS rwcal_yang rwlog_gi rwschema_yang rwmanifest_yang
+  )
+
+rift_install_vala_artifacts(
+  HEADER_FILES ${VALA_NAME}.h
+  SO_FILES lib${VALA_LONG_NAME}.so
+  VAPI_FILES ${VALA_LONG_NAME}.vapi
+  GIR_FILES ${VALA_TYPELIB_PREFIX}.gir
+  TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib
+  COMPONENT ${PKG_LONG_NAME}
+  DEST_PREFIX .
+  )
+
+
+set(subdirs
+  rwcal_cloudsim
+  rwcal_cloudsimproxy
+  rwcal_mock
+  rwcal_openstack
+  rwcal_openmano
+  rwcal_aws
+  rwcal_openmano_vimconnector
+  )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwcal/plugins/vala/Makefile b/rwcal/plugins/vala/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/vala/rwcal.vala b/rwcal/plugins/vala/rwcal.vala
new file mode 100644 (file)
index 0000000..a14388e
--- /dev/null
@@ -0,0 +1,248 @@
+namespace RwCal {
+
+  public class RwcalStatus : GLib.Object {
+    public RwTypes.RwStatus status;
+    public string error_msg;
+    public string traceback;
+  }
+
+  public interface Cloud: GLib.Object {
+    /*
+     * Init routine
+     */
+    public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx);
+
+    /*
+     * Cloud Account Credentials Validation related API
+     */
+    public abstract RwTypes.RwStatus validate_cloud_creds(
+      Rwcal.CloudAccount account,
+      out Rwcal.CloudConnectionStatus status);
+
+    /*
+     * Image related APIs
+     */
+    public abstract RwTypes.RwStatus get_image_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources images);
+
+    public abstract RwTypes.RwStatus create_image(
+      Rwcal.CloudAccount account,
+      Rwcal.ImageInfoItem image,
+      out string image_id);
+
+    public abstract RwTypes.RwStatus delete_image(
+      Rwcal.CloudAccount account,
+      string image_id);
+
+    public abstract RwTypes.RwStatus get_image(
+        Rwcal.CloudAccount account,
+        string image_id,
+        out Rwcal.ImageInfoItem image);
+
+    /*
+     * VM Related APIs
+     */
+    public abstract RwTypes.RwStatus create_vm(
+      Rwcal.CloudAccount account,
+      Rwcal.VMInfoItem vm,
+      out string vm_id);
+
+    public abstract RwTypes.RwStatus start_vm(
+      Rwcal.CloudAccount account,
+      string vm_id);
+
+    public abstract RwTypes.RwStatus stop_vm(
+      Rwcal.CloudAccount account,
+      string vm_id);
+
+    public abstract RwTypes.RwStatus delete_vm(
+      Rwcal.CloudAccount account,
+      string vm_id);
+
+    public abstract RwTypes.RwStatus reboot_vm(
+      Rwcal.CloudAccount account,
+      string vm_id);
+
+    public abstract RwTypes.RwStatus get_vm_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources vms);
+
+    public abstract RwTypes.RwStatus get_vm(
+      Rwcal.CloudAccount account,
+      string vm_id,
+      out Rwcal.VMInfoItem vm);
+
+    /*
+     * Flavor related APIs
+     */
+    public abstract RwTypes.RwStatus create_flavor(
+      Rwcal.CloudAccount account,
+      Rwcal.FlavorInfoItem flavor_info_item,
+      out string flavor_id);
+
+    public abstract RwTypes.RwStatus delete_flavor(
+      Rwcal.CloudAccount account,
+      string flavor_id);
+
+    public abstract RwTypes.RwStatus get_flavor_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources flavors);
+
+    public abstract RwTypes.RwStatus get_flavor(
+      Rwcal.CloudAccount account,
+      string flavor_id,
+      out Rwcal.FlavorInfoItem flavor);
+
+
+    /*
+     * Tenant related APIs
+     */
+    public abstract RwTypes.RwStatus create_tenant(
+      Rwcal.CloudAccount account,
+      string tenant_name,
+      [CCode (array_length = false, array_null_terminated = true)]
+      out string [] tenant_info);
+
+    public abstract RwTypes.RwStatus delete_tenant(
+      Rwcal.CloudAccount account,
+      string tenant_id);
+
+    public abstract RwTypes.RwStatus get_tenant_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources tenants);
+
+    /*
+     * Role related APIs
+     */
+    public abstract RwTypes.RwStatus create_role(
+      Rwcal.CloudAccount account,
+      string role_name,
+      [CCode (array_length = false, array_null_terminated = true)]
+      out string [] role_info);
+
+    public abstract RwTypes.RwStatus delete_role(
+      Rwcal.CloudAccount account,
+      string role_id);
+
+    public abstract RwTypes.RwStatus get_role_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources roles);
+
+    /*
+     * Port related APIs
+     */
+    public abstract RwTypes.RwStatus create_port(
+      Rwcal.CloudAccount account,
+      Rwcal.PortInfoItem port,
+      out string port_id);
+
+    public abstract RwTypes.RwStatus delete_port(
+      Rwcal.CloudAccount account,
+      string port_id);
+
+    public abstract RwTypes.RwStatus get_port(
+      Rwcal.CloudAccount account,
+      string port_id,
+      out Rwcal.PortInfoItem port);
+
+    public abstract RwTypes.RwStatus get_port_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources ports);
+
+    /*
+     * Host related APIs
+     */
+    public abstract RwTypes.RwStatus add_host(
+      Rwcal.CloudAccount account,
+      Rwcal.HostInfoItem host,
+      out string host_id);
+
+    public abstract RwTypes.RwStatus remove_host(
+      Rwcal.CloudAccount account,
+      string host_id);
+
+    public abstract RwTypes.RwStatus get_host(
+      Rwcal.CloudAccount account,
+      string host_id,
+      out Rwcal.HostInfoItem host);
+
+    public abstract RwTypes.RwStatus get_host_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources hosts);
+
+    /*
+     * Network related APIs
+     */
+    public abstract RwTypes.RwStatus create_network(
+      Rwcal.CloudAccount account,
+      Rwcal.NetworkInfoItem network,
+      out string network_id);
+
+    public abstract RwTypes.RwStatus delete_network(
+      Rwcal.CloudAccount account,
+      string network_id);
+
+    public abstract RwTypes.RwStatus get_network(
+      Rwcal.CloudAccount account,
+      string network_id,
+      out Rwcal.NetworkInfoItem network);
+
+    public abstract RwTypes.RwStatus get_network_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VimResources networks);
+
+    public abstract RwTypes.RwStatus get_management_network(
+      Rwcal.CloudAccount account,
+      out Rwcal.NetworkInfoItem network);
+
+    /*
+     * Higher Order CAL APIs
+     */
+    public abstract void create_virtual_link(
+      Rwcal.CloudAccount account,
+      Rwcal.VirtualLinkReqParams link_params,
+      out RwcalStatus status,
+      out string link_id);
+    
+    public abstract RwTypes.RwStatus delete_virtual_link(
+      Rwcal.CloudAccount account,
+      string link_id);
+
+    public abstract RwTypes.RwStatus get_virtual_link(
+      Rwcal.CloudAccount account,
+      string link_id,
+      out Rwcal.VirtualLinkInfoParams response);
+
+    public abstract RwTypes.RwStatus get_virtual_link_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VNFResources resources);
+
+
+    public abstract void create_vdu(
+      Rwcal.CloudAccount account,
+      Rwcal.VDUInitParams vdu_params,
+      out RwcalStatus status,
+      out string vdu_id);
+
+    public abstract RwTypes.RwStatus modify_vdu(
+      Rwcal.CloudAccount account,
+      Rwcal.VDUModifyParams vdu_params);
+    
+    public abstract RwTypes.RwStatus delete_vdu(
+      Rwcal.CloudAccount account,
+      string vdu_id);
+
+    public abstract RwTypes.RwStatus get_vdu(
+      Rwcal.CloudAccount account,
+      string vdu_id,
+      out Rwcal.VDUInfoParams response);
+    
+    public abstract RwTypes.RwStatus get_vdu_list(
+      Rwcal.CloudAccount account,
+      out Rwcal.VNFResources resources);
+    
+  }
+}
+
+
diff --git a/rwcal/plugins/vala/rwcal_aws/CMakeLists.txt b/rwcal/plugins/vala/rwcal_aws/CMakeLists.txt
new file mode 100644 (file)
index 0000000..76430b1
--- /dev/null
@@ -0,0 +1,37 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+set(PKG_NAME rwcal-aws)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+rift_install_python_plugin(rwcal_aws rwcal_aws.py)
+
+rift_python_install_tree(
+  FILES
+    rift/rwcal/aws/__init__.py
+    rift/rwcal/aws/aws_table.py
+    rift/rwcal/aws/aws_drv.py
+    rift/rwcal/aws/exceptions.py
+    rift/rwcal/aws/prepare_vm.py
+    rift/rwcal/aws/delete_vm.py
+  PYTHON3_ONLY
+  COMPONENT ${PKG_LONG_NAME})
+
diff --git a/rwcal/plugins/vala/rwcal_aws/Makefile b/rwcal/plugins/vala/rwcal_aws/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/__init__.py b/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/__init__.py
new file mode 100644 (file)
index 0000000..4ce1fa2
--- /dev/null
@@ -0,0 +1 @@
+from .aws_drv import AWSDriver
diff --git a/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/aws_drv.py b/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/aws_drv.py
new file mode 100644 (file)
index 0000000..2c47279
--- /dev/null
@@ -0,0 +1,974 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import boto3
+import botocore
+from . import aws_table
+from . import exceptions
+
+import logging
+logger = logging.getLogger('rwcal.aws.drv')
+logger.setLevel(logging.DEBUG)
+
+class AWSDriver(object):
+    """
+    Driver for AWS
+    """
+    def __init__(self, key, secret, region, ssh_key=None, vpcid=None,
+                 availability_zone=None, default_subnet_id=None):
+        """
+          Constructor for AWSDriver
+          Arguments:
+             key    : AWS user access key
+             secret : AWS user access secret
+             region : AWS region
+             ssh_key: Name of key pair to connect to EC2 instance
+             vpcid  : VPC ID for the resources
+             availability_zone: Availability zone in which to allocate EC2 instances.
+             default_subnet_id: Default subnet id to be used for the EC2 instance interfaces at instance creation time
+          Returns: AWS Driver Object 
+        """
+        self._access_key    = key
+        self._access_secret = secret
+        self._region        = region
+        self._availability_zone =  availability_zone
+        self._ssh_key       = ssh_key
+        
+        self._sess  = boto3.session.Session(aws_access_key_id = self._access_key,
+                                            aws_secret_access_key = self._access_secret,
+                                            region_name = self._region)
+        self._ec2_resource_handle = self._sess.resource(service_name = 'ec2')
+        self._s3_handle  = self._sess.resource(service_name = 's3')
+        self._iam_handle = self._sess.resource(service_name = 'iam')
+
+        self._acct_arn = self._iam_handle.CurrentUser().arn
+        self._account_id = self._acct_arn.split(':')[4]
+        # If VPC id is not passed, use the default VPC for the account
+        if vpcid is None:
+            self._vpcid = self._default_vpc_id
+        else:
+            self._vpcid  = vpcid
+
+        self._default_subnet_id = default_subnet_id 
+        # If default_subnet_id is not passed, get the default subnet for the AZ.
+        # We use this to create the first network interface at instance creation
+        # time. This subnet should typically have an associated public address.
+        if default_subnet_id is None:
+            self._default_subnet_id = self._get_default_subnet_id_for_az 
+           
+       
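+    # Illustrative usage (hypothetical credentials):
+    #   drv = AWSDriver(key='AKIA...', secret='...', region='us-east-1',
+    #                   ssh_key='my-keypair')
+    #   for image in drv.list_images():
+    #       print(image.id)
+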
+    @property
+    def default_subnet_id(self):
+        """
+           Returns default subnet id for account
+        """
+        return self._default_subnet_id
+
+    @property
+    def _ec2_client_handle(self):
+        """
+        Low level EC2 client connection handle
+           Arguments: None
+           Returns: EC2 Client Connection Handle
+        """
+        return self._ec2_resource_handle.meta.client
+
+    @property
+    def _default_vpc_id(self):
+        """
+        Method to get Default VPC ID
+          Arguments: None
+          Returns: Default EC2.Vpc Resource ID for AWS account
+        """
+        return self._default_vpc.vpc_id
+
+    @property
+    def _default_vpc(self):
+        """
+        Method to get Default VPC Resource Object
+           Arguments: None
+           Returns: Default EC2.Vpc Resource for AWS account
+        """
+        try:
+            response = list(self._ec2_resource_handle.vpcs.all())
+        except Exception as e:
+            logger.error("AWSDriver: Get of Default VPC failed with exception: %s" %(repr(e)))
+            raise
+        default_vpc = [vpc for vpc in response if vpc.is_default]
+        assert(len(default_vpc) == 1)
+        return default_vpc[0]
+
+    def _get_vpc_info(self,VpcId):
+        """
+        Get Vpc resource for the specified VpcId
+          Arguments:
+            - VpcId (String) : VPC ID  
+          Returns: EC2.Vpc Resource
+        """ 
+        VpcIds = list()
+        VpcIds.append(VpcId)
+        response = list(self._ec2_resource_handle.vpcs.filter(
+                                               VpcIds = VpcIds))
+        if response:
+            assert(len(response) == 1)
+            return response[0]
+        return None
+
+
+    def upload_image(self, **kwargs):
+        """
+        Upload image to s3
+          Arguments: **kwargs -- dictionary
+               {
+                 'image_path'          : File location for the image,
+                 'image_prefix'        : Name-Prefix of the image on S3 
+                 'public_key'          : The path to the user's PEM encoded RSA public key certificate file,
+                 'private_key'         : The path to the user's PEM encoded RSA private key file,
+                 'arch'                : One of ["i386", "x86_64"],
+                 's3_bucket'           : Name of S3 bucket where this image should be uploaded
+                                         (e.g. 'Rift.Cal' or 'Rift.VNF' or 'Rift.3rdPartyVM' etc)
+                 'kernelId'            : Id of the default kernel to launch the AMI with (OPTIONAL)
+                 'ramdiskId'           : Id of the default ramdisk to launch the AMI with (OPTIONAL)
+                 'block_device_mapping' : block_device_mapping string (OPTIONAL)
+                                         Default block-device-mapping scheme to launch the AMI with. This scheme
+                                         defines how block devices may be exposed to an EC2 instance of this AMI
+                                         if the instance-type of the instance is entitled to the specified device.
+                                         The scheme is a comma-separated list of key=value pairs, where each key
+                                         is a "virtual-name" and each value, the corresponding native device name
+                                         desired. Possible virtual-names are:
+                                         - "ami": denotes the root file system device, as seen by the instance.
+                                         - "root": denotes the root file system device, as seen by the kernel.
+                                         - "swap": denotes the swap device, if present.
+                                         - "ephemeralN": denotes Nth ephemeral store; N is a non-negative integer.
+                                          Note that the contents of the AMI form the root file system. Samples of
+                                          block-device-mappings are:
+                                          '"ami=sda1","root=/dev/sda1","ephemeral0=sda2","swap=sda3"'
+                                          '"ami=0","root=/dev/dsk/c0d0s0","ephemeral0=1"'
+               }
+          Returns: None
+        """
+        import subprocess
+        import tempfile
+        import os
+        import shutil
+        
+        CREATE_BUNDLE_CMD  = 'ec2-bundle-image --cert {public_key} --privatekey {private_key} --user {account_id} --image {image_path} --prefix {image_prefix} --arch {arch}'
+        UPLOAD_BUNDLE_CMD  = 'ec2-upload-bundle --bucket {bucket} --access-key {key} --secret-key {secret} --manifest {manifest} --region {region} --retry'
+        
+        cmdline = CREATE_BUNDLE_CMD.format(public_key    = kwargs['public_key'],
+                                           private_key   = kwargs['private_key'],
+                                           account_id    = self._account_id,
+                                           image_path    = kwargs['image_path'],
+                                           image_prefix  = kwargs['image_prefix'],
+                                           arch          = kwargs['arch'])
+        
+        if 'kernelId' in kwargs:
+            cmdline += (' --kernel ' + kwargs['kernelId'])
+
+        if 'ramdiskId' in kwargs:
+            cmdline += (' --ramdisk ' + kwargs['ramdiskId'])
+            
+        if 'block_device_mapping' in kwargs:
+            cmdline += ' --block-device-mapping ' + kwargs['block_device_mapping']
+
+        ### Create Temporary Directory
+        try:
+            tmp_dir = tempfile.mkdtemp()
+        except Exception as e:
+            logger.error("Failed to create temporary directory. Exception Details: %s" %(repr(e)))
+            raise
+
+        cmdline += (" --destination " + tmp_dir)
+        logger.info('AWSDriver: Executing ec2-bundle-image command. Target directory name: %s. This command may take a while...\n' %(tmp_dir))
+        result = subprocess.call(cmdline.split())
+        if result == 0:
+            logger.info('AWSDriver: ec2-bundle-image command succeeded')
+        else:
+            logger.error('AWSDriver: ec2-bundle-image command failed. Return code %d. CMD: %s'%(result, cmdline))
+            raise OSError('AWSDriver: ec2-bundle-image command failed. Return code %d' %(result))
+        
+        logger.info('AWSDriver: Initiating image upload. This may take a while...')
+
+        cmdline = UPLOAD_BUNDLE_CMD.format(bucket   = kwargs['s3_bucket'],
+                                           key      = self._access_key,
+                                           secret   = self._access_secret,
+                                           manifest = tmp_dir+'/'+kwargs['image_prefix']+'.manifest.xml',
+                                           region   = self._region)
+        result = subprocess.call(cmdline.split())
+        if result == 0:
+            logger.info('AWSDriver: ec2-upload-bundle command succeeded')
+        else:
+            logger.error('AWSDriver: ec2-upload-bundle command failed. Return code %d. CMD: %s'%(result, cmdline))
+            raise OSError('AWSDriver: ec2-upload-bundle command failed. Return code %d' %(result))
+        ### Delete the temporary directory
+        logger.info('AWSDriver: Deleting temporary directory and other software artifacts')
+        shutil.rmtree(tmp_dir, ignore_errors = True)
+        
+                     
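+    # Illustrative call (hypothetical paths and bucket; requires the EC2 AMI
+    # tools, ec2-bundle-image and ec2-upload-bundle, on PATH):
+    #   drv.upload_image(image_path='/tmp/vm.img', image_prefix='test-img',
+    #                    public_key='/home/user/cert.pem',
+    #                    private_key='/home/user/pk.pem',
+    #                    arch='x86_64', s3_bucket='rift.cal.images')
+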
+    def register_image(self, **kwargs):
+        """
+        Registers an image uploaded to S3 with EC2
+           Arguments: **kwargs -- dictionary
+             {
+                Name (string)         : Name of the image
+                ImageLocation(string) : Location of image manifest file in S3 (e.g. 'rift.cal.images/test-img.manifest.xml')
+                Description(string)   : Description for the image (OPTIONAL)
+                Architecture (string) : Possible values 'i386' or 'x86_64' (OPTIONAL)
+                KernelId(string)      : Kernel-ID Refer: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html#AmazonKernelImageIDs (OPTIONAL)
+                RamdiskId(string)     : Ramdisk-ID Refer: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html#AmazonKernelImageIDs (OPTIONAL)
+                RootDeviceName(string): The name of the root device (for example, /dev/sda1 , or /dev/xvda ) (OPTIONAL)
+                BlockDeviceMappings(list) : List of dictionary of block device mapping (OPTIONAL)
+                                            [
+                                               {
+                                                 'VirtualName': 'string',
+                                                 'DeviceName': 'string',
+                                                 'Ebs': {
+                                                    'SnapshotId': 'string',
+                                                    'VolumeSize': 123,
+                                                    'DeleteOnTermination': True|False,
+                                                    'VolumeType': 'standard'|'io1'|'gp2',
+                                                    'Iops': 123,
+                                                    'Encrypted': True|False
+                                                 },
+                                                 'NoDevice': 'string'
+                                              },
+                                            ]
+                VirtualizationType(string): The type of virtualization (OPTIONAL)
+                                           Default: paravirtual
+                SriovNetSupport(string): (OPTIONAL)
+                       Set to ``simple`` to enable enhanced networking for the AMI and any instances that are launched from the AMI.
+                       This option is supported only for HVM AMIs. Specifying this option with a PV AMI can make instances launched from the AMI unreachable.
+        
+          Returns:
+             image_id: UUID of the image
+        """
+
+        kwargs['DryRun'] = False
+        try:
+            response = self._ec2_client_handle.register_image(**kwargs)
+        except Exception as e:
+            logger.error("AWSDriver: List image operation failed with exception: %s" %(repr(e)))
+            raise
+        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+        return response['ImageId']
+        
+
+    def deregister_image(self, ImageId):
+        """
+        DeRegisters image from EC2.
+          Arguments:
+            - ImageId (string): ImageId generated by AWS in register_image call
+          Returns: None
+        """
+        try:
+            response = self._ec2_client_handle.deregister_image(
+                                                         ImageId = ImageId)
+        except Exception as e:
+            logger.error("AWSDriver: deregister_image operation failed with exception: %s" %(repr(e)))
+            raise
+        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+        
+    def get_image(self, ImageId):
+        """
+        Returns an EC2.Image resource describing the image identified by ImageId
+        """
+        try:
+            response = list(self._ec2_resource_handle.images.filter(ImageIds = [ImageId]))
+        except Exception as e:
+            logger.error("AWSDriver: List image operation failed with exception: %s" %(repr(e)))
+            raise
+        return response[0]
+        
+    def list_images(self):
+        """
+        Returns a list of EC2.Image resource objects for images owned by this account
+           Arguments: None
+           Returns: List of EC2.Image resources.
+        """
+        try:
+            response = list(self._ec2_resource_handle.images.filter(Owners = [self._account_id]))
+        except Exception as e:
+            logger.error("AWSDriver: List image operation failed with exception: %s" %(repr(e)))
+            raise
+        return response
+
+    def create_image_from_instance(self,InstanceId,ImageName,VolumeSize = 16):
+        """
+        Creates an AWS AMI from the instance root device volume and registers it.
+        Caller is expected to stop the instance and restart it afterwards if required.
+        Arguments:
+           - InstanceId (String) : AWS EC2 Instance Id
+           - ImageName (String)  : Name for the AMI
+           - VolumeSize (int)    : Root volume size for the AMI, in GB (default 16)
+        Returns:
+           - AWS AMI Image Id
+        """
+
+        try:
+            inst = self.get_instance(InstanceId)
+            # Find Volume Id of Root Device
+            if inst.root_device_type == 'ebs':
+                for dev in inst.block_device_mappings:
+                    if inst.root_device_name == dev['DeviceName']:
+                        volume_id = dev['Ebs']['VolumeId']
+                        break
+
+                rsp=self._ec2_resource_handle.create_snapshot(VolumeId=volume_id)
+                snapshot_id = rsp.id
+
+                #Wait for the snapshot to be completed
+                attempts = 0
+                while attempts < 2:
+                    try:
+                        attempts = attempts + 1
+                        waiter = self._ec2_client_handle.get_waiter('snapshot_completed')
+                        waiter.wait(SnapshotIds=[snapshot_id])
+                    except botocore.exceptions.WaiterError as e:
+                        logger.error("AWSDriver: Create Snapshot for image still not completed. Will wait for another iteration") 
+                        continue
+                    except Exception as e:
+                        logger.error("AWSDriver: Createing Snapshot for instance failed during image creation: %s", (repr(e)))
+                        raise
+                    break
+                  
+                logger.debug("AWSDriver: Snapshot %s completed successfully from instance %s",snapshot_id,InstanceId)
+                image_id = self.register_image(Name=ImageName,VirtualizationType='hvm',
+                                               RootDeviceName='/dev/sda1',SriovNetSupport='simple',
+                                               BlockDeviceMappings=[{'DeviceName':'/dev/sda1',
+                                               'Ebs':{'SnapshotId':snapshot_id,'VolumeSize': VolumeSize,
+                                               'VolumeType': 'standard', 'DeleteOnTermination': True}}],
+                                               Architecture='x86_64')
+                return image_id
+            else:
+                logger.error("AWSDriver: Create Image failed as Instance Root device Type should be ebs to create image") 
+                raise exceptions.RWErrorFailure("AWSDriver: Create Image failed as Instance Root device Type should be ebs to create image")
+        except Exception as e:
+            logger.error("AWSDriver: Createing image from instance failed with exception: %s", (repr(e)))
+            raise
+        
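+    # Typical (hypothetical) capture flow for create_image_from_instance: the
+    # caller stops the instance first so the root volume snapshot is consistent,
+    # then restarts it once the AMI is registered. The instance id is illustrative.
+    #
+    #   inst = drv.get_instance('i-0123456789abcdef0')
+    #   drv.stop_instance(inst.id)
+    #   inst.wait_until_stopped()
+    #   image_id = drv.create_image_from_instance(inst.id, 'my-captured-ami')
+    #   drv.start_instance(inst.id)
+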
+    def list_instances(self):
+        """
+        Returns a list of resource objects representing EC2 instances.
+           Arguments: None
+           Returns:  List of EC2.Instance object
+        """
+        instance_list = []
+        try:
+            # Skip Instances in terminated state
+            response = self._ec2_resource_handle.instances.filter(
+                                                           Filters = [
+                                                               { 'Name': 'instance-state-name',
+                                                                 'Values': ['pending',
+                                                                            'running',
+                                                                            'shutting-down',
+                                                                            'stopping',
+                                                                            'stopped']
+                                                            }
+                                                           ])
+        except Exception as e:
+            logger.error("AWSDriver: List instances operation failed with exception: %s" %(repr(e)))
+            raise
+        for instance in response:
+            instance_list.append(instance)
+        return instance_list
+
+    def get_instance(self, InstanceId):
+        """
+        Returns an EC2 resource object describing the Instance identified by InstanceId
+           Arguments:
+             - InstanceId (String) : MANDATORY, EC2 Instance Id
+           Returns: EC2.Instance object
+        """
+
+        try:
+            instance = list(self._ec2_resource_handle.instances.filter(
+                                                           InstanceIds = [InstanceId]))
+        except Exception as e:
+            logger.error("AWSDriver: Get instances operation failed with exception: %s" %(repr(e)))
+            raise
+        if len(instance) == 0:
+            logger.error("AWSDriver: instance with id %s not avaialble" %InstanceId)
+            raise exceptions.RWErrorNotFound("AWSDriver: instance with id %s not avaialble" %InstanceId)
+        elif len(instance) > 1:
+            logger.error("AWSDriver: Duplicate instances with id %s is avaialble" %InstanceId)
+            raise exceptions.RWErrorDuplicate("AWSDriver: Duplicate instances with id %s is avaialble" %InstanceId)
+        return instance[0] 
+
+    def create_instance(self,**kwargs):
+        """
+         Create an EC2 instance.
+            Arguments: **kwargs -- dictionary
+               {
+                  ImageId (string): MANDATORY, Id of the AMI used to create the instance
+                  SubnetId (string): Id of the Subnet in which to start the EC2 instance. The instance is started in the VPC where the subnet resides.
+                                    Default subnet from account used if not present
+                  InstanceType(string): AWS Instance Type name. Default: t2.micro
+                  SecurityGroupIds: AWS Security Group Id to associate with the instance. Default from VPC used if not present
+                  KeyName (string): Key pair name. Default key pair from account used if not present 
+                  MinCount (Integer): Minimum number of instance to start. Default: 1
+                  MaxCount (Integer): Maximum number of instance to start. Default: 1
+                  Placement (Dict) : Dictionary having Placement group details
+                                     {AvailabilityZone (String): AZ to create the instance}
+                  UserData (string) : cloud-init config file 
+               }
+            Returns: List of EC2.Instance object
+        """ 
+
+        if 'ImageId' not in kwargs:
+            logger.error("AWSDriver: Mandatory parameter ImageId not available during create_instance")
+            raise AttributeError("Mandatory parameter ImageId not available during create_instance")
+
+        # Validate image exists and is available
+        try:
+            image_res = self._ec2_resource_handle.Image(kwargs['ImageId'])
+            image_res.load() 
+        except Exception as e:
+            logger.error("AWSDriver: Image with id %s not available and failed with exception: %s",kwargs['ImageId'],(repr(e)))
+            raise AttributeError("AWSDriver: Image with id %s not available and failed with exception: %s",kwargs['ImageId'],(repr(e)))
+        if image_res.state != 'available':
+            logger.error("AWSDriver: Image state is not available for image with id %s; Current state is %s",
+                         image_res.id,image_res.state)
+            raise AttributeError("ImageId is not valid")
+
+        # If MinCount or MaxCount is not passed set them to default of 1
+        if 'MinCount' not in kwargs:
+            kwargs['MinCount'] = 1  
+        if 'MaxCount' not in kwargs:
+            kwargs['MaxCount'] = kwargs['MinCount'] 
+
+        if 'KeyName' not in kwargs:
+            if not self._ssh_key:
+                logger.error("AWSDriver: Key not available during create_instance to allow SSH")
+            else:
+                kwargs['KeyName'] = self._ssh_key
+
+        if 'Placement' not in kwargs and self._availability_zone is not None:
+            placement = {'AvailabilityZone':self._availability_zone}
+            kwargs['Placement'] = placement
+
+        if 'SubnetId' not in kwargs and 'NetworkInterfaces' not in kwargs:
+            if self._default_subnet_id:
+                kwargs['SubnetId'] = self._default_subnet_id
+            else: 
+                logger.error("AWSDriver: Valid subnetid not present during create instance")
+                raise AttributeError("Valid subnet not present during create instance")
+
+        if self._availability_zone and 'SubnetId' in kwargs:
+            subnet = self.get_subnet(SubnetId= kwargs['SubnetId']) 
+            if not subnet:
+                logger.error("AWSDriver: Valid subnet not found for subnetid %s",kwargs['SubnetId'])
+                raise AttributeError("Valid subnet not found for subnetid %s",kwargs['SubnetId'])
+            if subnet.availability_zone != self._availability_zone:
+                logger.error("AWSDriver: AZ of Subnet %s %s doesnt match account AZ %s",kwargs['SubnetId'],
+                                       subnet.availability_zone,self._availability_zone)
+                raise AttributeError("AWSDriver: AZ of Subnet %s %s doesnt match account AZ %s",kwargs['SubnetId'],
+                                       subnet.availability_zone,self._availability_zone)
+
+        # If instance type is not passed, use t2.micro as default
+        if 'InstanceType' not in kwargs or kwargs['InstanceType'] is None:
+            kwargs['InstanceType'] = 't2.micro'
+        inst_type = kwargs['InstanceType']
+        if inst_type not in aws_table.INSTANCE_TYPES:
+            logger.error("AWSDriver: Invalid instance type %s used",inst_type)
+            raise AttributeError('InstanceType %s is not valid' %inst_type)
+
+        # Validate instance_type for AMI
+        if image_res.sriov_net_support == 'simple':
+            if image_res.virtualization_type != 'hvm':
+                logger.error("AWSDriver: Image with id %s has SRIOV net support but virtualization type is not hvm",kwargs['ImageId'])
+                raise AttributeError('Invalid Image with id %s' %kwargs['ImageId'])
+            if aws_table.INSTANCE_TYPES[inst_type]['sriov'] is False:
+                logger.warning("AWSDriver: Image %s support SR-IOV but instance type %s does not support HVM",kwargs['ImageId'],inst_type)
+
+        if image_res.virtualization_type == 'paravirtual' and aws_table.INSTANCE_TYPES[inst_type]['paravirt'] is False:  # Need to check virt type str for PV
+            logger.error("AWSDriver: Image %s requires PV support but instance %s does not support PV",kwargs['ImageId'],inst_type)
+            raise AttributeError('Image %s requires PV support but instance %s does not support PV',kwargs['ImageId'],inst_type)
+
+        if image_res.root_device_type == 'instance-store' and aws_table.INSTANCE_TYPES[inst_type]['disk'] ==  0: 
+            logger.error("AWSDriver: Image %s uses instance-store root device type that is not supported by instance type %s",kwargs['ImageId'],inst_type) 
+            raise AttributeError("AWSDriver: Image %s uses instance-store root device type that is not supported by instance type %s",kwargs['ImageId'],inst_type)
+
+
+        # Support of instance type varies across regions and also based on account. So we are not validating it
+        #if inst_type not in aws_table.REGION_DETAILS[self._region]['instance_types']:
+        #    logger.error("AWSDriver: instance type %s not supported in region %s",inst_type,self._region)
+        #    raise AttributeError("AWSDriver: instance type %s not supported in region %s",inst_type,self._region)
+
+        try:
+            instances = self._ec2_resource_handle.create_instances(**kwargs)
+        except Exception as e:
+            logger.error("AWSDriver: Creating instance failed with exception: %s" %(repr(e)))
+            raise  
+        return instances
+
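+    # Minimal create_instance sketch; ImageId is the only mandatory key and the
+    # AMI id shown is hypothetical. Subnet, key pair and instance type fall back
+    # to the driver/account defaults described above.
+    #
+    #   instances = drv.create_instance(ImageId='ami-0123456789abcdef0',
+    #                                   InstanceType='t2.micro',
+    #                                   MinCount=1, MaxCount=1)
+    #   instances[0].wait_until_running()
+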
+    def terminate_instance(self,InstanceId):
+        """
+        Terminate an EC2 instance
+           Arguments:
+            - InstanceId (String): ID of EC2 instance, or a list of instance IDs
+           Returns: Response dictionary from the terminate_instances call
+        """ 
+
+        InstanceIds = InstanceId
+        if type(InstanceIds) is not list:
+            InstanceIds = list()
+            InstanceIds.append(InstanceId)
+
+        try:
+            response = self._ec2_client_handle.terminate_instances(InstanceIds=InstanceIds)
+        except Exception as e:
+            logger.error("AWSDriver: Terminate instance failed with exception: %s" %(repr(e)))
+            raise  
+        return response 
+
+    def stop_instance(self,InstanceId):
+        """
+        Stop an EC2 instance. Stop is supported only for EBS-backed instances
+           Arguments:
+            - InstanceId (String): ID of EC2 instance, or a list of instance IDs
+           Returns: Response dictionary from the stop_instances call
+        """ 
+
+        InstanceIds = InstanceId
+        if type(InstanceIds) is not list:
+            InstanceIds = list()
+            InstanceIds.append(InstanceId)
+
+        try:
+            response = self._ec2_client_handle.stop_instances(InstanceIds=InstanceIds)
+        except Exception as e:
+            logger.error("AWSDriver: Stop for instance %s failed with exception: %s",InstanceId,repr(e))
+            raise  
+        return response 
+
+    def start_instance(self,InstanceId):
+        """
+        Start an EC2 instance. Start is supported only for EBS-backed instances
+           Arguments:
+            - InstanceId (String): ID of EC2 instance, or a list of instance IDs
+           Returns: Response dictionary from the start_instances call
+        """ 
+
+        InstanceIds = InstanceId
+        if type(InstanceIds) is not list:
+            InstanceIds = list()
+            InstanceIds.append(InstanceId)
+
+        try:
+            response = self._ec2_client_handle.start_instances(InstanceIds=InstanceIds)
+        except Exception as e:
+            logger.error("AWSDriver: Start for instance %s failed with exception: %s",InstanceId,repr(e))
+            raise  
+        return response 
+       
+    @property
+    def _get_default_subnet_id_for_az(self):
+        """
+        Get default subnet id for AWS Driver registered Availability Zone 
+          Arguments: None
+          Returns: SubnetId (String)
+        """ 
+
+        if self._availability_zone:
+            subnet = self._get_default_subnet_for_az(self._availability_zone)
+            return subnet.id
+        else:
+            return None
+
+    def _get_default_subnet_for_az(self,AvailabilityZone):
+        """
+        Get default Subnet for Availability Zone
+           Arguments:
+              - AvailabilityZone (String) : EC2 AZ
+           Returns: EC2.Subnet object
+        """
+
+        AvailabilityZones = [AvailabilityZone]
+        try:
+            response = list(self._ec2_resource_handle.subnets.filter(
+                                                              Filters = [
+                                                               {'Name':'availability-zone',
+                                                                 'Values': AvailabilityZones}]))
+        except Exception as e:
+            logger.error("AWSDriver: Get default subnet for Availability zone failed with exception: %s" %(repr(e)))
+            raise
+        default_subnet = [subnet for subnet in response if subnet.default_for_az is True and subnet.vpc_id == self._vpcid]
+        assert(len(default_subnet) == 1)
+        return default_subnet[0]
+        
+    def get_subnet_list(self,VpcId=None):
+        """
+        List all the subnets
+          Arguments:
+           - VpcId (String) - VPC ID to filter the subnet list
+        Returns: List of EC2.Subnet Object
+        """
+
+        try:
+            VpcIds = VpcId
+            if VpcId is not None:
+                if type(VpcIds) is not list:
+                    VpcIds = list()
+                    VpcIds.append(VpcId)
+                response = list(self._ec2_resource_handle.subnets.filter(
+                                              Filters = [
+                                              { 'Name': 'vpc-id',
+                                              'Values': VpcIds}]))
+            else:
+                response = list(self._ec2_resource_handle.subnets.all())
+        except Exception as e:
+            logger.error("AWSDriver: List subnets operation failed with exception: %s" %(repr(e)))
+            raise
+        return response 
+
+    def get_subnet(self,SubnetId):
+        """
+        Get the subnet for the specified SubnetId
+           Arguments:
+              - SubnetId (String) - MANDATORY
+           Returns: EC2.Subnet Object
+        """
+
+        try:
+            response = list(self._ec2_resource_handle.subnets.filter(SubnetIds=[SubnetId]))
+        except botocore.exceptions.ClientError as e:
+            if e.response['Error']['Code'] == 'InvalidSubnetID.NotFound':
+                logger.error("AWSDriver: Get Subnet Invalid SubnetID %s",SubnetId)
+                raise exceptions.RWErrorNotFound("AWSDriver: Get Subnet Invalid SubnetID %s" % SubnetId)
+            else:
+                logger.error("AWSDriver: Get subnet operation failed with exception: %s",(repr(e)))
+                raise
+        except Exception as e:
+            logger.error("AWSDriver: Get subnet operation failed with exception: %s" %(repr(e)))
+            raise
+        if len(response) == 0:
+            logger.error("AWSDriver: subnet with id %s is not avaialble" %SubnetId)
+            raise exceptions.RWErrorNotFoun("AWSDriver: subnet with id %s is not avaialble" %SubnetId)
+        elif len(response) > 1: 
+            logger.error("AWSDriver: Duplicate subnet with id %s is avaialble" %SubnetId)
+            raise exceptions.RWErrorDuplicate("AWSDriver: Duplicate subnet with id %s is avaialble" %SubnetId)
+        return response[0] 
+
+    def create_subnet(self,**kwargs):
+        """
+        Create an EC2 subnet with the specified CIDR
+          Arguments:
+             - CidrBlock (String): MANDATORY. CIDR for subnet. CIDR should be within the VPC CIDR
+             - VpcId (String): VPC ID in which to create the subnet. Default VPC from AWS Driver registration used if not present.
+             - AvailabilityZone (String): Availability zone to create subnet. Default AZ from AWS Driver registration used
+                                          if not present
+          Returns: EC2.Subnet Object 
+        """
+
+        if 'CidrBlock' not in kwargs:
+            logger.error("AWSDriver: Insufficent params for create_subnet. CidrBlock is mandatory parameter")
+            raise AttributeError("AWSDriver: Insufficent params for create_subnet. CidrBlock is mandatory parameter")
+
+        if 'VpcId' not in kwargs:
+            kwargs['VpcId'] = self._vpcid
+        if 'AvailabilityZone' not in kwargs and self._availability_zone is not None:
+            kwargs['AvailabilityZone'] = self._availability_zone
+
+        vpc = self._get_vpc_info(kwargs['VpcId'])
+        if not vpc:
+            logger.error("AWSDriver: Subnet creation failed as VpcId %s does not exist", kwargs['VpcId'])
+            raise exceptions.RWErrorNotFound("AWSDriver: Subnet creation failed as VpcId %s does not exist", kwargs['VpcId'])
+        if vpc.state != 'available':
+            logger.error("AWSDriver: Subnet creation failed as VpcId %s is not in available state. Current state is %s", kwargs['VpcId'],vpc.state)
+            raise exceptions.RWErrorNotConnected("AWSDriver: Subnet creation failed as VpcId %s is not in available state. Current state is %s", kwargs['VpcId'],vpc.state)
+        
+        try:
+            subnet = self._ec2_resource_handle.create_subnet(**kwargs)
+        except botocore.exceptions.ClientError as e:
+            if e.response['Error']['Code'] == 'InvalidSubnet.Conflict':
+                logger.error("AWSDriver: Create Subnet for ip %s failed due to overlap with existing subnet in VPC %s",kwargs['CidrBlock'],kwargs['VpcId'])
+                raise exceptions.RWErrorExists("AWSDriver: Create Subnet for ip %s failed due to overlap with existing subnet in VPC %s" % (kwargs['CidrBlock'],kwargs['VpcId']))
+            elif e.response['Error']['Code'] == 'InvalidSubnet.Range':
+                logger.error("AWSDriver: Create Subnet for ip %s failed as it is not in VPC CIDR range for VPC %s",kwargs['CidrBlock'],kwargs['VpcId'])
+                raise AttributeError("AWSDriver: Create Subnet for ip %s failed as it is not in VPC CIDR range for VPC %s" % (kwargs['CidrBlock'],kwargs['VpcId']))
+            else:
+                logger.error("AWSDriver: Creating subnet failed with exception: %s",(repr(e)))
+                raise
+        except Exception as e:
+            logger.error("AWSDriver: Creating subnet failed with exception: %s" %(repr(e)))
+            raise  
+        return subnet
+
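+    # Example create_subnet call (the CIDR is illustrative): CidrBlock must fall
+    # inside the VPC CIDR, otherwise AttributeError is raised via the
+    # InvalidSubnet.Range handling above.
+    #
+    #   subnet = drv.create_subnet(CidrBlock='10.0.1.0/24')
+    #   drv.modify_subnet(subnet.id, MapPublicIpOnLaunch=True)
+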
+    def modify_subnet(self,SubnetId,MapPublicIpOnLaunch):
+        """
+        Modify an EC2 subnet
+           Arguments:
+               - SubnetId (String): MANDATORY, EC2 Subnet ID
+               - MapPublicIpOnLaunch (Boolean): Flag controlling whether instances launched in this subnet are assigned a public IP
+        """
+
+        try:
+            response = self._ec2_client_handle.modify_subnet_attribute(SubnetId=SubnetId,MapPublicIpOnLaunch={'Value':MapPublicIpOnLaunch})
+        except botocore.exceptions.ClientError as e:
+            if e.response['Error']['Code'] == 'InvalidSubnetID.NotFound':
+                logger.error("AWSDriver: Modify Subnet Invalid SubnetID %s",SubnetId)
+                raise exceptions.RWErrorNotFound("AWSDriver: Modify Subnet Invalid SubnetID %s" % SubnetId)
+            else:
+                logger.error("AWSDriver: Modify subnet failed with exception: %s",(repr(e)))
+                raise
+        except Exception as e:
+            logger.error("AWSDriver: Modify subnet failed with exception: %s",(repr(e)))
+            raise
+
+
+    def delete_subnet(self,SubnetId):
+        """
+        Delete an EC2 subnet
+           Arguments:
+               - SubnetId (String): MANDATORY, EC2 Subnet ID
+           Returns: None 
+        """
+
+        try:
+            response = self._ec2_client_handle.delete_subnet(SubnetId=SubnetId)
+        except botocore.exceptions.ClientError as e:
+            if e.response['Error']['Code'] == 'InvalidSubnetID.NotFound':
+                logger.error("AWSDriver: Delete Subnet Invalid SubnetID %s",SubnetId)
+                raise exceptions.RWErrorNotFound("AWSDriver: Delete Subnet Invalid SubnetID %s" % SubnetId)
+            else:
+                logger.error("AWSDriver: Delete subnet failed with exception: %s",(repr(e)))
+                raise
+        except Exception as e:
+            logger.error("AWSDriver: Delete subnet failed with exception: %s",(repr(e)))
+            raise
+
+    def get_network_interface_list(self,SubnetId=None,VpcId=None,InstanceId = None):
+        """
+        List all the network interfaces
+           Arguments:
+              - SubnetId (String)
+              - VpcId (String)
+              - InstanceId (String)
+           Returns List of EC2.NetworkInterface  
+        """
+
+        try:
+            if InstanceId is not None:
+                InstanceIds = [InstanceId]
+                response = list(self._ec2_resource_handle.network_interfaces.filter(
+                                              Filters = [
+                                              { 'Name': 'attachment.instance-id',
+                                                 'Values': InstanceIds}]))
+            elif SubnetId is not None:
+                SubnetIds = SubnetId
+                if type(SubnetId) is not list:
+                    SubnetIds = list()
+                    SubnetIds.append(SubnetId)
+                response = list(self._ec2_resource_handle.network_interfaces.filter(
+                                              Filters = [
+                                              { 'Name': 'subnet-id',
+                                              'Values': SubnetIds}]))
+            elif VpcId is not None:
+                VpcIds = VpcId
+                if type(VpcIds) is not list:
+                    VpcIds = list()
+                    VpcIds.append(VpcId)
+                response = list(self._ec2_resource_handle.network_interfaces.filter(
+                                              Filters = [
+                                              { 'Name': 'vpc-id',
+                                              'Values': VpcIds}]))
+            else:
+                response = list(self._ec2_resource_handle.network_interfaces.all())
+        except Exception as e:
+            logger.error("AWSDriver: List network interfaces operation failed with exception: %s" %(repr(e)))
+            raise
+        return response
+
+    def get_network_interface(self,NetworkInterfaceId):
+        """
+        Get the network interface
+           Arguments:
+              NetworkInterfaceId (String): MANDATORY, EC2 Network Interface Id
+           Returns: EC2.NetworkInterface Object
+        """
+
+        try:
+            response = list(self._ec2_resource_handle.network_interfaces.filter(NetworkInterfaceIds=[NetworkInterfaceId]))
+        except Exception as e:
+            logger.error("AWSDriver: List Network Interfaces operation failed with exception: %s" %(repr(e)))
+            raise
+        if len(response) == 0:
+            logger.error("AWSDriver: Network interface with id %s is not avaialble" %NetworkInterfaceId)
+            raise exceptions.RWErrorNotFound("AWSDriver: Network interface with id %s is not avaialble" %NetworkInterfaceId)
+        elif len(response) > 1:
+            logger.error("AWSDriver: Duplicate Network interface with id %s is avaialble" %NetworkInterfaceId)
+            raise exceptions.RWErrorDuplicate("AWSDriver: Duplicate Network interface with id %s is avaialble" %NetworkInterfaceId)
+        return response[0] 
+
+    def create_network_interface(self,**kwargs):
+        """
+        Create a network interface in specified subnet 
+          Arguments:
+             - SubnetId (String): MANDATORY, Subnet to create network interface
+          Returns: EC2.NetworkInterface Object
+        """
+
+        if 'SubnetId' not in kwargs:
+            logger.error("AWSDriver: Insufficent params for create_network_inteface . SubnetId is mandatory parameters")
+            raise AttributeError("AWSDriver: Insufficent params for create_network_inteface . SubnetId is mandatory parameters")
+
+        try:
+            interface = self._ec2_resource_handle.create_network_interface(**kwargs)
+        except botocore.exceptions.ClientError as e:
+            if e.response['Error']['Code'] == 'InvalidSubnetID.NotFound':
+                logger.error("AWSDriver: Create Network interface failed as subnet %s is not found",kwargs['SubnetId'])
+                raise exceptions.RWErrorNotFound("AWSDriver: Create Network interface failed as subnet %s is not found" % kwargs['SubnetId'])
+            else:
+                logger.error("AWSDriver: Creating network interface failed with exception: %s",(repr(e)))
+                raise
+        except Exception as e:
+            logger.error("AWSDriver: Creating network interface failed with exception: %s" %(repr(e)))
+            raise
+        return interface
+
+    def delete_network_interface(self,NetworkInterfaceId):
+        """
+        Delete a network interface
+         Arguments:
+            - NetworkInterfaceId(String): MANDATORY
+         Returns: None
+        """
+        try:
+            response = self._ec2_client_handle.delete_network_interface(NetworkInterfaceId=NetworkInterfaceId)
+        except botocore.exceptions.ClientError as e:
+            if e.response['Error']['Code'] == 'InvalidNetworkInterfaceID.NotFound':
+                logger.error("AWSDriver: Delete Network interface not found for interface ID %s",NetworkInterfaceId)
+                raise exceptions.RWErrorNotFound("AWSDriver: Delete Network interface not found for interface ID %s" % NetworkInterfaceId)
+            else:
+                logger.error("AWSDriver: Delete network interface failed with exception: %s",(repr(e)))
+                raise
+        except Exception as e:
+            logger.error("AWSDriver: Delete network interface failed with exception: %s",(repr(e)))
+            raise
+
+    def associate_public_ip_to_network_interface(self,NetworkInterfaceId):
+        """
+        Allocate an Elastic IP and associate it with the network interface
+          Arguments:
+            NetworkInterfaceId (String): MANDATORY
+          Returns: Response dictionary from the allocate_address call
+        """
+        try:
+            response = self._ec2_client_handle.allocate_address(Domain='vpc')
+            self._ec2_client_handle.associate_address(NetworkInterfaceId=NetworkInterfaceId,AllocationId = response['AllocationId'])
+        except Exception as e:
+             logger.error("AWSDriver: Associating Public IP to network interface %s failed with exception: %s",NetworkInterfaceId,(repr(e)))
+             raise
+        return response
+
+    def disassociate_public_ip_from_network_interface(self,NetworkInterfaceId):
+        """
+        Disassociate an Elastic IP from the network interface and release it
+          Arguments:
+            NetworkInterfaceId (String): MANDATORY
+          Returns: None
+        """
+        try:
+            interface = self.get_network_interface(NetworkInterfaceId=NetworkInterfaceId) 
+            if interface  and interface.association and 'AssociationId' in interface.association:
+                self._ec2_client_handle.disassociate_address(AssociationId = interface.association['AssociationId'])
+                self._ec2_client_handle.release_address(AllocationId=interface.association['AllocationId'])
+        except Exception as e:
+             logger.error("AWSDriver: Associating Public IP to network interface %s failed with exception: %s",NetworkInterfaceId,(repr(e)))
+             raise
+
+    def attach_network_interface(self,**kwargs):
+        """
+        Attach network interface to a running EC2 instance. Used to add additional interfaces to an instance
+          Arguments:
+            - NetworkInterfaceId (String):  MANDATORY,
+            - InstanceId(String) :  MANDATORY
+            - DeviceIndex (Integer): MANDATORY
+          Returns: Dict containing the AttachmentId (String)
+        """
+
+        if 'NetworkInterfaceId' not in kwargs or 'InstanceId' not in kwargs or 'DeviceIndex' not in kwargs:
+            logger.error('AWSDriver: Attach network interface to instance requires NetworkInterfaceId, InstanceId and DeviceIndex as mandatory parameters')
+            raise AttributeError('AWSDriver: Attach network interface to instance requires NetworkInterfaceId, InstanceId and DeviceIndex as mandatory parameters')
+
+        try:
+            response = self._ec2_client_handle.attach_network_interface(**kwargs)
+        except Exception as e:
+            logger.error("AWSDriver: Attach network interface failed with exception: %s",(repr(e)))
+            raise
+        return response
+
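+    # Sketch of adding a second NIC to a running instance (ids hypothetical).
+    # DeviceIndex 0 is the primary interface created at launch, so additional
+    # interfaces start at index 1.
+    #
+    #   port = drv.create_network_interface(SubnetId='subnet-0123456789abcdef0')
+    #   attachment = drv.attach_network_interface(NetworkInterfaceId=port.id,
+    #                                             InstanceId='i-0123456789abcdef0',
+    #                                             DeviceIndex=1)
+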
+    def detach_network_interface(self,**kwargs):
+        """
+        Detach network interface from instance 
+          Arguments:
+            - AttachmentId (String)
+          Returns: None 
+        """
+
+        if 'AttachmentId' not in kwargs:
+            logger.error('AWSDriver: Detach network interface from instance requires AttachmentId as a mandatory parameter')
+            raise AttributeError('AWSDriver: Detach network interface from instance requires AttachmentId as a mandatory parameter')
+
+        try:
+            response = self._ec2_client_handle.detach_network_interface(**kwargs)
+        except Exception as e:
+            logger.error("AWSDriver: Detach network interface failed with exception: %s",(repr(e)))
+            raise
+
+    def map_flavor_to_instance_type(self,ram,vcpus,disk,inst_types = None):
+        """
+        Method to find an EC2 instance type matching the requested params
+          Arguments:
+             - ram (Integer) : RAM size in MB
+             - vcpus (Integer): vCPU count
+             - disk (Integer): Storage size in GB
+             - inst_types (List): List of strings naming the EC2 instance types to choose from,
+                                  assumed to be in order of increasing resource size
+          Returns:
+             InstanceType (String) - EC2 Instance Type; falls back to 't2.micro' if no candidate matches
+        """
+        if inst_types is None:
+            inst_types = ['c3.large','c3.xlarge','c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge']
+        
+        for inst in inst_types:
+            if inst in aws_table.INSTANCE_TYPES:
+                # Use >= so that a type whose resources exactly match the request is accepted
+                if (aws_table.INSTANCE_TYPES[inst]['ram'] >= ram and
+                        aws_table.INSTANCE_TYPES[inst]['vcpu'] >= vcpus and
+                        aws_table.INSTANCE_TYPES[inst]['disk'] >= disk):
+                    return inst
+        return 't2.micro'
+
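+    # Illustrative flavor mapping: a request for 4 GB RAM, 2 vCPUs and 20 GB disk
+    # skips c3.large (3750 MB RAM) and lands on c3.xlarge (7500 MB / 4 vCPU / 80 GB)
+    # in the default candidate list; an unsatisfiable request falls back to 't2.micro'.
+    #
+    #   inst_type = drv.map_flavor_to_instance_type(ram=4096, vcpus=2, disk=20)
+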
+    def upload_ssh_key(self,key_name,public_key):
+        """
+        Method to upload Public Key to AWS
+          Arguments:
+            - key_name (String): Name for the key pair
+            - public_key (String): Base 64 encoded public key
+          Returns  None
+        """
+        self._ec2_resource_handle.import_key_pair(KeyName=key_name,PublicKeyMaterial=public_key) 
+
+    def delete_ssh_key(self,key_name):
+        """
+        Method to delete Public Key from AWS
+          Arguments:
+            - key_name (String): Name for the key pair
+          Returns  None
+        """
+        self._ec2_client_handle.delete_key_pair(KeyName=key_name) 
+             
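+    # Key pair sketch (name and key file are placeholders): public_key is the
+    # OpenSSH public key text, e.g. the contents of an id_rsa.pub file.
+    #
+    #   drv.upload_ssh_key('rift-key', open('id_rsa.pub').read())
+    #   drv.delete_ssh_key('rift-key')
+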
diff --git a/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/aws_table.py b/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/aws_table.py
new file mode 100644 (file)
index 0000000..a7349fd
--- /dev/null
@@ -0,0 +1,463 @@
+#!/usr/bin/python
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+"""
+Sizes must be hardcoded, because Amazon doesn't provide an API to fetch them.
+From http://aws.amazon.com/ec2/instance-types/
+max_inst From http://aws.amazon.com/ec2/faqs/#How_many_instances_can_I_run_in_Amazon_EC2 
+paravirt from https://aws.amazon.com/amazon-linux-ami/instance-type-matrix/
+"""
+INSTANCE_TYPES = {
+    'm4.large': {
+        'id': 'm4.large',
+        'name': 'Large Instance',
+        'ram': 8*1024,
+        'vcpu': 2,
+        'disk': 0,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'm4.xlarge': {
+        'id': 'm4.xlarge',
+        'name': 'Extra Large Instance',
+        'ram': 16*1024,
+        'vcpu': 4,
+        'disk': 0,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'm4.2xlarge': {
+        'id': 'm4.2xlarge',
+        'name': 'Double Extra Large Instance',
+        'ram': 32*1024,
+        'vcpu': 8,
+        'disk': 0,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'm4.4xlarge': {
+        'id': 'm4.4xlarge',
+        'name': 'Quadruple Extra Large Instance',
+        'ram': 64*1024,
+        'vcpu': 16,
+        'disk': 0,
+        'bandwidth': None,
+        'max_inst': 10,
+        'sriov': True,
+        'paravirt': False
+    },
+    'm4.10xlarge': {
+        'id': 'm4.10xlarge',
+        'name': 'Ten Extra Large Instance',
+        'ram': 160*1024,
+        'vcpu': 40,
+        'disk': 0,
+        'bandwidth': None,
+        'max_inst': 5,
+        'sriov': True,
+        'paravirt': False
+    },
+    'm3.medium': {
+        'id': 'm3.medium',
+        'name': 'Medium Instance',
+        'ram': 3.75*1024, #3840
+        'vcpu': 1,
+        'disk': 4,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': True
+    },
+    'm3.large': {
+        'id': 'm3.large',
+        'name': 'Large Instance',
+        'ram': 7.5*1024, #7168
+        'vcpu': 2,
+        'disk': 32,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': True
+    },
+    'm3.xlarge': {
+        'id': 'm3.xlarge',
+        'name': 'Extra Large Instance',
+        'ram': 15*1024,#15360
+        'vcpu': 4,
+        'disk': 80,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': True
+    },
+    'm3.2xlarge': {
+        'id': 'm3.2xlarge',
+        'name': 'Double Extra Large Instance',
+        'ram': 30*1024, #30720
+        'vcpu': 8,
+        'disk': 160,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': True
+    },
+    'g2.2xlarge': {
+        'id': 'g2.2xlarge',
+        'name': 'Cluster GPU G2 Double Extra Large Instance',
+        'ram': 15000,
+        'disk': 60,
+        'vcpu': 8,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': False
+    },
+    'g2.8xlarge': {
+        'id': 'g2.8xlarge',
+        'name': 'Cluster GPU G2 Eight Extra Large Instance',
+        'ram': 60000,
+        'disk': 240,
+        'vcpu': 32,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': False
+    },
+    # c4 instances are EBS-only (no instance storage)
+    'c4.large': {
+        'id': 'c4.large',
+        'name': 'Compute Optimized Large Instance',
+        'ram': 3750,
+        'vcpu': 2,
+        'disk': 0,  #EBS-only
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'c4.xlarge': {
+        'id': 'c4.xlarge',
+        'name': 'Compute Optimized Extra Large Instance',
+        'ram': 7500,
+        'vcpu': 4,
+        'disk': 0,  #EBS-only
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'c4.2xlarge': {
+        'id': 'c4.2xlarge',
+        'name': 'Compute Optimized Double Extra Large Instance',
+        'ram': 15000,
+        'vcpu': 8,
+        'disk': 0,  #EBS-only
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'c4.4xlarge': {
+        'id': 'c4.4xlarge',
+        'name': 'Compute Optimized Quadruple Extra Large Instance',
+        'ram': 30000,
+        'vcpu': 16,
+        'disk': 0,  #EBS-only
+        'bandwidth': None,
+        'max_inst': 10,
+        'sriov': True,
+        'paravirt': False
+    },
+    'c4.8xlarge': {
+        'id': 'c4.8xlarge',
+        'name': 'Compute Optimized Eight Extra Large Instance',
+        'ram': 60000,
+        'vcpu': 36,
+        'disk': 0,  #EBS-only
+        'bandwidth': None,
+        'max_inst': 5,
+        'sriov': True,
+        'paravirt': False
+    },
+    # c3 instances have 2 SSDs of the specified disk size
+    'c3.large': {
+        'id': 'c3.large',
+        'name': 'Compute Optimized Large Instance',
+        'ram': 3750,
+        'vcpu': 2,
+        'disk': 32,  # x2
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': True
+    },
+    'c3.xlarge': {
+        'id': 'c3.xlarge',
+        'name': 'Compute Optimized Extra Large Instance',
+        'ram': 7500,
+        'vcpu':4,
+        'disk': 80,  # x2
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': True
+    },
+    'c3.2xlarge': {
+        'id': 'c3.2xlarge',
+        'name': 'Compute Optimized Double Extra Large Instance',
+        'ram': 15000,
+        'vcpu':8,
+        'disk': 160,  # x2
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': True
+    },
+    'c3.4xlarge': {
+        'id': 'c3.4xlarge',
+        'name': 'Compute Optimized Quadruple Extra Large Instance',
+        'ram': 30000,
+        'vcpu':16,
+        'disk': 320,  # x2
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': True
+    },
+    'c3.8xlarge': {
+        'id': 'c3.8xlarge',
+        'name': 'Compute Optimized Eight Extra Large Instance',
+        'ram': 60000,
+        'vcpu':32,
+        'disk': 640,  # x2
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': True
+    },
+    # i2 instances have up to eight SSD drives
+    'i2.xlarge': {
+        'id': 'i2.xlarge',
+        'name': 'High Storage Optimized Extra Large Instance',
+        'ram': 31232,
+        'vcpu': 4,
+        'disk': 800,
+        'bandwidth': None,
+        'max_inst': 8,
+        'sriov': True,
+        'paravirt': False
+    },
+    'i2.2xlarge': {
+        'id': 'i2.2xlarge',
+        'name': 'High Storage Optimized Double Extra Large Instance',
+        'ram': 62464,
+        'vcpu': 8,
+        'disk': 1600,
+        'bandwidth': None,
+        'max_inst': 8,
+        'sriov': True,
+        'paravirt': False
+    },
+    'i2.4xlarge': {
+        'id': 'i2.4xlarge',
+        'name': 'High Storage Optimized Quadruple Extra Large Instance',
+        'ram': 124928,
+        'vcpu': 16,
+        'disk': 3200,
+        'bandwidth': None,
+        'max_inst': 4,
+        'sriov': True,
+        'paravirt': False
+    },
+    'i2.8xlarge': {
+        'id': 'i2.8xlarge',
+        'name': 'High Storage Optimized Eight Extra Large Instance',
+        'ram': 249856,
+        'vcpu': 32,
+        'disk': 6400,
+        'bandwidth': None,
+        'max_inst': 2,
+        'sriov': True,
+        'paravirt': False
+    },
+    'd2.xlarge': {
+        'id': 'd2.xlarge',
+        'name': 'High Storage Optimized Extra Large Instance',
+        'ram': 30050,
+        'vcpu': 4,
+        'disk': 6000,  # 3 x 2 TB
+        'max_inst': 20,
+        'bandwidth': None,
+        'sriov': True,
+        'paravirt': False
+    },
+    'd2.2xlarge': {
+        'id': 'd2.2xlarge',
+        'name': 'High Storage Optimized Double Extra Large Instance',
+        'ram': 61952,
+        'vcpu': 8,
+        'disk': 12000,  # 6 x 2 TB
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'd2.4xlarge': {
+        'id': 'd2.4xlarge',
+        'name': 'High Storage Optimized Quadruple Extra Large Instance',
+        'ram': 122000,
+        'vcpu': 16,
+        'disk': 24000,  # 12 x 2 TB
+        'bandwidth': None,
+        'max_inst': 10,
+        'sriov': True,
+        'paravirt': False
+    },
+    'd2.8xlarge': {
+        'id': 'd2.8xlarge',
+        'name': 'High Storage Optimized Eight Extra Large Instance',
+        'ram': 244000,
+        'vcpu': 36,
+        'disk': 48000,  # 24 x 2 TB
+        'bandwidth': None,
+        'max_inst': 5,
+        'sriov': True,
+        'paravirt': False
+    },
+    # 1x SSD
+    'r3.large': {
+        'id': 'r3.large',
+        'name': 'Memory Optimized Large instance',
+        'ram': 15000,
+        'vcpu': 2,
+        'disk': 32,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'r3.xlarge': {
+        'id': 'r3.xlarge',
+        'name': 'Memory Optimized Extra Large instance',
+        'ram': 30500,
+        'vcpu': 4,
+        'disk': 80,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'r3.2xlarge': {
+        'id': 'r3.2xlarge',
+        'name': 'Memory Optimized Double Extra Large instance',
+        'ram': 61000,
+        'vcpu': 8,
+        'disk': 160,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': True,
+        'paravirt': False
+    },
+    'r3.4xlarge': {
+        'id': 'r3.4xlarge',
+        'name': 'Memory Optimized Quadruple Extra Large instance',
+        'ram': 122000,
+        'vcpu': 16,
+        'disk': 320,
+        'bandwidth': None,
+        'max_inst': 10,
+        'sriov': True,
+        'paravirt': False
+    },
+    'r3.8xlarge': {
+        'id': 'r3.8xlarge',
+        'name': 'Memory Optimized Eight Extra Large instance',
+        'ram': 244000,
+        'vcpu': 32,
+        'disk': 320,  # x2
+        'bandwidth': None,
+        'max_inst': 5,
+        'sriov': True,
+        'paravirt': False
+    },
+    't2.micro': {
+        'id': 't2.micro',
+        'name': 'Burstable Performance Micro Instance',
+        'ram': 1024,
+        'disk': 0,  # EBS Only
+        'vcpu': 1,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': False,
+        'extra': {
+            'cpu': 6
+        }
+    },
+    # Burstable Performance General Purpose
+    't2.small': {
+        'id': 't2.small',
+        'name': 'Burstable Performance Small Instance',
+        'ram': 2048,
+        'vcpu': 1,
+        'disk': 0,  # EBS Only
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': False,
+        'extra': {
+            'cpu': 12
+        }
+    },
+    't2.medium': {
+        'id': 't2.medium',
+        'name': 'Burstable Performance Medium Instance',
+        'ram': 4096,
+        'disk': 0,  # EBS Only
+        'vcpu': 2,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': False,
+        'extra': {
+            'cpu': 24
+        }
+    },
+    't2.large': {
+        'id': 't2.large',
+        'name': 'Burstable Performance Large Instance',
+        'ram': 8192,
+        'disk': 0,  # EBS Only
+        'vcpu': 2,
+        'bandwidth': None,
+        'max_inst': 20,
+        'sriov': False,
+        'paravirt': False,
+        'extra': {
+            'cpu': 36
+        }
+    }
+}
+
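+# Example lookup against the table above (purely illustrative):
+#
+#   from rift.rwcal.aws import aws_table
+#   spec = aws_table.INSTANCE_TYPES['c3.xlarge']
+#   assert spec['vcpu'] == 4 and spec['ram'] == 7500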
diff --git a/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/delete_vm.py b/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/delete_vm.py
new file mode 100644 (file)
index 0000000..05d744b
--- /dev/null
@@ -0,0 +1,130 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import rift.rwcal.aws as aws_drv
+import logging
+import argparse
+import rwlogger
+import sys, os, time
+
+logging.basicConfig(level=logging.DEBUG)
+
+logger = logging.getLogger()
+rwlog_handler = rwlogger.RwLogger(category="rw-cal-log",
+                                  subcategory="aws",)
+logger.addHandler(rwlog_handler)
+#logger.setLevel(logging.DEBUG)
+
+        
+def cleanup_vm(drv,argument):
+    vm_inst = drv.get_instance(argument.server_id)
+    logger.info("Waiting for VM instance to get to terminating state")
+    vm_inst.wait_until_terminated()
+    logger.info("VM inst is now in terminating state") 
+
+    for port_id in argument.vdu_port_list:
+        logger.info("Deleting network interface with id %s",port_id)
+        port = drv.get_network_interface(port_id)
+        if port:
+            if port.association and 'AssociationId' in port.association:
+                drv.disassociate_public_ip_from_network_interface(NetworkInterfaceId=port.id)
+            drv.delete_network_interface(port.id)
+        else:
+            logger.error("Newtork interface with id %s not found when deleting interface",port_id)
+    
+
+def main():
+    """
+    Main routine
+    """
+    parser = argparse.ArgumentParser(description='Script to delete AWS resources')
+    parser.add_argument('--aws_key',
+                        action = "store",
+                        dest = "aws_key",
+                        type = str,
+                        help='AWS Key')
+
+    parser.add_argument('--aws_secret',
+                        action = "store",
+                        dest = "aws_secret",
+                        type = str,
+                        help = "AWS Secret")
+
+    parser.add_argument('--aws_region',
+                        action = "store",
+                        dest = "aws_region",
+                        type = str,
+                        help = "AWS Region")
+
+    parser.add_argument('--server_id',
+                        action = "store",
+                        dest = "server_id",
+                        type = str,
+                        help = "Server ID on which delete operations needs to be performed")
+    
+    parser.add_argument('--vdu_port_list',
+                        action = "append",
+                        dest = "vdu_port_list",
+                        default = [],
+                        help = "Port id list for vdu")
+
+    argument = parser.parse_args()
+
+    if not argument.aws_key:
+        logger.error("ERROR: AWS key is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using AWS key: %s" %(argument.aws_key))
+
+    if not argument.aws_secret:
+        logger.error("ERROR: AWS Secret is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using AWS Secret: %s" %(argument.aws_secret))
+
+    if not argument.aws_region:
+        logger.error("ERROR: AWS Region is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using AWS Region: %s" %(argument.aws_region))
+
+    if not argument.server_id:
+        logger.error("ERROR: Server ID is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using Server ID : %s" %(argument.server_id))
+        
+    try:
+        pid = os.fork()
+        if pid > 0:
+            # exit for parent
+            sys.exit(0)
+    except OSError as e:
+        logger.error("fork failed: %d (%s)\n" % (e.errno, e.strerror))
+        sys.exit(2)
+        
+    drv = aws_drv.AWSDriver(key = argument.aws_key,
+                            secret  = argument.aws_secret,
+                            region  = argument.aws_region)
+    cleanup_vm(drv, argument)
+    sys.exit(0)
+    
+if __name__ == "__main__":
+    main()
+        
+
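+# Example invocation (credentials and ids are placeholders); note the script
+# forks and the parent exits immediately, so cleanup proceeds in the background:
+#
+#   python3 delete_vm.py --aws_key AKIA... --aws_secret ... --aws_region us-east-1 \
+#       --server_id i-0123456789abcdef0 --vdu_port_list eni-0123456789abcdef0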
diff --git a/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/exceptions.py b/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/exceptions.py
new file mode 100644 (file)
index 0000000..3bb3aa7
--- /dev/null
@@ -0,0 +1,54 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+#
+# Rift Exceptions:
+#   These exceptions each correspond with a rift status as they are defined
+# in rwtypes.vala.  Adding them here so that errors from C transitioning
+# back to python can be handled in a pythonic manner rather than having to
+# inspect return values.
+
+class RWErrorFailure(Exception):
+  pass
+
+class RWErrorDuplicate(Exception):
+  pass
+
+class RWErrorNotFound(Exception):
+  pass
+
+class RWErrorOutOfBounds(Exception):
+  pass
+
+class RWErrorBackpressure(Exception):
+  pass
+
+class RWErrorTimeout(Exception):
+  pass
+
+class RWErrorExists(Exception):
+  pass
+
+class RWErrorNotEmpty(Exception):
+  pass
+
+class RWErrorNotConnected(Exception):
+  pass
+
+class RWErrorNotSupported(Exception):
+  pass
+
diff --git a/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/prepare_vm.py b/rwcal/plugins/vala/rwcal_aws/rift/rwcal/aws/prepare_vm.py
new file mode 100644 (file)
index 0000000..e0ae55a
--- /dev/null
@@ -0,0 +1,140 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import rift.rwcal.aws as aws_drv
+import logging
+import argparse
+import rwlogger
+import sys, os, time
+
+logging.basicConfig(level=logging.DEBUG)
+
+logger = logging.getLogger()
+rwlog_handler = rwlogger.RwLogger(category="rw-cal-log",
+                                  subcategory="aws",)
+logger.addHandler(rwlog_handler)
+#logger.setLevel(logging.DEBUG)
+
+
+        
+def prepare_vm_after_boot(drv,argument):
+    vm_inst = drv.get_instance(argument.server_id)
+    logger.info("Waiting for VM instance to get to running state")
+    vm_inst.wait_until_running()
+    logger.info("VM instance is now in running state") 
+    if argument.vdu_name:
+        vm_inst.create_tags(Tags=[{'Key': 'Name','Value':argument.vdu_name}])
+    if argument.vdu_node_id is not None:
+        vm_inst.create_tags(Tags=[{'Key':'node_id','Value':argument.vdu_node_id}])    
+    
+    for index,port_id in enumerate(argument.vdu_port_list):
+        logger.info("Attaching network interface with id %s to VDU instance %s",port_id,vm_inst.id)
+        drv.attach_network_interface(NetworkInterfaceId = port_id,InstanceId = vm_inst.id,DeviceIndex=index+1)
+    
+
+def main():
+    """
+    Main routine
+    """
+    parser = argparse.ArgumentParser(description='Script to prepare AWS VM after boot')
+    parser.add_argument('--aws_key',
+                        action = "store",
+                        dest = "aws_key",
+                        type = str,
+                        help='AWS Key')
+
+    parser.add_argument('--aws_secret',
+                        action = "store",
+                        dest = "aws_secret",
+                        type = str,
+                        help = "AWS Secret")
+
+    parser.add_argument('--aws_region',
+                        action = "store",
+                        dest = "aws_region",
+                        type = str,
+                        help = "AWS Region")
+
+    parser.add_argument('--server_id',
+                        action = "store",
+                        dest = "server_id",
+                        type = str,
+                        help = "Server ID on which boot operations needs to be performed")
+    
+    parser.add_argument('--vdu_name',
+                        action = "store",
+                        dest = "vdu_name",
+                        type = str,
+                        help = "VDU name")
+
+    parser.add_argument('--vdu_node_id',
+                        action = "store",
+                        dest = "vdu_node_id",
+                        help = "Node id for vdu")
+
+    parser.add_argument('--vdu_port_list',
+                        action = "append",
+                        dest = "vdu_port_list",
+                        default = [],
+                        help = "Port id list for vdu")
+
+    argument = parser.parse_args()
+
+    if not argument.aws_key:
+        logger.error("ERROR: AWS key is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using AWS key: %s" %(argument.aws_key))
+
+    if not argument.aws_secret:
+        logger.error("ERROR: AWS Secret is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using AWS Secret: %s" %(argument.aws_secret))
+
+    if not argument.aws_region:
+        logger.error("ERROR: AWS Region is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using AWS Region: %s" %(argument.aws_region))
+
+    if not argument.server_id:
+        logger.error("ERROR: Server ID is not configured")
+        sys.exit(1)
+    else:
+        logger.debug("Using Server ID : %s" %(argument.server_id))
+        
+    try:
+        pid = os.fork()
+        if pid > 0:
+            # exit for parent
+            sys.exit(0)
+    except OSError as e:
+        logger.error("fork failed: %d (%s)\n" % (e.errno, e.strerror))
+        sys.exit(2)
+        
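+    # The parent has exited; the child continues in the background so the
+    # caller is not blocked while the instance reaches running state.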
+    drv = aws_drv.AWSDriver(key = argument.aws_key,
+                            secret  = argument.aws_secret,
+                            region  = argument.aws_region)
+    prepare_vm_after_boot(drv, argument)
+    sys.exit(0)
+
+if __name__ == "__main__":
+    main()
+
diff --git a/rwcal/plugins/vala/rwcal_aws/rwcal_aws.py b/rwcal/plugins/vala/rwcal_aws/rwcal_aws.py
new file mode 100644 (file)
index 0000000..4f212d7
--- /dev/null
@@ -0,0 +1,1111 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import time
+import os
+import subprocess
+import logging
+import rift.rwcal.aws as aws_drv
+import rw_status
+import rift.cal.rwcal_status as rwcal_status
+import rwlogger
+import rift.rwcal.aws.exceptions as exceptions
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang)
+
+
+PREPARE_VM_CMD = "prepare_vm.py --aws_key {key} --aws_secret {secret} --aws_region {region} --server_id {server_id}"
+DELETE_VM_CMD =  "delete_vm.py --aws_key {key} --aws_secret {secret} --aws_region {region} --server_id {server_id}"
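+# Example rendered command (illustrative values only):
+#   prepare_vm.py --aws_key <key> --aws_secret <secret> --aws_region us-east-1 --server_id i-0abc123
+# prepare_vdu_on_boot() and cleanup_vdu_on_term() below prefix these commands
+# with the python3 interpreter and the AWS driver directory before running them.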
+
+rwstatus_exception_map = {IndexError: RwTypes.RwStatus.NOTFOUND,
+                          KeyError: RwTypes.RwStatus.NOTFOUND,
+                          NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,
+                          AttributeError: RwTypes.RwStatus.FAILURE,
+                          exceptions.RWErrorNotFound: RwTypes.RwStatus.NOTFOUND,
+                          exceptions.RWErrorDuplicate: RwTypes.RwStatus.DUPLICATE,
+                          exceptions.RWErrorExists: RwTypes.RwStatus.EXISTS,
+                          exceptions.RWErrorNotConnected: RwTypes.RwStatus.NOTCONNECTED,
+                          }
+
+rwstatus = rw_status.rwstatus_from_exc_map(rwstatus_exception_map)
+rwcalstatus = rwcal_status.rwcalstatus_from_exc_map(rwstatus_exception_map)
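+# The decorators generated from the map above catch the listed exceptions and
+# translate them into the corresponding status codes, e.g. a method raising
+# exceptions.RWErrorNotFound reports RwStatus.NOTFOUND to the caller.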
+
+class RwcalAWSPlugin(GObject.Object, RwCal.Cloud):
+    """This class implements the CAL VALA methods for AWS."""
+
+    flavor_id = 1
+    instance_num = 1
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self._driver_class = aws_drv.AWSDriver
+        self._flavor_list = []
+        self.log = logging.getLogger('rwcal.aws.%s' % RwcalAWSPlugin.instance_num)
+        self.log.setLevel(logging.DEBUG)
+
+        RwcalAWSPlugin.instance_num += 1
+
+    def _get_driver(self, account):
+        return self._driver_class(key     = account.aws.key,
+                                  secret  = account.aws.secret,
+                                  region  = account.aws.region,
+                                  ssh_key = account.aws.ssh_key,
+                                  vpcid   = account.aws.vpcid,
+                                  availability_zone = account.aws.availability_zone,
+                                  default_subnet_id = account.aws.default_subnet_id)
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        self.log.addHandler(
+                rwlogger.RwLogger(
+                    category="rw-cal-log",
+                    subcategory="aws",
+                    log_hdl=rwlog_ctx,
+                    )
+                )
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_cloud_creds(self, account):
+        """
+        Validates the cloud account credentials for the specified account by
+        accessing resources through the underlying API. If the credentials are
+        not valid, an error code and reason string are returned.
+        Arguments:
+            account - a cloud account to validate
+
+        Returns:
+            Validation Code and Details String
+        """
+        status = RwcalYang.CloudConnectionStatus(
+                status="success",
+                details="AWS Cloud Account validation not implemented yet"
+                )
+
+        return status
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_management_network(self, account):
+        """
+        Returns the management network associated with the specified account.
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            The management network
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_tenant(self, account, name):
+        """Create a new tenant.
+
+        Arguments:
+            account - a cloud account
+            name - name of the tenant
+
+        Returns:
+            The tenant id
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        """delete a tenant.
+
+        Arguments:
+            account - a cloud account
+            tenant_id - id of the tenant
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        """List tenants.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of tenants
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_role(self, account, name):
+        """Create a new user.
+
+        Arguments:
+            account - a cloud account
+            name - name of the user
+
+        Returns:
+            The user id
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        """Delete a user.
+
+        Arguments:
+            account - a cloud account
+            role_id - id of the user
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        """List roles.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of roles
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_image(self, account, image):
+        """Create an image
+
+        Arguments:
+            account - a cloud account
+            image - a description of the image to create
+
+        Returns:
+            The image id
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        """Delete a vm image.
+
+        Arguments:
+            account - a cloud account
+            image_id - id of the image to delete
+        """
+        raise NotImplementedError
+
+    @staticmethod
+    def _fill_image_info(img_info):
+        """Create a GI object from image info dictionary
+
+        Converts image information dictionary object returned by AWS
+        driver into Protobuf Gi Object
+
+        Arguments:
+            img_info - image information dictionary object from AWS
+
+        Returns:
+            The ImageInfoItem
+        """
+        img = RwcalYang.ImageInfoItem()
+        img.name = img_info.name
+        img.id   = img_info.id
+
+        # Copy any supported properties (e.g. checksum) from the AWS tags
+        if img_info.tags:
+            for tag in img_info.tags:
+                if tag['Key'] == 'checksum':
+                    setattr(img, tag['Key'], tag['Value'])
+        img.disk_format  = 'ami'
+        if img_info.state == 'available':
+            img.state = 'active'
+        else:
+            img.state = 'inactive'
+        return img
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        """Return a list of the names of all available images.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            The list of images in a VimResources object
+        """
+        response = RwcalYang.VimResources()
+        images = self._get_driver(account).list_images()
+        for img in images:
+            response.imageinfo_list.append(RwcalAWSPlugin._fill_image_info(img))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        """Return a image information.
+
+        Arguments:
+            account - a cloud account
+            image_id - an id of the image
+
+        Returns:
+            ImageInfoItem object containing image information.
+        """
+        image = self._get_driver(account).get_image(image_id)
+        return RwcalAWSPlugin._fill_image_info(image)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_vm(self, account, vminfo):
+        """Create a new virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vminfo - information that defines the type of VM to create
+
+        Returns:
+            The VM id
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        """Start an existing virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        """Stop a running virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        """Delete a virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        """Reboot a virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        raise NotImplementedError
+
+    @staticmethod
+    def _fill_vm_info(vm_info):
+        """Create a GI object from vm info dictionary
+
+        Converts the VM information dictionary object returned by the AWS
+        driver into a Protobuf Gi Object
+
+        Arguments:
+            vm_info - VM information from AWS
+
+        Returns:
+            Protobuf Gi object for VM
+        """
+        vm = RwcalYang.VMInfoItem()
+        vm.vm_id     = vm_info.id
+        vm.image_id  = vm_info.image_id
+        vm.flavor_id = vm_info.instance_type
+        if vm_info.state['Name'] == 'running':
+            vm.state = 'active'
+        else:
+            vm.state = 'inactive'
+        for network_intf in vm_info.network_interfaces:
+            if 'Attachment' in network_intf and network_intf['Attachment']['DeviceIndex'] == 0:
+                if 'Association' in network_intf and 'PublicIp' in network_intf['Association']:
+                    vm.public_ip = network_intf['Association']['PublicIp']
+                vm.management_ip = network_intf['PrivateIpAddress']
+            else:
+                addr = vm.private_ip_list.add()
+                addr.ip_address = network_intf['PrivateIpAddress']
+                if 'Association' in network_intf and 'PublicIp' in network_intf['Association']:
+                    addr = vm.public_ip_list.add()
+                    addr.ip_address = network_intf['Association']['PublicIp']
+
+        if vm_info.placement and 'AvailabilityZone' in vm_info.placement:
+            vm.availability_zone = vm_info.placement['AvailabilityZone']
+        if vm_info.tags:
+            for tag in vm_info.tags:
+                if tag['Key'] == 'Name':
+                    vm.vm_name   = tag['Value']
+                elif tag['Key'] in vm.user_tags.fields:
+                    setattr(vm.user_tags,tag['Key'],tag['Value'])
+        return vm
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        """Return a list of the VMs as vala boxed objects
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List containing VM information
+        """
+        response = RwcalYang.VimResources()
+        vms = self._get_driver(account).list_instances()
+        for vm in vms:
+            response.vminfo_list.append(RwcalAWSPlugin._fill_vm_info(vm))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vm(self, account, id):
+        """Return vm information.
+
+        Arguments:
+            account - a cloud account
+            id - an id for the VM
+
+        Returns:
+            VM information
+        """
+        vm = self._get_driver(account).get_instance(id)
+        return RwcalAWSPlugin._fill_vm_info(vm)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_flavor(self, account, flavor):
+        """Create new flavor.
+           AWS has a fixed set of EC2 instance types, so we map the flavor to
+           the closest existing instance type and create a local flavor for it.
+
+        Arguments:
+            account - a cloud account
+            flavor - flavor of the VM
+
+        Returns:
+            flavor id (with EC2 instance type included in id)
+        """
+        drv = self._get_driver(account)
+        inst_type = drv.map_flavor_to_instance_type(ram   = flavor.vm_flavor.memory_mb,
+                                                    vcpus = flavor.vm_flavor.vcpu_count,
+                                                    disk  = flavor.vm_flavor.storage_gb)
+
+        new_flavor = RwcalYang.FlavorInfoItem()
+        new_flavor.name = flavor.name
+        new_flavor.vm_flavor.memory_mb = flavor.vm_flavor.memory_mb
+        new_flavor.vm_flavor.vcpu_count = flavor.vm_flavor.vcpu_count
+        new_flavor.vm_flavor.storage_gb = flavor.vm_flavor.storage_gb
+        new_flavor.id = inst_type + '-' + str(RwcalAWSPlugin.flavor_id)
+        RwcalAWSPlugin.flavor_id += 1
+        self._flavor_list.append(new_flavor)
+        return new_flavor.id
+
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        """Delete flavor.
+
+        Arguments:
+            account - a cloud account
+            flavor_id - id flavor of the VM
+        """
+
+        flavor = [flav for flav in self._flavor_list if flav.id == flavor_id]
+        # list has no delete() method; remove the matching entry
+        self._flavor_list.remove(flavor[0])
+
+    @staticmethod
+    def _fill_flavor_info(flavor_info):
+        """Create a GI object from flavor info dictionary
+
+        Copies locally cached flavor information into a new Protobuf Gi
+        Object
+
+        Arguments:
+            flavor_info: locally cached flavor information
+
+        Returns:
+             Object of class FlavorInfoItem
+        """
+        flavor = RwcalYang.FlavorInfoItem()
+        flavor.name                       = flavor_info.name
+        flavor.id                         = flavor_info.id
+        flavor.vm_flavor.memory_mb = flavor_info.vm_flavor.memory_mb
+        flavor.vm_flavor.vcpu_count = flavor_info.vm_flavor.vcpu_count
+        flavor.vm_flavor.storage_gb = flavor_info.vm_flavor.storage_gb
+        return flavor
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        """Return flavor information.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of flavors
+        """
+        response = RwcalYang.VimResources()
+        for flv in self._flavor_list:
+            response.flavorinfo_list.append(RwcalAWSPlugin._fill_flavor_info(flv))
+        return response
+
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, id):
+        """Return flavor information.
+
+        Arguments:
+            account - a cloud account
+            id - an id for the flavor
+
+        Returns:
+            Flavor info item
+        """
+        flavor = [flav for flav in self._flavor_list if flav.id == id]
+        return (RwcalAWSPlugin._fill_flavor_info(flavor[0]))
+
+    def _fill_network_info(self, network_info, account):
+        """Create a GI object from network info dictionary
+
+        Converts Network information dictionary object returned by AWS
+        driver into Protobuf Gi Object
+
+        Arguments:
+            network_info - Network information from AWS
+            account - a cloud account
+
+        Returns:
+            Network info item
+        """
+        network                  = RwcalYang.NetworkInfoItem()
+        network.network_id       = network_info.subnet_id
+        network.subnet           = network_info.cidr_block
+        if network_info.tags:
+            for tag in network_info.tags:
+                if tag['Key'] == 'Name':
+                    network.network_name   = tag['Value']
+        return network
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        """Return a list of networks
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of networks
+        """
+        response = RwcalYang.VimResources()
+        networks = self._get_driver(account).get_subnet_list()
+        for network in networks:
+            response.networkinfo_list.append(self._fill_network_info(network, account))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, id):
+        """Return a network
+
+        Arguments:
+            account - a cloud account
+            id - an id for the network
+
+        Returns:
+            Network info item
+        """
+        network = self._get_driver(account).get_subnet(id)
+        return self._fill_network_info(network, account)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_network(self, account, network):
+        """Create a new network
+
+        Arguments:
+            account - a cloud account
+            network - Network object
+
+        Returns:
+            Network id
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        """Delete a network
+
+        Arguments:
+            account - a cloud account
+            network_id - an id for the network
+        """
+        raise NotImplementedError
+
+    @staticmethod
+    def _fill_port_info(port_info):
+        """Create a GI object from port info dictionary
+
+        Converts Port information dictionary object returned by AWS
+        driver into Protobuf Gi Object
+
+        Arguments:
+            port_info - Port/Network interface information from AWS
+
+        Returns:
+            Port info item
+        """
+        port = RwcalYang.PortInfoItem()
+
+        port.port_id    = port_info.id
+        port.network_id = port_info.subnet_id
+        if port_info.attachment and 'InstanceId' in port_info.attachment:
+            port.vm_id = port_info.attachment['InstanceId']
+        port.ip_address = port_info.private_ip_address
+        if port_info.status == 'in-use':
+            port.port_state = 'active'
+        elif port_info.status == 'available':
+            port.port_state = 'inactive'
+        else:
+            port.port_state = 'unknown'
+        if port_info.tag_set:
+            for tag in port_info.tag_set:
+                if tag['Key'] == 'Name':
+                    port.port_name   = tag['Value']
+        return port
+
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        """Return a port
+
+        Arguments:
+            account - a cloud account
+            port_id - an id for the port
+
+        Returns:
+            Port info item
+        """
+        port = self._get_driver(account).get_network_interface(port_id)
+        return RwcalAWSPlugin._fill_port_info(port)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        """Return a list of ports
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            Port info list
+        """
+        response = RwcalYang.VimResources()
+        ports = self._get_driver(account).get_network_interface_list()
+        for port in ports:
+            response.portinfo_list.append(RwcalAWSPlugin._fill_port_info(port))
+        return response
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_port(self, account, port):
+        """Create a new port
+
+        Arguments:
+            account - a cloud account
+            port - port object
+
+        Returns:
+            Port id
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        """Delete a port
+
+        Arguments:
+            account - a cloud account
+            port_id - an id for port
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[""])
+    def do_add_host(self, account, host):
+        """Add a new host
+
+        Arguments:
+            account - a cloud account
+            host - a host object
+
+        Returns:
+            An id for the host
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        """Remove a host
+
+        Arguments:
+            account - a cloud account
+            host_id - an id for the host
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        """Return a host
+
+        Arguments:
+            account - a cloud account
+            host_id - an id for host
+
+        Returns:
+            Host info item
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        """Return a list of hosts
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of hosts
+        """
+        raise NotImplementedError
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        """Create a new virtual link
+
+        Arguments:
+            account     - a cloud account
+            link_params - information that defines the virtual link to create
+
+        Returns:
+            The virtual link (subnet) id
+        """
+        drv = self._get_driver(account)
+        kwargs = {}
+        kwargs['CidrBlock'] = link_params.subnet
+
+        subnet = drv.create_subnet(**kwargs)
+        if link_params.name:
+            subnet.create_tags(Tags=[{'Key': 'Name', 'Value': link_params.name}])
+        if link_params.associate_public_ip:
+            drv.modify_subnet(SubnetId=subnet.id, MapPublicIpOnLaunch=link_params.associate_public_ip)
+        return subnet.id
+
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        """Delete a virtual link
+
+        Arguments:
+            account - a cloud account
+            link_id - id for the virtual-link to be deleted
+
+        Returns:
+            None
+        """
+        drv = self._get_driver(account)
+        port_list = drv.get_network_interface_list(SubnetId=link_id)
+        for port in port_list:
+            if port and port.association and 'AssociationId' in port.association:
+                drv.disassociate_public_ip_from_network_interface(NetworkInterfaceId=port.id)
+            if port and port.attachment and 'AttachmentId' in port.attachment:
+                # Force detach as otherwise the delete below fails
+                drv.detach_network_interface(AttachmentId=port.attachment['AttachmentId'], Force=True)
+                # Detaching takes time, so poll until the port is no longer in use
+                port = drv.get_network_interface(NetworkInterfaceId=port.id)
+                retries = 0
+                while port.status == 'in-use' and retries < 10:
+                    time.sleep(5)
+                    retries += 1
+                    port = drv.get_network_interface(NetworkInterfaceId=port.id)
+            drv.delete_network_interface(NetworkInterfaceId=port.id)
+        drv.delete_subnet(link_id)
+
+    @staticmethod
+    def _fill_connection_point_info(c_point, port_info):
+        """Create a GI object for RwcalYang.VDUInfoParams_ConnectionPoints()
+
+        Converts EC2.NetworkInterface object returned by AWS driver into
+        Protobuf Gi Object
+
+        Arguments:
+            c_point   - the connection-point GI object to fill in
+            port_info - Network Interface information from AWS
+        """
+        c_point.virtual_link_id = port_info.subnet_id
+        c_point.connection_point_id = port_info.id
+        if port_info.attachment:
+            c_point.vdu_id = port_info.attachment['InstanceId']
+        c_point.ip_address = port_info.private_ip_address
+        if port_info.association and 'PublicIp' in port_info.association:
+                c_point.public_ip = port_info.association['PublicIp']
+        if port_info.tag_set:
+            for tag in port_info.tag_set:
+                if tag['Key'] == 'Name':
+                    c_point.name   = tag['Value']
+        if port_info.status == 'in-use':
+            c_point.state = 'active'
+        elif port_info.status == 'available':
+            c_point.state = 'inactive'
+        else:
+            c_point.state = 'unknown'
+
+    @staticmethod
+    def _fill_virtual_link_info(network_info, port_list):
+        """Create a GI object for VirtualLinkInfoParams
+
+        Converts Subnet and NetworkInterface object
+        returned by AWS driver into Protobuf Gi Object
+
+        Arguments:
+            network_info - Subnet information from AWS
+            port_list - A list of network interface information from AWS
+        Returns:
+            Protobuf Gi object for VirtualLinkInfoParams
+        """
+        link = RwcalYang.VirtualLinkInfoParams()
+        if network_info.state == 'available':
+            link.state = 'active'
+        else:
+            link.state = 'inactive'
+        link.virtual_link_id = network_info.subnet_id
+        link.subnet = network_info.cidr_block
+        if network_info.tags:
+            for tag in network_info.tags:
+                if tag['Key'] == 'Name':
+                    link.name   = tag['Value']
+        for port in port_list:
+            c_point = link.connection_points.add()
+            RwcalAWSPlugin._fill_connection_point_info(c_point, port)
+
+        return link
+
+    @staticmethod
+    def _fill_vdu_info(vm_info, port_list):
+        """Create a GI object for VDUInfoParams
+
+        Converts VM information dictionary object returned by AWS
+        driver into Protobuf Gi Object
+
+        Arguments:
+            vm_info - EC2 instance information from AWS
+            port_list - A list of network interface information from AWS
+        Returns:
+            Protobuf Gi object for VDUInfoParams
+        """
+        vdu = RwcalYang.VDUInfoParams()
+        vdu.vdu_id = vm_info.id
+        mgmt_port = [port for port in port_list if port.attachment and port.attachment['DeviceIndex'] == 0]
+        assert(len(mgmt_port) == 1)
+        vdu.management_ip = mgmt_port[0].private_ip_address
+        if mgmt_port[0].association and 'PublicIp' in mgmt_port[0].association:
+            vdu.public_ip = mgmt_port[0].association['PublicIp']
+            # For now set management ip also to public ip
+            #vdu.management_ip = vdu.public_ip
+        if vm_info.tags:
+            for tag in vm_info.tags:
+                if tag['Key'] == 'Name':
+                    vdu.name   = tag['Value']
+                elif tag['Key'] == 'node_id':
+                    vdu.node_id = tag['Value']
+        vdu.image_id = vm_info.image_id
+        vdu.flavor_id = vm_info.instance_type
+        if vm_info.state['Name'] == 'running':
+            vdu.state = 'active'
+        else:
+            vdu.state = 'inactive'
+        #if vm_info.placement and 'AvailabilityZone' in vm_info.placement:
+        #    vdu.availability_zone = vm_info.placement['AvailabilityZone']
+        # Fill the port information
+        cp_port_list = [port for port in port_list if port.attachment and port.attachment['DeviceIndex'] != 0]
+
+        for port in cp_port_list:
+            c_point = vdu.connection_points.add()
+            RwcalAWSPlugin._fill_connection_point_info(c_point, port)
+        return vdu
+
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        """Get information about virtual link.
+
+        Arguments:
+            account  - a cloud account
+            link_id  - id for the virtual-link
+
+        Returns:
+            Object of type RwcalYang.VirtualLinkInfoParams
+        """
+        drv = self._get_driver(account)
+        network = drv.get_subnet(SubnetId=link_id)
+        port_list = drv.get_network_interface_list(SubnetId=link_id)
+        virtual_link = RwcalAWSPlugin._fill_virtual_link_info(network, port_list)
+        return virtual_link
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_virtual_link_list(self, account):
+        """Get information about all the virtual links
+
+        Arguments:
+            account  - a cloud account
+
+        Returns:
+            A list of objects of type RwcalYang.VirtualLinkInfoParams
+        """
+        vnf_resources = RwcalYang.VNFResources()
+        drv = self._get_driver(account)
+        networks = drv.get_subnet_list()
+        for network in networks:
+            port_list = drv.get_network_interface_list(SubnetId=network.id)
+            virtual_link = RwcalAWSPlugin._fill_virtual_link_info(network, port_list)
+            vnf_resources.virtual_link_info_list.append(virtual_link)
+        return vnf_resources
+
+    def _create_connection_point(self, account, c_point):
+        """
+        Create a connection point
+        Arguments:
+           account  - a cloud account
+           c_point  - the connection point to create
+
+        Returns:
+           The created network interface
+        """
+        drv = self._get_driver(account)
+        port = drv.create_network_interface(SubnetId=c_point.virtual_link_id)
+        if c_point.name:
+            port.create_tags(Tags=[{'Key': 'Name', 'Value': c_point.name}])
+        if c_point.associate_public_ip:
+            drv.associate_public_ip_to_network_interface(NetworkInterfaceId=port.id)
+        return port
+
+    def prepare_vdu_on_boot(self, account, server_id, vdu_init_params, vdu_port_list=None):
+        """Spawn the prepare_vm helper to finish VDU setup in the background.
+
+        Arguments:
+            account         - a cloud account
+            server_id       - id of the EC2 instance
+            vdu_init_params - VDU init parameters (name, node_id)
+            vdu_port_list   - list of network interface ids to attach
+        """
+        cmd = PREPARE_VM_CMD.format(key     = account.aws.key,
+                                  secret  = account.aws.secret,
+                                  region  = account.aws.region,
+                                  server_id = server_id)
+        if vdu_init_params.has_field('name'):
+            cmd += (" --vdu_name "+ vdu_init_params.name)
+        if vdu_init_params.has_field('node_id'):
+            cmd += (" --vdu_node_id "+ vdu_init_params.node_id)
+        if vdu_port_list is not None:
+            for port_id in vdu_port_list:
+                cmd += (" --vdu_port_list "+ port_id)
+
+        exec_path = 'python3 ' + os.path.dirname(aws_drv.__file__)
+        exec_cmd = exec_path+'/'+cmd
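+        # Illustrative final command (example values, not real credentials):
+        #   python3 <aws driver dir>/prepare_vm.py --aws_key <key> --aws_secret <secret>
+        #     --aws_region us-east-1 --server_id i-0abc123 --vdu_name vdu1
+        #     --vdu_port_list eni-a --vdu_port_list eni-b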
+        self.log.info("Running command: %s" %(exec_cmd))
+        subprocess.call(exec_cmd, shell=True)
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        """Create a new virtual deployment unit
+
+        Arguments:
+            account     - a cloud account
+            vdu_init  - information about VDU to create (RwcalYang.VDUInitParams)
+
+        Returns:
+            The vdu_id
+        """
+        drv = self._get_driver(account)
+
+        ### Create the VM first; its connection points are created and
+        ### attached afterwards
+        kwargs = {}
+        kwargs['ImageId'] = vdu_init.image_id
+        if vdu_init.has_field('flavor_id'):
+            #Get instance type from flavor id which is of form c3.xlarge-1
+            inst_type =  vdu_init.flavor_id.split('-')[0]
+        else:
+            inst_type = drv.map_flavor_to_instance_type(ram       = vdu_init.vm_flavor.memory_mb,
+                                vcpus     = vdu_init.vm_flavor.vcpu_count,
+                                disk      = vdu_init.vm_flavor.storage_gb)
+
+        kwargs['InstanceType'] = inst_type
+        if vdu_init.vdu_init and vdu_init.vdu_init.userdata:
+            kwargs['UserData'] = vdu_init.vdu_init.userdata
+
+        # If we need to allocate a public IP address, create a network
+        # interface and associate an elastic IP with it
+        if vdu_init.allocate_public_address:
+            port_id = drv.create_network_interface(SubnetId=drv.default_subnet_id)
+            drv.associate_public_ip_to_network_interface(NetworkInterfaceId=port_id.id)
+            network_interface = {'NetworkInterfaceId': port_id.id, 'DeviceIndex': 0}
+            kwargs['NetworkInterfaces'] = [network_interface]
+
+        #AWS Driver will use default subnet id to create first network interface
+        # if network interface is not specified and will also have associate public ip
+        # if enabled for the subnet
+        vm_inst = drv.create_instance(**kwargs)
+
+        # Waiting for the running state, tagging the instance and attaching
+        # the network interfaces are deferred to the prepare_vm helper
+        # spawned by prepare_vdu_on_boot() below.
+
+        # Create the connection points
+        port_list = []
+        for c_point in vdu_init.connection_points:
+            port_id = self._create_connection_point(account, c_point)
+            port_list.append(port_id.id)
+
+        # The helper waits for the instance to reach running state, sets the
+        # name/node_id tags and attaches the network interfaces
+        self.prepare_vdu_on_boot(account, vm_inst[0].id, vdu_init, port_list)
+
+        return vm_inst[0].id
+
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        """Modify Properties of existing virtual deployment unit
+
+        Arguments:
+            account     -  a cloud account
+            vdu_modify  -  Information about VDU Modification (RwcalYang.VDUModifyParams)
+        """
+        drv = self._get_driver(account)
+
+        vm_inst = drv.get_instance(vdu_modify.vdu_id)
+
+        if vm_inst.state['Name'] != 'running':
+            self.log.error("RWCAL-AWS: VM with id %s is not in running state during modify VDU",vdu_modify.vdu_id)
+            raise exceptions.RWErrorFailure("RWCAL-AWS: VM with id %s is not in running state during modify VDU",vdu_modify.vdu_id)
+
+        port_list = drv.get_network_interface_list(InstanceId = vdu_modify.vdu_id)
+        used_device_indexes = [port.attachment['DeviceIndex'] for port in port_list if port.attachment]
+
+        device_index = 1
+        for c_point in vdu_modify.connection_points_add:
+            # Find the lowest unused device index
+            while device_index in used_device_indexes:
+                device_index = device_index + 1
+            port_id = self._create_connection_point(account, c_point)
+            drv.attach_network_interface(NetworkInterfaceId=port_id.id, InstanceId=vdu_modify.vdu_id, DeviceIndex=device_index)
+
+        ### Detach the requested connection_points
+        for c_point in vdu_modify.connection_points_remove:
+            port = drv.get_network_interface(NetworkInterfaceId=c_point.connection_point_id)
+            #Check if elastic IP is associated with interface and release it
+            if port  and port.association and 'AssociationId' in port.association:
+                drv.disassociate_public_ip_from_network_interface(NetworkInterfaceId=port.id)
+            if port and port.attachment and port.attachment['DeviceIndex'] != 0:
+                # Force detach as otherwise the delete below fails
+                drv.detach_network_interface(AttachmentId=port.attachment['AttachmentId'], Force=True)
+            else:
+                self.log.error("RWCAL-AWS: Cannot modify connection port at index 0")
+
+        # Delete the connection points. Interfaces take time to get detached from instance and so
+        # we check status before doing delete network interface
+        for c_point in vdu_modify.connection_points_remove:
+            port = drv.get_network_interface(NetworkInterfaceId=c_point.connection_point_id)
+            retries = 0
+            if port and port.attachment and port.attachment['DeviceIndex'] == 0:
+                self.log.error("RWCAL-AWS: Cannot modify connection port at index 0")
+                continue
+            while port.status == 'in-use' and retries < 10:
+                time.sleep(5)
+                retries += 1
+                port = drv.get_network_interface(NetworkInterfaceId=c_point.connection_point_id)
+            drv.delete_network_interface(NetworkInterfaceId=port.id)
+
+    def cleanup_vdu_on_term(self, account, server_id, vdu_port_list=None):
+        """Spawn the delete_vm helper to finish VDU teardown in the background.
+
+        Arguments:
+            account       - a cloud account
+            server_id     - id of the EC2 instance being terminated
+            vdu_port_list - list of network interface ids to delete
+        """
+        cmd = DELETE_VM_CMD.format(key    = account.aws.key,
+                                  secret  = account.aws.secret,
+                                  region  = account.aws.region,
+                                  server_id = server_id)
+        if vdu_port_list is not None:
+            for port_id in vdu_port_list:
+                cmd += (" --vdu_port_list "+ port_id)
+
+        exec_path = 'python3 ' + os.path.dirname(aws_drv.__file__)
+        exec_cmd = exec_path+'/'+cmd
+        self.log.info("Running command: %s" %(exec_cmd))
+        subprocess.call(exec_cmd, shell=True)
+
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        """Delete a virtual deployment unit
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu to be deleted
+
+        Returns:
+            None
+        """
+        drv = self._get_driver(account)
+        ### Get list of port on VM and delete them.
+        #vm_inst = drv.get_instance(vdu_id)
+
+        port_list = drv.get_network_interface_list(InstanceId = vdu_id)
+        delete_port_list = [port.id for port in port_list if port.attachment and port.attachment['DeleteOnTermination'] is False]
+        drv.terminate_instance(vdu_id)
+
+        self.cleanup_vdu_on_term(account,vdu_id,delete_port_list)
+
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        """Get information about a virtual deployment unit.
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu
+
+        Returns:
+            Object of type RwcalYang.VDUInfoParams
+        """
+        drv = self._get_driver(account)
+
+        ### Get list of ports excluding the one for management network
+        vm = drv.get_instance(vdu_id)
+        port_list = drv.get_network_interface_list(InstanceId = vdu_id)
+        return RwcalAWSPlugin._fill_vdu_info(vm,port_list)
+
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vdu_list(self, account):
+        """Get information about all the virtual deployment units
+
+        Arguments:
+            account     - a cloud account
+
+        Returns:
+            A list of objects of type RwcalYang.VDUInfoParams
+        """
+        vnf_resources = RwcalYang.VNFResources()
+        drv = self._get_driver(account)
+        vms = drv.list_instances()
+        for vm in vms:
+            ### Get list of ports excluding one for management network
+            port_list = [p for p in drv.get_network_interface_list(InstanceId = vm.id)]
+            vdu = RwcalAWSPlugin._fill_vdu_info(vm,
+                                                port_list)
+            vnf_resources.vdu_info_list.append(vdu)
+        return vnf_resources
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/CMakeLists.txt b/rwcal/plugins/vala/rwcal_cloudsim/CMakeLists.txt
new file mode 100644 (file)
index 0000000..3250db9
--- /dev/null
@@ -0,0 +1,39 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+set(PKG_NAME rwcal-cloudsim)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+rift_install_python_plugin(rwcal_cloudsim rwcal_cloudsim.py)
+
+rift_python_install_tree(
+  FILES
+    rift/rwcal/cloudsim/__init__.py
+    rift/rwcal/cloudsim/core.py
+    rift/rwcal/cloudsim/exceptions.py
+    rift/rwcal/cloudsim/image.py
+    rift/rwcal/cloudsim/lvm.py
+    rift/rwcal/cloudsim/lxc.py
+    rift/rwcal/cloudsim/net.py
+    rift/rwcal/cloudsim/shell.py
+  PYTHON3_ONLY
+  COMPONENT ${PKG_LONG_NAME})
+
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/Makefile b/rwcal/plugins/vala/rwcal_cloudsim/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/__init__.py b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/core.py b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/core.py
new file mode 100644 (file)
index 0000000..86c1952
--- /dev/null
@@ -0,0 +1,367 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import functools
+
+from . import exceptions
+
+
+def unsupported(f):
+    @functools.wraps(f)
+    def impl(*args, **kwargs):
+        msg = '{} not supported'.format(f.__name__)
+        raise exceptions.RWErrorNotSupported(msg)
+
+    return impl
+
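+# Illustrative use (hypothetical driver): a subclass overrides only the
+# operations it supports; any method left un-overridden keeps the
+# @unsupported stub and raises RWErrorNotSupported when called, e.g.:
+#
+#   class ExampleCloud(Cloud):
+#       def create_vm(self, account, vm):
+#           return "vm-1"
+#
+#   ExampleCloud().create_tenant(account, "t1")  # raises RWErrorNotSupported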
+
+class Cloud(object):
+    """
+    Cloud defines a base class for cloud driver implementations. Note that
+    not all drivers will support the complete set of functionality presented
+    here.
+    """
+
+    @unsupported
+    def get_management_network(self, account):
+        """
+        Returns the management network associated with the specified account.
+
+        @param account - a cloud account
+
+        @return a management network
+        """
+        pass
+
+    @unsupported
+    def create_tenant(self, account, name):
+        """
+        Create a new tenant.
+
+        @param account - a cloud account
+        @param name    - name to assign to the tenant.
+        """
+        pass
+
+    @unsupported
+    def delete_tenant(self, account, tenant_id):
+        """
+        delete a tenant.
+
+        @param account   - a cloud account
+        @param tenant_id - id of tenant to be deleted.
+        """
+        pass
+
+    @unsupported
+    def get_tenant_list(self, account):
+        """
+        List tenants.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def create_role(self, account, name):
+        """
+        Create a new role.
+
+        @param account - a cloud account
+        @param name    - name to assign to the role.
+        """
+        pass
+
+    @unsupported
+    def delete_role(self, account, role_id):
+        """
+        delete a role.
+
+        @param account - a cloud account
+        @param role_id - id of role to be deleted.
+        """
+        pass
+
+    @unsupported
+    def get_role_list(self, account):
+        """
+        List roles.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def create_image(self, account, image):
+        """
+        Create an image
+
+        @param account - a cloud account
+        @param image   - a description of the image to create
+        """
+        pass
+
+    @unsupported
+    def delete_image(self, account, image_id):
+        """
+        delete a vm image.
+
+        @param account  - a cloud account
+        @param image_id - Instance id of VM image to be deleted.
+        """
+        pass
+
+    @unsupported
+    def get_image_list(self, account):
+        """
+        Return a list of the names of all available images.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def get_image(self, account, image_id):
+        """
+        Returns image information.
+
+        @param account  - a cloud account
+        @param image_id - id of the image
+        """
+        pass
+
+    @unsupported
+    def create_vm(self, account, vm):
+        """
+        Create a new virtual machine.
+
+        @param account - a cloud account
+        @param vm      - The info required to create a VM
+        """
+        pass
+
+    @unsupported
+    def start_vm(self, account, vm_id):
+        """
+        start an existing virtual machine.
+
+        @param account - a cloud account
+        @param vm_id   - The id of the VM to start
+        """
+        pass
+
+    @unsupported
+    def stop_vm(self, account, vm_id):
+        """
+        Stop a running virtual machine.
+
+        @param account - a cloud account
+        @param vm_id   - The id of the VM to stop
+        """
+        pass
+
+    @unsupported
+    def delete_vm(self, account, vm_id):
+        """
+        delete a virtual machine.
+
+        @param account - a cloud account
+        @param vm_id   - Instance id of VM to be deleted.
+        """
+        pass
+
+    @unsupported
+    def reboot_vm(self, account, vm_id):
+        """
+        reboot a virtual machine.
+
+        @param account - a cloud account
+        @param vm_id   - Instance id of VM to be rebooted.
+        """
+        pass
+
+    @unsupported
+    def get_vm_list(self, account):
+        """
+        Return a list of vms.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def get_vm(self, account):
+        """
+        Return vm information.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def create_flavor(self, account, flavor):
+        """
+        create new flavor.
+
+        @param account - a cloud account
+        @param flavor  - Flavor object
+        """
+        pass
+
+    @unsupported
+    def delete_flavor(self, account, flavor_id):
+        """
+        Delete flavor.
+
+        @param account   - a cloud account
+        @param flavor_id - Flavor id to be deleted.
+        """
+        pass
+
+    @unsupported
+    def get_flavor_list(self, account):
+        """
+        Return a list of flavors.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def get_flavor(self, account):
+        """
+        Return flavor information.
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def get_network(self, account, network_id):
+        """
+        Return a network
+
+        @param account    - a cloud account
+        @param network_id - unique network identifier
+        """
+        pass
+
+    @unsupported
+    def get_network_list(self, account):
+        """
+        Return a list of networks
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def create_network(self, account, network):
+        """
+        Create a new network
+
+        @param account - a cloud account
+        @param network - Network object
+        """
+        pass
+
+    @unsupported
+    def delete_network(self, account, network_id):
+        """
+        Delete a network
+
+        @param account    - a cloud account
+        @param network_id - unique network identifier
+        """
+        pass
+
+    @unsupported
+    def get_port(self, account, port_id):
+        """
+        Return a port
+
+        @param account - a cloud account
+        @param port_id - unique port identifier
+        """
+        pass
+
+    @unsupported
+    def get_port_list(self, account):
+        """
+        Return a list of ports
+
+        @param account - a cloud account
+        """
+        pass
+
+    @unsupported
+    def create_port(self, account, port):
+        """
+        Create a new port
+
+        @param account - a cloud account
+        @param port    - port object
+        """
+        pass
+
+    @unsupported
+    def delete_port(self, account, port_id):
+        """
+        Delete a port
+
+        @param account - a cloud account
+        @param port_id - unique port identifier
+        """
+        pass
+
+    @unsupported
+    def add_host(self, account, host):
+        """
+        Add a new host
+
+        @param account - a cloud account
+        @param host    - a host object
+        """
+        pass
+
+    @unsupported
+    def remove_host(self, account, host_id):
+        """
+        Remove a host
+
+        @param account - a cloud account
+        @param host_id - unique host identifier
+        """
+        pass
+
+    @unsupported
+    def get_host(self, account, host_id):
+        """
+        Return a host
+
+        @param account - a cloud account
+        @param host_id - unique host identifier
+        """
+        pass
+
+    @unsupported
+    def get_host_list(self, account):
+        """
+        Return a list of hosts
+
+        @param account - a cloud account
+        """
+        pass
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/exceptions.py b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/exceptions.py
new file mode 100644 (file)
index 0000000..3bb3aa7
--- /dev/null
@@ -0,0 +1,54 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+#
+# Rift Exceptions:
+#   These exceptions each correspond to a rift status as defined
+# in rwtypes.vala.  Adding them here so that errors from C transitioning
+# back to python can be handled in a pythonic manner rather than having to
+# inspect return values.
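+#
+# For example (hypothetical driver call), callers can write:
+#
+#   try:
+#       vm = driver.get_instance(vm_id)
+#   except RWErrorNotFound:
+#       pass  # handle the missing VM here
+#
+# instead of inspecting an RwStatus return value.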
+
+class RWErrorFailure(Exception):
+    pass
+
+class RWErrorDuplicate(Exception):
+    pass
+
+class RWErrorNotFound(Exception):
+    pass
+
+class RWErrorOutOfBounds(Exception):
+    pass
+
+class RWErrorBackpressure(Exception):
+    pass
+
+class RWErrorTimeout(Exception):
+    pass
+
+class RWErrorExists(Exception):
+    pass
+
+class RWErrorNotEmpty(Exception):
+    pass
+
+class RWErrorNotConnected(Exception):
+    pass
+
+class RWErrorNotSupported(Exception):
+    pass
+
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/image.py b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/image.py
new file mode 100644 (file)
index 0000000..620dcc4
--- /dev/null
@@ -0,0 +1,40 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import math
+import re
+
+from . import shell
+
+
+class ImageInfoError(Exception):
+    pass
+
+
+def qcow2_virtual_size_mbytes(qcow2_filepath):
+    info_output = shell.command("qemu-img info {}".format(qcow2_filepath))
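+    # "qemu-img info" output includes a line like (illustrative):
+    #   virtual size: 8.0G (8589934592 bytes)
+    # from which the byte count is parsed and rounded up to whole megabytes.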
+    for line in info_output:
+        if line.startswith("virtual size"):
+            match = re.search(r"\(([0-9]*) bytes\)", line)
+            if match is None:
+                raise ImageInfoError("Could not parse image size")
+
+            num_bytes = int(match.group(1))
+            num_mbytes = num_bytes / 1024 / 1024
+            return math.ceil(num_mbytes)
+
+    raise ImageInfoError("Could not image virtual size field in output")
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/lvm.py b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/lvm.py
new file mode 100644 (file)
index 0000000..4ae4de9
--- /dev/null
@@ -0,0 +1,280 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import logging
+import os
+import re
+
+from . import shell
+
+
+logger = logging.getLogger(__name__)
+
+
+class PhysicalVolume(
+        collections.namedtuple(
+            "PhysicalVolume", [
+                "pv",
+                "vg",
+                "fmt",
+                "attr",
+                "psize",
+                "pfree",
+                ]
+            )
+        ):
+    pass
+
+
+class VolumeGroup(
+        collections.namedtuple(
+            "VolumeGroup", [
+                "vg",
+                "num_pv",
+                "num_lv",
+                "num_sn",
+                "attr",
+                "vsize",
+                "vfree",
+                ]
+            )
+        ):
+    pass
+
+
+class LoopbackVolumeGroup(object):
+    def __init__(self, name):
+        self._name = name
+
+    def __repr__(self):
+        return repr({
+            "name": self.name,
+            "filepath": self.filepath,
+            "loopback": self.loopback,
+            "exists": self.exists,
+            "volume_group": self.volume_group,
+            })
+
+    @property
+    def exists(self):
+        return any(v.vg == self.name for v in volume_groups())
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    def filepath(self):
+        return find_backing_file(self.name)
+
+    @property
+    def loopback(self):
+        return find_loop_device(self.name)
+
+    @property
+    def volume_group(self):
+        for vgroup in volume_groups():
+            if vgroup.vg == self.name:
+                return vgroup
+
+    @property
+    def physical_volume(self):
+        for pvolume in physical_volumes():
+            if pvolume.vg == self.name:
+                return pvolume
+
+    @property
+    def size(self):
+        return os.path.getsize(self.filepath)
+
+    def extend_mbytes(self, num_mbytes):
+        """ Extend the size of the Loopback volume group
+
+        Arguments:
+            num_mbytes - number of megabytes to extend by
+        """
+
+        # Extend the size of the backing store
+        shell.command('truncate -c -s +{}M {}'.format(
+            num_mbytes, self.filepath)
+            )
+
+        # Notify loopback driver of the resized backing store
+        shell.command('losetup -c {}'.format(self.loopback))
+
+        # Expand the physical volume to match new size
+        shell.command('pvresize {}'.format(self.physical_volume.pv))
+
+
+def find_loop_device(volume):
+    pvolumes = physical_volumes()
+    for pvolume in pvolumes:
+        if pvolume.vg == volume:
+            return pvolume.pv
+
+    return None
+
+
+def find_backing_file(volume):
+    """
+    /dev/loop0: [64513]:414503 (/lvm/rift.img)
+
+    """
+    loop = find_loop_device(volume)
+    if loop is None:
+        return None
+
+    output = shell.command("losetup {}".format(loop))[0]
+    return re.search(r'.*\(([^)]*)\).*', output).group(1)
+
+
+def create(volume="rift", filepath="/lvm/rift.img"):
+    """
+    First, create a loopback device backed by a file in the local file
+    system. Second, create an LVM volume group on top of the loop device
+    that was just created.
+    """
+    pvolumes = physical_volumes()
+    for pvolume in pvolumes:
+        if pvolume.vg == volume:
+            raise ValueError("VolumeGroup %s already exists" % volume)
+
+    # Delete the existing backing file if it exists
+    if os.path.exists(filepath):
+        os.remove(filepath)
+
+    # Create the file that will be used as the backing store
+    if not os.path.exists(os.path.dirname(filepath)):
+        os.makedirs(os.path.dirname(filepath))
+
+    # Create a minimal file to hold any LVM physical volume metadata
+    shell.command('truncate -s 50M {}'.format(filepath))
+
+    # Acquire the next available loopback device
+    loopback = shell.command('losetup -f --show {}'.format(filepath))[0]
+
+    # Create a physical volume
+    shell.command('pvcreate {}'.format(loopback))
+
+    # Create a volume group
+    shell.command('vgcreate {} {}'.format(volume, loopback))
+
+    return LoopbackVolumeGroup(volume)
+
+
+def get(volume="rift"):
+    pvolumes = physical_volumes()
+    for pvolume in pvolumes:
+        if pvolume.vg == volume:
+            return LoopbackVolumeGroup(pvolume.vg)
+
+
+def destroy(volume="rift"):
+    pvolumes = physical_volumes()
+    for pvolume in pvolumes:
+        if pvolume.vg == volume:
+            break
+    else:
+        return
+
+    # Cache the backing file path
+    filepath = find_backing_file(volume)
+
+    # Remove the volume group
+    shell.command('vgremove -f {}'.format(pvolume.vg))
+
+    # Remove the physical volume
+    shell.command('pvremove -y {}'.format(pvolume.pv))
+
+    # Release the loopback device
+    shell.command('losetup -d {}'.format(pvolume.pv))
+
+    # Remove the backing file
+    os.remove(filepath)
+
+
+def physical_volumes():
+    """Returns a list of physical volumes"""
+    cmd = 'pvs --separator "," --rows'
+    lines = [line.strip().split(',') for line in shell.command(cmd)]
+    if not lines:
+        return []
+
+    mapping = {
+            "PV": "pv",
+            "VG": "vg",
+            "Fmt": "fmt",
+            "Attr": "attr",
+            "PSize": "psize",
+            "PFree": "pfree",
+            }
+
+    # Transpose the data so that the first element of the list is a list of
+    # keys.
+    transpose = list(map(list, zip(*lines)))
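+    # For example (illustrative), "pvs --separator , --rows" emits one
+    # attribute per line ("PV,/dev/loop0,...", "VG,rift,...", ...), so after
+    # the transpose the first row holds the keys and each subsequent row
+    # describes one physical volume.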
+
+    # Extract keys
+    keys = transpose[0]
+
+    # Iterate over the remaining data and create the physical volume objects
+    volumes = []
+    for values in transpose[1:]:
+        volume = {}
+        for k, v in zip(keys, values):
+            volume[mapping[k]] = v
+
+        volumes.append(PhysicalVolume(**volume))
+
+    return volumes
+
+
+def volume_groups():
+    """Returns a list of volume groups"""
+    cmd = 'vgs --separator "," --rows'
+    lines = [line.strip().split(',') for line in shell.command(cmd)]
+    if not lines:
+        return []
+
+    mapping = {
+            "VG": "vg",
+            "#PV": "num_pv",
+            "#LV": "num_lv",
+            "#SN": "num_sn",
+            "Attr": "attr",
+            "VSize": "vsize",
+            "VFree": "vfree",
+            }
+
+    # Transpose the data so that the first element of the list is a list of
+    # keys.
+    transpose = list(map(list, zip(*lines)))
+
+    # Extract keys
+    keys = transpose[0]
+
+    # Iterate over the remaining data and create the volume groups
+    groups = []
+    for values in transpose[1:]:
+        group = {}
+        for k, v in zip(keys, values):
+            group[mapping[k]] = v
+
+        groups.append(VolumeGroup(**group))
+
+    return groups
+
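+# Minimal usage sketch (illustrative values; "rift" and /lvm/rift.img are the
+# defaults defined above):
+#
+#     vgroup = create()              # backing file + loop device + vgcreate
+#     vgroup.extend_mbytes(1024)     # grow file, loop device and PV by 1 GiB
+#     print(vgroup.volume_group)     # VolumeGroup namedtuple parsed from vgs
+#     destroy()                      # vgremove, pvremove, losetup -d, rm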
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/lxc.py b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/lxc.py
new file mode 100644 (file)
index 0000000..9cbde9d
--- /dev/null
@@ -0,0 +1,534 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import contextlib
+import functools
+import logging
+import os
+import re
+import shutil
+import uuid
+
+from . import shell
+from . import image
+from . import lvm
+
+
+logger = logging.getLogger(__name__)
+
+
+class ValidationError(Exception):
+    pass
+
+
+@contextlib.contextmanager
+def mount(mountpoint, path):
+    """Mounts a device and unmounts it upon exit, even on error"""
+    shell.command('mount {} {}'.format(mountpoint, path))
+    logger.debug('mount {} {}'.format(mountpoint, path))
+    try:
+        yield
+    finally:
+        # os.sync()
+        shell.command('umount {}'.format(path))
+        logger.debug('umount {}'.format(path))
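+
+# Example (hypothetical paths): the device stays mounted for the duration of
+# the with-block and is unmounted on exit:
+#
+#     with mount('/dev/rift/mycontainer', '/tmp/rootfs'):
+#         configure_rootfs('/tmp/rootfs')    # hypothetical helper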
+
+
+def create_container(name, template_path, volume, rootfs_qcow2file):
+    """Create a new container
+
+    Arguments:
+        name             - the name of the new container
+        template_path    - the template defines the type of container to create
+        volume           - the volume group that the container will be in
+        rootfs_qcow2file - a path to a qcow2 image that contains the rootfs
+
+    Returns:
+        A Container object for the new container
+
+    """
+    cmd = 'lxc-create -t {} -n {} -B lvm --fssize {}M --vgname {}'
+    cmd += " -- --rootfs-qcow2file {}".format(rootfs_qcow2file)
+    cmd += " 2>&1 | tee -a /var/log/rift_lxc.log"
+    virtual_size_mbytes = image.qcow2_virtual_size_mbytes(rootfs_qcow2file)
+
+    loop_volume = lvm.get(volume)
+    loop_volume.extend_mbytes(virtual_size_mbytes)
+
+    shell.command(cmd.format(
+        template_path, name, virtual_size_mbytes, volume
+        ))
+
+    return Container(name, volume=volume, size_mbytes=virtual_size_mbytes)
+
+
+def create_snapshot(base, name, volume, size_mbytes):
+    """Create a clone of an existing container
+
+    Arguments:
+        base        - the name of the existing container
+        name        - the name to give to the clone
+        volume      - the volume group that the container will be in
+        size_mbytes - the number of megabytes by which to grow the volume group
+
+    Returns:
+        A Container object for the new snapshot
+
+    """
+    cmd = '/bin/bash lxc-clone -o {} -n {} --vgname {} --snapshot'
+
+    loop_volume = lvm.get(volume)
+    loop_volume.extend_mbytes(size_mbytes)
+
+    try:
+        shell.command(cmd.format(base, name, volume))
+
+    except shell.ProcessError as e:
+        # Skip the error that occurs here. It is corrected during configuration
+        # and results from a bug in the lxc script.
+
+        # In lxc-clone, when cloning multiple times from the same container
+        # it is possible that the lvrename operation fails to rename the
+        # file in /dev/rift (but the logical volume is renamed).
+        # This logic below resolves this particular scenario.
+        if "lxc-clone: failed to mount new rootfs" in str(e):
+            os.rmdir("/dev/rift/{name}".format(name=name))
+            shutil.move("/dev/rift/{name}_snapshot".format(name=name),
+                        "/dev/rift/{name}".format(name=name)
+                        )
+
+        elif "mkdir: cannot create directory" not in str(e):
+            raise
+
+    return Container(name, volume=volume, size_mbytes=size_mbytes)
+
+
+def purge_cache():
+    """Removes any cached templates"""
+    shell.command('rm -rf /var/cache/lxc/*')
+
+
+def force_clean():
+    """Force cleanup of the lxc directory"""
+
+    lxc_dir = "/var/lib/lxc/"
+    try:
+        shell.command('rm -rf {}*'.format(lxc_dir))
+    except shell.ProcessError:
+        for directory in os.listdir(lxc_dir):
+            path = os.path.join(lxc_dir, directory, "rootfs")
+            # Sometimes we might not be able to destroy container, if the
+            # device is still mounted so unmount it first.
+            shell.command("umount {}".format(path))
+            shell.command('rm -rf {}*'.format(lxc_dir))
+
+
+def containers():
+    """Returns a list of containers"""
+    return [c for c in shell.command('lxc-ls') if c]
+
+
+def destroy(name):
+    """Destroys a container
+
+    Arguments:
+        name - the name of the container to destroy
+
+    """
+    shell.command('lxc-destroy -n {}'.format(name))
+
+
+def start(name):
+    """Starts a container
+
+    Arguments:
+        name - the name of the container to start
+
+    """
+    shell.command('lxc-start -d -n {} -l DEBUG'.format(name))
+
+
+def stop(name):
+    """Stops a container
+
+    Arguments
+        name - the name of the container to stop
+
+    """
+    shell.command('lxc-stop -n {}'.format(name))
+
+
+def state(name):
+    """Returns the current state of a container
+
+    Arguments:
+        name - the name of the container whose state is returned
+
+    Returns:
+        A string describing the state of the container
+
+    """
+    _, state = shell.command('lxc-info -s -n {}'.format(name))[0].split()
+    return state
+
+
+def ls():
+    """Prints the output from 'lxc-ls --fancy'"""
+    print('\n'.join(shell.command('lxc-ls --fancy')))
+
+
+def ls_info():
+    lxc_info = shell.command('lxc-ls --fancy --active --fancy-format=name,ipv4')
+
+    lxc_to_ip = {}
+
+    line_regex = re.compile(r"(.*?)\.(.*?)\.(.*?)\.(.*?)\.")
+    for lxc in lxc_info:
+        if line_regex.match(lxc):
+            lxc_name = lxc.split()[0]
+
+            ips = lxc.split()[1:]
+            lxc_to_ip[lxc_name] = [ip.replace(",", "") for ip in ips]
+
+    return lxc_to_ip
+
+
+def validate(f):
+    """
+    This decorator is used to check that a given container exists. If the
+    container does not exist, a ValidationError is raised.
+
+    """
+    @functools.wraps(f)
+    def impl(self, *args, **kwargs):
+        if self.name not in containers():
+            msg = 'container ({}) does not exist'.format(self.name)
+            raise ValidationError(msg)
+
+        return f(self, *args, **kwargs)
+
+    return impl
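+
+# For example (illustrative), calling a wrapped method on a Container whose
+# underlying lxc container no longer exists raises immediately:
+#
+#     Container("no-such-container").info()   # -> ValidationError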
+
+
+class Container(object):
+    """
+    This class provides an interface to an existing container on the system.
+    """
+
+    def __init__(self, name, size_mbytes=4096, volume="rift", hostname=None):
+        self._name = name
+        self._size_mbytes = size_mbytes
+        self._volume = volume
+        self.hostname = name if hostname is None else hostname
+
+    @property
+    def name(self):
+        """The name of the container"""
+        return self._name
+
+    @property
+    def size(self):
+        """The virtual size of the container"""
+        return self._size_mbytes
+
+    @property
+    def volume(self):
+        """The volume that the container is a part of"""
+        return self._volume
+
+    @property
+    def loopback_volume(self):
+        """ Instance of lvm.LoopbackVolumeGroup """
+        return lvm.get(self.volume)
+
+    @property
+    @validate
+    def state(self):
+        """The current state of the container"""
+        return state(self.name)
+
+    @validate
+    def start(self):
+        """Starts the container"""
+        start(self.name)
+
+    @validate
+    def stop(self):
+        """Stops the container"""
+        stop(self.name)
+
+    @validate
+    def destroy(self):
+        """Destroys the container"""
+        destroy(self.name)
+
+    @validate
+    def info(self):
+        """Returns info about the container"""
+        return shell.command('lxc-info -n {}'.format(self.name))
+
+    @validate
+    def snapshot(self, name):
+        """Create a snapshot of this container
+
+        Arguments:
+            name - the name of the snapshot
+
+        Returns:
+            A Container representing the new snapshot
+
+        """
+        return create_snapshot(self.name, name, self.volume, self.size)
+
+    @validate
+    def configure(self, config, volume='rift', userdata=None):
+        """Configures the container
+
+        Arguments:
+            config   - a container configuration object
+            volume   - the volume group that the container will belong to
+            userdata - a string containing userdata that will be passed to
+                       cloud-init for execution
+
+        """
+        # Create the LXC config file
+        with open("/var/lib/lxc/{}/config".format(self.name), "w") as fp:
+            fp.write(str(config))
+            logger.debug('created /var/lib/lxc/{}/config'.format(self.name))
+
+        # Mount the rootfs of the container and configure the hosts and
+        # hostname files of the container.
+        rootfs = '/var/lib/lxc/{}/rootfs'.format(self.name)
+        os.makedirs(rootfs, exist_ok=True)
+
+        with mount('/dev/rift/{}'.format(self.name), rootfs):
+
+            # Create /etc/hostname
+            with open(os.path.join(rootfs, 'etc/hostname'), 'w') as fp:
+                fp.write(self.hostname + '\n')
+                logger.debug('created /etc/hostname')
+
+            # Create /etc/hosts
+            with open(os.path.join(rootfs, 'etc/hosts'), 'w') as fp:
+                fp.write("127.0.0.1 localhost {}\n".format(self.hostname))
+                fp.write("::1 localhost {}\n".format(self.hostname))
+                logger.debug('created /etc/hosts')
+
+            # Disable autofs (conflicts with lxc workspace mount bind)
+            autofs_service_file = os.path.join(
+                    rootfs,
+                    "etc/systemd/system/multi-user.target.wants/autofs.service",
+                    )
+            if os.path.exists(autofs_service_file):
+                os.remove(autofs_service_file)
+
+            # Setup the mount points
+            for mount_point in config.mount_points:
+                mount_point_path = os.path.join(rootfs, mount_point.remote)
+                os.makedirs(mount_point_path, exist_ok=True)
+
+            # Copy the cloud-init script into the nocloud seed directory
+            if userdata is not None:
+                try:
+                    userdata_dst = os.path.join(rootfs, 'var/lib/cloud/seed/nocloud/user-data')
+                    os.makedirs(os.path.dirname(userdata_dst))
+                except FileExistsError:
+                    pass
+
+                try:
+                    with open(userdata_dst, 'w') as fp:
+                        fp.write(userdata)
+                except Exception as e:
+                    logger.exception(e)
+
+                # Cloud init requires a meta-data file in the seed location
+                metadata = "instance_id: {}\n".format(str(uuid.uuid4()))
+                metadata += "local-hostname: {}\n".format(self.hostname)
+
+                try:
+                    metadata_dst = os.path.join(rootfs, 'var/lib/cloud/seed/nocloud/meta-data')
+                    with open(metadata_dst, 'w') as fp:
+                        fp.write(metadata)
+
+                except Exception as e:
+                    logger.exception(e)
+
+
+class ContainerConfig(object):
+    """
+    This class represents the config file that is used to define the interfaces
+    on a container.
+    """
+
+    def __init__(self, name, volume='rift'):
+        self.name = name
+        self.volume = volume
+        self.networks = []
+        self.mount_points = []
+        self.cgroups = ControlGroupsConfig()
+
+    def add_network_config(self, network_config):
+        """Add a network config object
+
+        Arguments:
+            network_config - the network config object to add
+
+        """
+        self.networks.append(network_config)
+
+    def add_mount_point_config(self, mount_point_config):
+        """Add a mount point to the configuration
+
+        Arguments:
+            mount_point_config - a MountPointConfig object
+
+        """
+        self.mount_points.append(mount_point_config)
+
+    def __repr__(self):
+        fields = """
+            lxc.rootfs = /dev/{volume}/{name}
+            lxc.utsname = {utsname}
+            lxc.tty = 4
+            lxc.pts = 1024
+            lxc.mount = /var/lib/lxc/{name}/fstab
+            lxc.cap.drop = sys_module mac_admin mac_override sys_time
+            lxc.kmsg = 0
+            lxc.autodev = 1
+            """.format(volume=self.volume, name=self.name, utsname=self.name)
+
+        fields = '\n'.join(n.strip() for n in fields.splitlines())
+        cgroups = '\n'.join(n.strip() for n in str(self.cgroups).splitlines())
+        networks = '\n'.join(str(n) for n in self.networks)
+        mount_points = '\n'.join(str(n) for n in self.mount_points)
+
+        return '\n'.join((fields, cgroups, networks, mount_points))
+
+
+class ControlGroupsConfig(object):
+    """
+    This class represents the control group configuration for a container
+    """
+
+    def __repr__(self):
+        return """
+            #cgroups
+            lxc.cgroup.devices.deny = a
+
+            # /dev/null and zero
+            lxc.cgroup.devices.allow = c 1:3 rwm
+            lxc.cgroup.devices.allow = c 1:5 rwm
+
+            # consoles
+            lxc.cgroup.devices.allow = c 5:1 rwm
+            lxc.cgroup.devices.allow = c 5:0 rwm
+            lxc.cgroup.devices.allow = c 4:0 rwm
+            lxc.cgroup.devices.allow = c 4:1 rwm
+
+            # /dev/{,u}random
+            lxc.cgroup.devices.allow = c 1:9 rwm
+            lxc.cgroup.devices.allow = c 1:8 rwm
+            lxc.cgroup.devices.allow = c 136:* rwm
+            lxc.cgroup.devices.allow = c 5:2 rwm
+
+            # rtc
+            lxc.cgroup.devices.allow = c 254:0 rm
+            """
+
+
+class NetworkConfig(collections.namedtuple(
+    "NetworkConfig", [
+        "type",
+        "link",
+        "flags",
+        "name",
+        "veth_pair",
+        "ipv4",
+        "ipv4_gateway",
+        ]
+    )):
+    """
+    This class represents a network interface configuration for a container.
+    """
+
+    def __new__(cls,
+            type,
+            link,
+            name,
+            flags='up',
+            veth_pair=None,
+            ipv4=None,
+            ipv4_gateway=None,
+            ):
+        return super(NetworkConfig, cls).__new__(
+                cls,
+                type,
+                link,
+                flags,
+                name,
+                veth_pair,
+                ipv4,
+                ipv4_gateway,
+                )
+
+    def __repr__(self):
+        fields = [
+                "lxc.network.type = {}".format(self.type),
+                "lxc.network.link = {}".format(self.link),
+                "lxc.network.flags = {}".format(self.flags),
+                "lxc.network.name = {}".format(self.name),
+                ]
+
+        if self.veth_pair is not None:
+            fields.append("lxc.network.veth.pair = {}".format(self.veth_pair))
+
+        if self.ipv4 is not None:
+            fields.append("lxc.network.ipv4 = {}/24".format(self.ipv4))
+
+        if self.ipv4_gateway is not None:
+            fields.append("lxc.network.ipv4.gateway = {}".format(self.ipv4_gateway))
+
+        header = ["# Start {} configuration".format(self.name)]
+        footer = ["# End {} configuration\n".format(self.name)]
+
+        return '\n'.join(header + fields + footer)
+
+
+class MountConfig(collections.namedtuple(
+    "ContainerMountConfig", [
+        "local",
+        "remote",
+        "read_only",
+        ]
+    )):
+    """
+    This class represents a mount point configuration for a container.
+    """
+
+    def __new__(cls, local, remote, read_only=True):
+        return super(MountConfig, cls).__new__(
+                cls,
+                local,
+                remote,
+                read_only,
+                )
+
+    def __repr__(self):
+        return "lxc.mount.entry = {} {} none {}bind 0 0\n".format(
+                self.local,
+                self.remote,
+                "" if not self.read_only else "ro,"
+                )
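+
+
+# Putting the pieces together (illustrative values): one bridged management
+# interface plus one read-only bind mount, rendered to LXC config syntax:
+#
+#     config = ContainerConfig("test")
+#     config.add_network_config(NetworkConfig(
+#         type="veth", link="virbr0", name="eth0", ipv4="192.168.122.50"))
+#     config.add_mount_point_config(MountConfig("/opt/data", "opt/data"))
+#     print(config)   # lxc.* fields, cgroup rules, network and mount entries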
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/net.py b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/net.py
new file mode 100644 (file)
index 0000000..517356b
--- /dev/null
@@ -0,0 +1,147 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+
+import netifaces
+
+from . import shell
+
+
+class VirshError(Exception):
+    pass
+
+
+def create(network, ip_interface=None):
+    """ Create, assign ip and bring up a bridge interface
+
+    Arguments:
+        network - The network name
+        ip_interface - An ipaddress.IPv4Interface instance
+    """
+    bridge_add(network)
+    if ip_interface is not None:
+        bridge_addr(
+                network,
+                str(ip_interface),
+                str(ip_interface.network.broadcast_address),
+                )
+    bridge_up(network)
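+
+# Example (illustrative): create a bridge carrying 10.0.10.1/24, then tear it
+# down again with delete() below:
+#
+#     import ipaddress
+#     create("br-rift", ipaddress.IPv4Interface("10.0.10.1/24"))
+#     delete("br-rift")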
+
+
+def delete(network):
+    bridge_down(network)
+    bridge_remove(network)
+
+
+def bridge_add(network):
+    shell.command("/usr/sbin/brctl addbr {network}".format(network=network))
+
+
+def bridge_remove(network):
+    shell.command("/usr/sbin/brctl delbr {network}".format(network=network))
+
+
+def bridge_addr(network, addr, broadcast):
+    cmd = "ip addr add {addr} broadcast {broadcast} dev {network}"
+    shell.command(cmd.format(addr=addr, broadcast=broadcast, network=network))
+
+
+def bridge_exists(network):
+    return network in netifaces.interfaces()
+
+
+def bridge_down(network):
+    shell.command('ip link set {network} down'.format(network=network))
+
+
+def bridge_up(network):
+    shell.command('ip link set {network} up'.format(network=network))
+
+
+def bridge_addresses(network):
+    try:
+        address = netifaces.ifaddresses(network)[netifaces.AF_INET][0]
+
+    except KeyError:
+        raise ValueError('unable to find subnet for {}'.format(network))
+
+    cls = collections.namedtuple('BridgeAddresses', 'addr netmask broadcast')
+    return cls(**address)
+
+
+VirshNetwork = collections.namedtuple(
+    'VirshNetwork', 'name state autostart persistent')
+
+
+def virsh_list_networks():
+    lines = shell.command('virsh net-list --all')
+    if len(lines) < 2:
+        raise Exception("Expected two lines from virsh net-list output")
+
+    network_lines = lines[2:]
+    virsh_networks = []
+    for line in network_lines:
+        if not line.strip():
+            continue
+
+        (name, state, autostart, persistent) = line.split()
+        virsh_networks.append(
+                VirshNetwork(name, state, autostart, persistent)
+                )
+
+    return virsh_networks
+
+
+def virsh_list_network_names():
+    virsh_networks = virsh_list_networks()
+    return [n.name for n in virsh_networks]
+
+
+def virsh_is_active(network_name):
+    virsh_networks = virsh_list_networks()
+    for network in virsh_networks:
+        if network.name == network_name:
+            return network.state == "active"
+
+    raise VirshError("Did not find virsh network %s" % network_name)
+
+
+def virsh_define_default():
+    shell.command('virsh net-define /usr/share/libvirt/networks/default.xml')
+
+
+def virsh_start(network_name):
+    shell.command('virsh net-start %s' % network_name)
+
+
+def virsh_initialize_default():
+    if "default" not in virsh_list_network_names():
+        virsh_define_default()
+
+    if virsh_is_active("default"):
+        if bridge_exists("virbr0"):
+            bridge_down("virbr0")
+
+        virsh_destroy("default")
+
+    virsh_start("default")
+
+
+def virsh_destroy(network_name):
+    shell.command('virsh net-destroy %s' % network_name)
+
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/shell.py b/rwcal/plugins/vala/rwcal_cloudsim/rift/rwcal/cloudsim/shell.py
new file mode 100644 (file)
index 0000000..41a96ae
--- /dev/null
@@ -0,0 +1,46 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import logging
+import subprocess
+
+
+logger = logging.getLogger(__name__)
+
+
+class ProcessError(Exception):
+    pass
+
+
+def command(cmd):
+    logger.debug('executing: {}'.format(cmd))
+
+    process = subprocess.Popen(
+            cmd,
+            shell=True,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            )
+
+    stdout, stderr = process.communicate()
+
+    if process.returncode != 0:
+        raise ProcessError(stderr.decode())
+
+    return stdout.decode().splitlines()
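+
+# Example (illustrative): stdout comes back as a list of lines; a non-zero
+# exit status raises ProcessError carrying the captured stderr:
+#
+#     lines = command("ls /tmp")
+#     command("false")   # -> ProcessError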
+
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/rwcal_cloudsim.py b/rwcal/plugins/vala/rwcal_cloudsim/rwcal_cloudsim.py
new file mode 100644 (file)
index 0000000..6da8a2e
--- /dev/null
@@ -0,0 +1,1430 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import hashlib
+import itertools
+import logging
+import os
+import time
+import uuid
+
+import ipaddress
+
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang,
+    )
+
+import rw_status
+import rift.cal.rwcal_status as rwcal_status
+import rwlogger
+
+import rift.rwcal.cloudsim.lxc as lxc
+import rift.rwcal.cloudsim.lvm as lvm
+import rift.rwcal.cloudsim.net as net
+import rift.rwcal.cloudsim.exceptions as exceptions
+
+logger = logging.getLogger('rwcal.cloudsim')
+
+rwstatus_exception_map = { IndexError: RwTypes.RwStatus.NOTFOUND,
+                           KeyError: RwTypes.RwStatus.NOTFOUND,
+                           NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,}
+
+rwstatus = rw_status.rwstatus_from_exc_map(rwstatus_exception_map)
+rwcalstatus = rwcal_status.rwcalstatus_from_exc_map(rwstatus_exception_map)
+
+
+class UnknownAccountError(Exception):
+    pass
+
+
+class MissingFileError(Exception):
+    pass
+
+
+class ImageLocationError(Exception):
+    pass
+
+
+class CreateNetworkError(Exception):
+    pass
+
+
+# Redefine rwstatus with the full exception map; NotImplementedError is kept
+# from the map above so the unimplemented tenant/role/host hooks still map to
+# NOT_IMPLEMENTED.
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,
+    UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+    MissingFileError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+
+class Resources(object):
+    def __init__(self):
+        self.images = dict()
+
+
+def rwcal_copy_object(obj):
+    dup = obj.__class__()
+    dup.copy_from(obj)
+    return dup
+
+
+MGMT_NETWORK_NAME = "virbr0"
+MGMT_NETWORK_INTERFACE_IP = ipaddress.IPv4Interface("192.168.122.1/24")
+
+
+class IPPoolError(Exception):
+    pass
+
+
+class NetworkIPPool(object):
+    def __init__(self, subnet):
+        self._network = ipaddress.IPv4Network(subnet)
+        self._ip_gen = self._network.hosts()
+        self._allocated_ips = []
+        self._unallocated_ips = []
+
+    def allocate_ip(self):
+        try:
+            ip = str(next(self._ip_gen))
+        except StopIteration:
+            try:
+                ip = self._unallocated_ips.pop()
+            except IndexError:
+                raise IPPoolError("All ip addresses exhausted")
+
+        self._allocated_ips.append(ip)
+        return ip
+
+    def deallocate_ip(self, ip):
+        if ip not in self._allocated_ips:
+            raise ValueError("Did not find IP %s in allocate ip pool")
+
+        self._allocated_ips.remove(ip)
+        self._unallocated_ips.append(ip)
+
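+# Example (illustrative): for a /29 network, hosts() starts at .1:
+#
+#     pool = NetworkIPPool("10.0.0.0/29")
+#     ip = pool.allocate_ip()       # "10.0.0.1"
+#     pool.deallocate_ip(ip)        # reused once the host generator runs out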
+
+class CalManager(object):
+    def __init__(self):
+        self._vms = {}
+        self._ports = {}
+        self._images = {}
+        self._networks = {}
+        self.flavors = {}
+
+        self._port_to_vm = {}
+        self._vm_to_image = {}
+        self._port_to_network = {}
+        self._network_to_ip_pool = {}
+
+        self._vm_to_ports = collections.defaultdict(list)
+        self._image_to_vms = collections.defaultdict(list)
+        self._network_to_ports = collections.defaultdict(list)
+
+        self._vm_id_gen = itertools.count(1)
+        self._network_id_gen = itertools.count(1)
+        self._image_id_gen = itertools.count(1)
+
+    def add_image(self, image):
+        image_id = str(next(self._image_id_gen))
+        self._images[image_id] = image
+
+        return image_id
+
+    def remove_image(self, image_id):
+        for vm_id in self.get_image_vms(image_id):
+            self.remove_vm(vm_id)
+
+        del self._images[image_id]
+        del self._image_to_vms[image_id]
+
+    def get_image(self, image_id):
+        if image_id not in self._images:
+            msg = "Unable to find image {}"
+            raise exceptions.RWErrorNotFound(msg.format(image_id))
+
+        return self._images[image_id]
+
+    def get_image_list(self):
+        return list(self._images.values())
+
+    def get_image_vms(self, image_id):
+        if image_id not in self._images:
+            msg = "Unable to find image {}"
+            raise exceptions.RWErrorNotFound(msg.format(image_id))
+
+        return self._image_to_vms[image_id]
+
+    def add_port(self, network_id, vm_id, port):
+        if network_id not in self._networks:
+            msg = "Unable to find network {}"
+            raise exceptions.RWErrorNotFound(msg.format(network_id))
+
+        if vm_id not in self._vms:
+            msg = "Unable to find vm {}"
+            raise exceptions.RWErrorNotFound(msg.format(vm_id))
+
+        port_id = str(uuid.uuid4())
+        self._ports[port_id] = port
+
+        self._vm_to_ports[vm_id].append(port_id)
+        self._network_to_ports[network_id].append(port_id)
+
+        self._port_to_vm[port_id] = vm_id
+        self._port_to_network[port_id] = network_id
+
+        return port_id
+
+    def remove_port(self, port_id):
+        if port_id not in self._ports:
+            msg = "Unable to find port {}"
+            raise exceptions.RWErrorNotFound(msg.format(port_id))
+
+        network_id = self._port_to_network[port_id]
+        vm_id = self._port_to_vm[port_id]
+
+        self._vm_to_ports[vm_id].remove(port_id)
+        self._network_to_ports[network_id].remove(port_id)
+
+        del self._ports[port_id]
+        del self._port_to_vm[port_id]
+        del self._port_to_network[port_id]
+
+    def get_port(self, port_id):
+        return self._ports[port_id]
+
+    def get_port_list(self):
+        return list(self._ports.values())
+
+    def add_network(self, network):
+        network_id = str(next(self._network_id_gen))
+        self._networks[network_id] = network
+
+        return network_id
+
+    def remove_network(self, network_id):
+        for port_id in self.get_network_ports(network_id):
+            self.remove_port(port_id)
+
+        del self._networks[network_id]
+
+    def get_network(self, network_id):
+        return self._networks[network_id]
+
+    def add_network_ip_pool(self, network_id, ip_pool):
+        self._network_to_ip_pool[network_id] = ip_pool
+
+    def get_network_ip_pool(self, network_id):
+        return self._network_to_ip_pool[network_id]
+
+    def remove_network_ip_pool(self, network_id):
+        del self._network_to_ip_pool[network_id]
+
+    def get_network_list(self):
+        return list(self._networks.values())
+
+    def get_network_ports(self, network_id):
+        return self._network_to_ports[network_id]
+
+    def add_vm(self, image_id, vm):
+        if image_id not in self._images:
+            msg = "Unable to find image {}"
+            raise exceptions.RWErrorNotFound(msg.format(image_id))
+
+        vm_id = str(next(self._vm_id_gen))
+        self._vms[vm_id] = vm
+
+        self._vm_to_image[vm_id] = image_id
+        self._image_to_vms[image_id].append(vm_id)
+
+        return vm_id
+
+    def remove_vm(self, vm_id):
+        for port_id in self.get_vm_ports(vm_id):
+            self.remove_port(port_id)
+
+        image_id = self._vm_to_image[vm_id]
+
+        self._image_to_vms[image_id].remove(vm_id)
+
+        del self._vms[vm_id]
+        del self._vm_to_image[vm_id]
+
+    def get_vm(self, vm_id):
+        return self._vms[vm_id]
+
+    def get_vm_list(self):
+        return list(self._vms.values())
+
+    def get_vm_ports(self, vm_id):
+        return self._vm_to_ports[vm_id]
+
+
+class LxcManager(object):
+    def __init__(self):
+        self._containers = {}
+        self._ports = {}
+        self._bridges = {}
+
+        self._port_to_container = {}
+        self._port_to_bridge = {}
+
+        self._container_to_ports = collections.defaultdict(list)
+        self._bridge_to_ports = collections.defaultdict(list)
+
+        # Create the management network
+        self.mgmt_network = RwcalYang.NetworkInfoItem()
+        self.mgmt_network.network_name = MGMT_NETWORK_NAME
+
+        network = MGMT_NETWORK_INTERFACE_IP.network
+        self.mgmt_network.subnet = str(network)
+
+        # Create/Start the default virtd network for NAT-based
+        # connectivity inside containers (http://wiki.libvirt.org/page/Networking)
+        if "default" not in net.virsh_list_network_names():
+            logger.debug("default virtd network not found.  Creating.")
+            net.virsh_define_default()
+
+            # The default virsh profile creates a virbr0 interface
+            # with a 192.168.122.1 ip address.  It also sets up
+            # iptables rules for NAT access.
+            net.virsh_start("default")
+
+        # Create the IP pool
+        mgmt_network_hosts = network.hosts()
+
+        # Remove the management interface ip from the pool
+        self._mgmt_ip_pool = list(mgmt_network_hosts)
+        self._mgmt_ip_pool.remove(MGMT_NETWORK_INTERFACE_IP.ip)
+
+    def acquire_mgmt_ip(self):
+        """Returns an IP address from the available pool"""
+        # TODO these ips will need to be recycled at some point
+        return str(self._mgmt_ip_pool.pop())
+
+    def add_port(self, bridge_id, container_id, port):
+        if bridge_id not in self._bridges:
+            msg = "Unable to find bridge {}"
+            raise exceptions.RWErrorNotFound(msg.format(bridge_id))
+
+        if container_id not in self._containers:
+            msg = "Unable to find container {}"
+            raise exceptions.RWErrorNotFound(msg.format(container_id))
+
+        port_id = str(uuid.uuid4())
+        self._ports[port_id] = port
+
+        self._container_to_ports[container_id].append(port_id)
+        self._bridge_to_ports[bridge_id].append(port_id)
+
+        self._port_to_container[port_id] = container_id
+        self._port_to_bridge[port_id] = bridge_id
+
+        return port_id
+
+    def remove_port(self, port_id):
+        if port_id not in self._ports:
+            msg = "Unable to find port {}"
+            raise exceptions.RWErrorNotFound(msg.format(port_id))
+
+        bridge_id = self._port_to_bridge[port_id]
+        container_id = self._port_to_container[port_id]
+
+        self._container_to_ports[container_id].remove(port_id)
+        self._bridge_to_ports[bridge_id].remove(port_id)
+
+        del self._ports[port_id]
+        del self._port_to_bridge[port_id]
+        del self._port_to_container[port_id]
+
+    def get_port(self, port_id):
+        return self._ports[port_id]
+
+    def add_bridge(self, bridge):
+        bridge_id = str(uuid.uuid4())
+        self._bridges[bridge_id] = bridge
+
+        return bridge_id
+
+    def remove_bridge(self, bridge_id):
+        for port_id in self._bridge_to_ports[bridge_id]:
+            self.remove_port(port_id)
+
+        del self._bridges[bridge_id]
+
+    def get_bridge(self, bridge_id):
+        return self._bridges[bridge_id]
+
+    def get_bridge_ports(self, bridge_id):
+        port_ids = self._bridge_to_ports[bridge_id]
+        return [self.get_port(port_id) for port_id in port_ids]
+
+    def add_container(self, container):
+        container_id = str(uuid.uuid4())
+        self._containers[container_id] = container
+
+        return container_id
+
+    def remove_container(self, container_id):
+        for port_id in self.get_container_ports(container_id):
+            self.remove_port(port_id)
+
+        del self._containers[container_id]
+
+    def get_container(self, container_id):
+        return self._containers[container_id]
+
+    def get_container_ports(self, container_id):
+        return self._container_to_ports[container_id]
+
+
+
+class Datastore(object):
+    """
+    This class is used to store data that is shared among different instances
+    of the CloudSimPlugin class.
+    """
+    def __init__(self):
+        self.lxc_manager = LxcManager()
+        self.cal_manager = CalManager()
+        self.cal_to_lxc = {'image': {}, 'port': {}, 'network': {}, 'vm': {}}
+        self.last_index = 0
+
+
+class CloudSimPlugin(GObject.Object, RwCal.Cloud):
+    # HACK this is a work-around for sharing/persisting container information.
+    # This will only work for instances of CloudSimPlugin that are within the
+    # same process. Thus, it works in collapsed mode, but will not work in
+    # expanded mode. At the point where it is necessary to persist this
+    # information in expanded mode, we will need to find a better solution.
+    datastore = None
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        if CloudSimPlugin.datastore is None:
+            CloudSimPlugin.datastore = Datastore()
+
+    @property
+    def lxc(self):
+        return CloudSimPlugin.datastore.lxc_manager
+
+    @property
+    def cal(self):
+        return CloudSimPlugin.datastore.cal_manager
+
+    @property
+    def volume_group(self):
+        return lvm.get("rift")
+
+    @property
+    def cal_to_lxc(self):
+        return CloudSimPlugin.datastore.cal_to_lxc
+
+    def next_snapshot_name(self):
+        """Generates a new snapshot name for a container"""
+        CloudSimPlugin.datastore.last_index += 1
+        return 'rws{}'.format(CloudSimPlugin.datastore.last_index)
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    category="rw-cal-log",
+                    subcategory="cloudsim",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_cloud_creds(self, account):
+        """
+        Validates the cloud account credentials for the specified account.
+        If creds are not valid, returns an error code & reason string
+        Arguments:
+            account - a cloud account to validate
+
+        Returns:
+            Validation Code and Details String
+        """
+        status = RwcalYang.CloudConnectionStatus(
+                status="success",
+                details=""
+                )
+
+        return status
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_management_network(self, account):
+        """Returns the management network
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            a NetworkInfo object
+
+        """
+        return self.lxc.mgmt_network
+
+    @rwstatus
+    def do_create_tenant(self, account, name):
+        """
+        Create a new tenant.
+
+        @param name     - name to assign to the tenant.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        """
+        delete a tenant.
+
+        @param tenant_id     - id of tenant to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        """
+        List tenants.
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_role(self, account, name):
+        """
+        Create a new role.
+
+        @param name         - name to assign to the role.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        """
+        delete a role.
+
+        @param role_id     - id of role to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        """
+        List roles.
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_image(self, account, image):
+        """Create a new image
+
+        Creates a new container based upon the template and tarfile specified.
+        Only one image is currently supported for a given instance of the CAL.
+
+        Arguments:
+            account - a cloud account
+            image   - an ImageInfo object
+
+        Raises:
+            An RWErrorDuplicate is raised if create_image is called and there
+            is already an image.
+
+        Returns:
+            The UUID of the new image
+
+        """
+        def file_md5(path, block_size=2 ** 20):
+            """
+            The block size should match the block size of your filesystem
+            to avoid performance issues.
+            """
+            md5 = hashlib.md5()
+            with open(path, 'rb') as f:
+                for chunk in iter(lambda: f.read(block_size), b''):
+                    md5.update(chunk)
+
+            return md5.hexdigest()
+
+        current_images = self.cal.get_image_list()
+        lxc_name = "rwm{}".format(len(current_images))
+
+        if not image.has_field("disk_format"):
+            logger.warning("Image disk format not provided assuming qcow2")
+            image.disk_format = "qcow2"
+
+        if image.disk_format not in ["qcow2"]:
+            msg = "Only qcow2 currently supported for container CAL"
+            raise exceptions.RWErrorNotSupported(msg)
+
+        logger.debug('Calculating IMAGE checksum...')
+        image.checksum = file_md5(image.location)
+        logger.debug("Calculated image checksum: %s", image.checksum)
+        image.state = 'active'
+
+        container = lxc.create_container(
+                name=lxc_name,
+                template_path=os.path.join(
+                        os.environ['RIFT_INSTALL'],
+                        "etc/lxc-fedora-rift.lxctemplate",
+                        ),
+                volume="rift",
+                rootfs_qcow2file=image.location,
+                )
+
+
+        # Add the images to the managers
+        cal_image_id = self.cal.add_image(image)
+        lxc_image_id = self.lxc.add_container(container)
+
+        # Create the CAL to LXC mapping
+        self.cal_to_lxc["image"][cal_image_id] = lxc_image_id
+
+        image.id = cal_image_id
+
+        return image.id
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        """Deletes an image
+
+        This function will remove the record of the image from the CAL and
+        destroy the associated container.
+
+        Arguments:
+            account  - a cloud account
+            image_id - the UUID of the image to delete
+
+        Raises:
+            An RWErrorNotEmpty exception is raised if there are VMs based on
+            this image (the VMs need to be deleted first). An RWErrorNotFound
+            is raised if the image_id does not match any of the known images.
+
+        """
+        container_id = self.cal_to_lxc["image"][image_id]
+        container = self.lxc.get_container(container_id)
+
+        # Stop the image and destroy it (NB: it should not be necessary to stop
+        # the container, but just in case)
+        container.stop()
+        container.destroy()
+
+        self.cal.remove_image(image_id)
+        self.lxc.remove_container(container_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        """Returns the specified image
+
+        Arguments:
+            account  - a cloud account
+            image_id - the UUID of the image to retrieve
+
+        Raises:
+            An RWErrorNotFound exception is raised if the image_id does not
+            match any of the known images.
+
+        Returns:
+            An image object
+
+        """
+        return self.cal.get_image(image_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        """Returns a list of images"""
+        resources = RwcalYang.VimResources()
+        for image in self.cal.get_image_list():
+            resources.imageinfo_list.append(rwcal_copy_object(image))
+
+        return resources
+
+    @rwstatus
+    def do_create_vm(self, account, vm):
+        """Create a VM
+
+        Arguments:
+            vm - the VM info used to define the desired VM
+
+        Raises:
+            An RWErrorFailure is raised if the VM cannot be created
+
+        Returns:
+            a string containing the unique id of the created VM
+
+        """
+        # Retrieve the container that will be used as the base of the snapshot
+        container_id = self.cal_to_lxc["image"][vm.image_id]
+        container = self.lxc.get_container(container_id)
+
+        # Create a container snapshot
+        snapshot = container.snapshot(self.next_snapshot_name())
+        snapshot.hostname = vm.vm_name
+
+        # Register the vm and container
+        snapshot_id = self.lxc.add_container(snapshot)
+        vm.vm_id = self.cal.add_vm(vm.image_id, vm)
+
+        self.cal_to_lxc["vm"][vm.vm_id] = snapshot_id
+
+        return vm.vm_id
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        """Starts the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to start
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        """
+        if vm_id not in self.cal_to_lxc["vm"]:
+            msg = "Unable to find the specified VM ({})"
+            raise exceptions.RWErrorNotFound(msg.format(vm_id))
+
+        container_id = self.cal_to_lxc["vm"][vm_id]
+
+        snapshot = self.lxc.get_container(container_id)
+        port_ids = self.lxc.get_container_ports(container_id)
+
+        config = lxc.ContainerConfig(snapshot.name)
+
+        for port_id in port_ids:
+            port = self.lxc.get_port(port_id)
+            config.add_network_config(port)
+
+        vm = self.cal.get_vm(vm_id)
+
+        # Set the management IP on the vm if not yet set
+        if not vm.has_field("management_ip"):
+            mgmt_ip = self.lxc.acquire_mgmt_ip()
+            vm.management_ip = mgmt_ip
+
+        # Add the management interface
+        config.add_network_config(
+                lxc.NetworkConfig(
+                    type="veth",
+                    link=self.lxc.mgmt_network.network_name,
+                    name="eth0",
+                    ipv4=vm.management_ip,
+                    ipv4_gateway='auto',
+                    )
+                )
+
+        # Add rift root as a mount point
+        config.add_mount_point_config(
+            lxc.MountConfig(
+                local=os.environ["RIFT_ROOT"],
+                remote=os.environ["RIFT_ROOT"][1:],
+                read_only=False,
+                )
+            )
+
+        userdata = None
+        if vm.cloud_init.has_field("userdata"):
+            userdata = vm.cloud_init.userdata
+
+        snapshot.configure(config, userdata=userdata)
+        # For some reason, the cloud-init fails or runs only partially when
+        # you start the container immediately after writing the config files.
+        # A sleep of 1 sec seems to magically fix the issue!!
+        time.sleep(1)
+        snapshot.start()
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        """Stops the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to stop
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        """
+        if vm_id not in self.cal_to_lxc["vm"]:
+            msg = "Unable to find the specified VM ({})"
+            raise exceptions.RWErrorNotFound(msg.format(vm_id))
+
+        # Stop the container
+        container_id = self.cal_to_lxc["vm"][vm_id]
+        snapshot = self.lxc.get_container(container_id)
+        snapshot.stop()
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        """Deletes the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to delete
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        """
+        if vm_id not in self.cal_to_lxc["vm"]:
+            msg = "Unable to find the specified VM ({})"
+            raise exceptions.RWErrorNotFound(msg.format(vm_id))
+
+        container_id = self.cal_to_lxc["vm"][vm_id]
+
+        snapshot = self.lxc.get_container(container_id)
+        snapshot.stop()
+        snapshot.destroy()
+
+        self.cal.remove_vm(vm_id)
+        self.lxc.remove_container(container_id)
+
+        # TODO: Recycle management ip
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        """
+        reboot a virtual machine.
+
+        @param vm_id     - Instance id of VM to be deleted.
+        """
+        self.do_stop_vm(account, vm_id, no_rwstatus=True)
+        self.do_start_vm(account, vm_id, no_rwstatus=True)
+
+    @rwstatus
+    def do_get_vm(self, account, vm_id):
+        """Returns the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to return
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        Returns:
+            a VMInfoItem object
+
+        """
+        if vm_id not in self.cal_to_lxc["vm"]:
+            msg = "Unable to find the specified VM ({})"
+            raise exceptions.RWErrorNotFound(msg.format(vm_id))
+
+        return self.cal.get_vm(vm_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        """Returns the a list of the VMs known to the driver
+
+        Returns:
+            a list of VMInfoItem objects
+
+        """
+        resources = RwcalYang.VimResources()
+        for vm in self.cal.get_vm_list():
+            resources.vminfo_list.append(rwcal_copy_object(vm))
+
+        return resources
+
+    @rwstatus
+    def do_create_flavor(self, account, flavor):
+        """
+        create new flavor.
+
+        @param flavor   - Flavor object
+        """
+        flavor_id = str(uuid.uuid4())
+        flavor.id = flavor_id
+        self.cal.flavors[flavor_id] = flavor
+        logger.debug('Created flavor: {}'.format(flavor_id))
+        return flavor_id
+
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        """
+        Delete flavor.
+
+        @param flavor_id     - Flavor id to be deleted.
+        """
+        logger.debug('Deleted flavor: {}'.format(flavor_id))
+        self.cal.flavors.pop(flavor_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, flavor_id):
+        """
+        Return the specified flavor
+
+        @param flavor_id - the id of the flavor to return
+        """
+        flavor = self.cal.flavors[flavor_id]
+        logger.debug('Returning flavor-info for : {}'.format(flavor_id))
+        return flavor
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        """
+        Return a list of flavors
+        """
+        vim_resources = RwcalYang.VimResources()
+        for flavor in self.cal.flavors.values():
+            f = RwcalYang.FlavorInfoItem()
+            f.copy_from(flavor)
+            vim_resources.flavorinfo_list.append(f)
+        logger.debug("Returning list of flavor-info of size: %d", len(vim_resources.flavorinfo_list))
+        return vim_resources
+
+    @rwstatus
+    def do_add_host(self, account, host):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_port(self, account, port):
+        """Create a port between a network and a virtual machine
+
+        Arguments:
+            account - a cloud account
+            port    - a description of port to create
+
+        Raises:
+            Raises an RWErrorNotFound exception if either the network or the VM
+            associated with the port cannot be found.
+
+        Returns:
+            the ID of the newly created port.
+
+        """
+        if port.network_id not in self.cal_to_lxc["network"]:
+            msg = 'Unable to find the specified network ({})'
+            raise exceptions.RWErrorNotFound(msg.format(port.network_id))
+
+        if port.vm_id not in self.cal_to_lxc["vm"]:
+            msg = "Unable to find the specified VM ({})"
+            raise exceptions.RWErrorNotFound(msg.format(port.vm_id))
+
+        if port.has_field("ip_address"):
+            raise exceptions.RWErrorFailure("IP address of the port must not be specific")
+
+        network = self.cal.get_network(port.network_id)
+        ip_pool = self.cal.get_network_ip_pool(port.network_id)
+        port.ip_address = ip_pool.allocate_ip()
+
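+        # Linux limits network interface names to 15 characters (IFNAMSIZ - 1),
+        # hence the truncation of the bridge name and the generated veth name.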
+        net_config = lxc.NetworkConfig(
+                type='veth',
+                link=network.network_name[:15],
+                name="veth" + str(uuid.uuid4())[:10],
+                ipv4=port.ip_address,
+                )
+
+        lxc_network_id = self.cal_to_lxc["network"][port.network_id]
+        lxc_vm_id = self.cal_to_lxc["vm"][port.vm_id]
+
+        cal_port_id = self.cal.add_port(port.network_id, port.vm_id, port)
+        lxc_port_id = self.lxc.add_port(lxc_network_id, lxc_vm_id, net_config)
+
+        self.cal_to_lxc["port"][cal_port_id] = lxc_port_id
+        port.port_id = cal_port_id
+
+        return port.port_id
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        """Delete the specified port
+
+        Arguments:
+            account - a cloud account
+            port_id - the ID of the port to delete
+
+        Raises:
+            A RWErrorNotFound exception is raised if the specified port cannot
+            be found.
+
+        """
+        if port_id not in self.cal_to_lxc["port"]:
+            msg = "Unable to find the specified port ({})"
+            raise exceptions.RWErrorNotFound(msg.format(port_id))
+
+        lxc_port_id = self.cal_to_lxc["port"][port_id]
+
+        # Release the port's ip address back into the network pool
+        port = self.cal.get_port(port_id)
+        ip_pool = self.cal.get_network_ip_pool(port.network_id)
+        ip_pool.deallocate_ip(port.ip_address)
+
+        self.cal.remove_port(port_id)
+        self.lxc.remove_port(lxc_port_id)
+
+        del self.cal_to_lxc["port"][port_id]
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        """Return the specified port
+
+        Arguments:
+            account - a cloud account
+            port_id - the ID of the port to return
+
+        Raises:
+            A RWErrorNotFound exception is raised if the specified port cannot
+            be found.
+
+        Returns:
+            The specified port.
+
+        """
+        if port_id not in self.cal_to_lxc["port"]:
+            msg = "Unable to find the specified port ({})"
+            raise exceptions.RWErrorNotFound(msg.format(port_id))
+
+        return self.cal.get_port(port_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        """Returns a list of ports"""
+        resources = RwcalYang.VimResources()
+        for port in self.cal.get_port_list():
+            resources.portinfo_list.append(rwcal_copy_object(port))
+
+        return resources
+
+    @rwstatus
+    def do_create_network(self, account, network):
+        """Create a network
+
+        Arguments:
+            account - a cloud account
+            network - a description of the network to create
+
+        Returns:
+            The ID of the newly created network
+
+        """
+
+        # Create the network
+        try:
+            # Remove any stale bridge with the same name before creating
+            if net.bridge_exists(network.network_name):
+                logger.warning("Bridge %s already exists.  Removing.", network.network_name)
+                net.bridge_down(network.network_name)
+                net.bridge_remove(network.network_name)
+
+            # Ensure that the subnet field was filled out and is valid
+            if not network.has_field("subnet"):
+                raise CreateNetworkError("subnet not provided in create network request")
+
+            try:
+                ipaddress.IPv4Network(network.subnet)
+            except ValueError:
+                raise CreateNetworkError("Could not convert subnet into an "
+                                         "IPv4Network: %s" % str(network.subnet))
+
+            # Setup a pool of mgmt IPv4 addresses drawn from the subnet
+            ip_pool = NetworkIPPool(network.subnet)
+
+            # Create the management bridge with interface information
+            net.create(network.network_name)
+
+        except Exception as e:
+            # Log and re-raise: ip_pool must exist for the registration below,
+            # so a failure here cannot be silently swallowed.
+            logger.warning(str(e))
+            raise
+
+        # Register the network
+        cal_network_id = self.cal.add_network(network)
+        lxc_network_id = self.lxc.add_bridge(network)
+        self.cal.add_network_ip_pool(cal_network_id, ip_pool)
+
+        self.cal_to_lxc["network"][cal_network_id] = lxc_network_id
+
+        # Set the ID of the network object
+        network.network_id = cal_network_id
+
+        return network.network_id
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        """
+        Arguments:
+            account    - a cloud account
+            network_id - the UUID of the network to delete
+
+        Raises:
+            An RWErrorNotFound is raised if the specified network cannot be
+            found.
+
+        """
+        if network_id not in self.cal_to_lxc["network"]:
+            msg = "Unable to find the specified network ({})"
+            raise exceptions.RWErrorNotFound(msg.format(network_id))
+
+        # Get the associated bridge ID
+        bridge_id = self.cal_to_lxc["network"][network_id]
+
+        # Delete the network
+        network = self.cal.get_network(network_id)
+        net.delete(network.network_name)
+
+        # Remove the network records
+        self.lxc.remove_bridge(bridge_id)
+        self.cal.remove_network(network_id)
+        del self.cal_to_lxc["network"][network_id]
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, network_id):
+        """Returns the specified network
+
+        Arguments:
+            account    - a cloud account
+            network_id - the UUID of the network to delete
+
+        Raises:
+            An RWErrorNotFound is raised if the specified network cannot be
+            found.
+
+        Returns:
+            The specified network
+
+        """
+        return self.cal.get_network(network_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        """Returns a list of network objects"""
+        resources = RwcalYang.VimResources()
+        for network in self.cal.get_network_list():
+            resources.networkinfo_list.append(rwcal_copy_object(network))
+
+        return resources
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        """Create a new virtual link
+
+        Arguments:
+            account     - a cloud account
+            link_params - information that defines the virtual link to create
+
+        Returns:
+            The ID of the newly created virtual link
+        """
+        network = RwcalYang.NetworkInfoItem()
+        network.network_name = link_params.name
+        network.subnet = link_params.subnet
+
+        if link_params.has_field("provider_network"):
+            logger.warning("Container CAL does not implement provider network")
+
+        rs, net_id = self.do_create_network(account, network)
+        if rs != RwTypes.RwStatus.SUCCESS:
+            raise exceptions.RWErrorFailure(rs)
+
+        return net_id
+
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        """Delete a virtual link
+
+        Arguments:
+            account - a cloud account
+            link_id - id for the virtual-link to be deleted
+
+        Returns:
+            None
+        """
+
+        network_ports = self.cal.get_network_ports(link_id)
+        for port_id in network_ports:
+            self.do_delete_port(account, port_id, no_rwstatus=True)
+
+        self.do_delete_network(account, link_id, no_rwstatus=True)
+
+    @staticmethod
+    def fill_connection_point_info(c_point, port_info):
+        """Create a GI object for RwcalYang.VDUInfoParams_ConnectionPoints()
+
+        Converts Port information dictionary object returned by container cal
+        driver into Protobuf Gi Object
+
+        Arguments:
+            port_info - Port information from container cal
+        Returns:
+            Protobuf Gi object for RwcalYang.VDUInfoParams_ConnectionPoints
+        """
+        c_point.name = port_info.port_name
+        c_point.connection_point_id = port_info.port_id
+        c_point.ip_address = port_info.ip_address
+        c_point.state = 'active'
+        c_point.virtual_link_id = port_info.network_id
+        c_point.vdu_id = port_info.vm_id
+
+    @staticmethod
+    def create_virtual_link_info(network_info, port_list):
+        """Create a GI object for VirtualLinkInfoParams
+
+        Converts Network and Port information dictionary object
+        returned by container manager into Protobuf Gi Object
+
+        Arguments:
+            network_info - Network information from container cal
+            port_list - A list of port information from container cal
+        Returns:
+            Protobuf Gi object for VirtualLinkInfoParams
+        """
+        link = RwcalYang.VirtualLinkInfoParams()
+        link.name = network_info.network_name
+        link.state = 'active'
+        link.virtual_link_id = network_info.network_id
+        for port in port_list:
+            c_point = link.connection_points.add()
+            CloudSimPlugin.fill_connection_point_info(c_point, port)
+
+        link.subnet = network_info.subnet
+
+        return link
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        """Get information about virtual link.
+
+        Arguments:
+            account  - a cloud account
+            link_id  - id for the virtual-link
+
+        Returns:
+            Object of type RwcalYang.VirtualLinkInfoParams
+        """
+
+        network = self.do_get_network(account, link_id, no_rwstatus=True)
+        port_ids = self.cal.get_network_ports(network.network_id)
+        ports = [self.cal.get_port(p_id) for p_id in port_ids]
+
+        virtual_link = CloudSimPlugin.create_virtual_link_info(
+                network, ports
+                )
+
+        return virtual_link
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link_list(self, account):
+        """Get information about all the virtual links
+
+        Arguments:
+            account  - a cloud account
+
+        Returns:
+            A list of objects of type RwcalYang.VirtualLinkInfoParams
+        """
+        networks = self.do_get_network_list(account, no_rwstatus=True)
+        vnf_resources = RwcalYang.VNFResources()
+        for network in networks.networkinfo_list:
+            virtual_link = self.do_get_virtual_link(account, network.network_id, no_rwstatus=True)
+            vnf_resources.virtual_link_info_list.append(virtual_link)
+
+        return vnf_resources
+
+    def _create_connection_point(self, account, c_point, vdu_id):
+        """
+        Create a connection point
+        Arguments:
+           account  - a cloud account
+           c_point  - connection_points
+        """
+        port = RwcalYang.PortInfoItem()
+        port.port_name = c_point.name
+        port.network_id = c_point.virtual_link_id
+        port.port_type = 'normal' ### Find Port type from network_profile under cloud account
+        port.vm_id = vdu_id
+        port_id = self.do_create_port(account, port, no_rwstatus=True)
+        return port_id
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        """Create a new virtual deployment unit
+
+        Arguments:
+            account     - a cloud account
+            vdu_init  - information about VDU to create (RwcalYang.VDUInitParams)
+
+        Returns:
+            The vdu_id
+        """
+        ### Create VM
+        vm = RwcalYang.VMInfoItem()
+        vm.vm_name = vdu_init.name
+        vm.image_id = vdu_init.image_id
+        if vdu_init.vdu_init.has_field('userdata'):
+            vm.cloud_init.userdata = vdu_init.vdu_init.userdata
+        vm.user_tags.node_id = vdu_init.node_id
+
+        vm_id = self.do_create_vm(account, vm, no_rwstatus=True)
+
+        ### Now create required number of ports aka connection points
+        port_list = []
+        for c_point in vdu_init.connection_points:
+            virtual_link_id = c_point.virtual_link_id
+
+            # Attempt to fetch the network to verify that the network
+            # already exists.
+            self.do_get_network(account, virtual_link_id, no_rwstatus=True)
+
+            port_id = self._create_connection_point(account, c_point, vm_id)
+            port_list.append(port_id)
+
+        # Finally start the vm
+        self.do_start_vm(account, vm_id, no_rwstatus=True)
+
+        return vm_id
+
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        """Modify Properties of existing virtual deployment unit
+
+        Arguments:
+            account     -  a cloud account
+            vdu_modify  -  Information about VDU Modification (RwcalYang.VDUModifyParams)
+        """
+        ### First create required number of ports aka connection points
+        port_list = []
+        network_list = []
+        if not vdu_modify.has_field("vdu_id"):
+            raise ValueError("vdu_id must not be empty")
+
+        for c_point in vdu_modify.connection_points_add:
+            if not c_point.has_field("virtual_link_id"):
+                raise ValueError("virtual link id not provided")
+
+            network_list.append(c_point.virtual_link_id)
+            port_id = self._create_connection_point(account, c_point, vdu_modify.vdu_id)
+            port_list.append(port_id)
+
+        ### Delete the requested connection_points
+        for c_point in vdu_modify.connection_points_remove:
+            self.do_delete_port(account, c_point.connection_point_id, no_rwstatus=True)
+
+        self.do_reboot_vm(account, vdu_modify.vdu_id, no_rwstatus=True)
+
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        """Delete a virtual deployment unit
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu to be deleted
+
+        Returns:
+            None
+        """
+        ### Get list of port on VM and delete them.
+        port_id_list = self.cal.get_vm_ports(vdu_id)
+        ports = [self.cal.get_port(p_id) for p_id in port_id_list]
+        for port in ports:
+            self.do_delete_port(account, port.port_id, no_rwstatus=True)
+        self.do_delete_vm(account, vdu_id, no_rwstatus=True)
+
+    @staticmethod
+    def fill_vdu_info(vm_info, port_list):
+        """create a gi object for vduinfoparams
+
+        converts vm information dictionary object returned by openstack
+        driver into protobuf gi object
+
+        arguments:
+            vm_info - vm information from openstack
+            mgmt_network - management network
+            port_list - a list of port information from container cal
+        returns:
+            protobuf gi object for vduinfoparams
+        """
+        vdu = RwcalYang.VDUInfoParams()
+        vdu.name = vm_info.vm_name
+        vdu.vdu_id = vm_info.vm_id
+        vdu.management_ip = vm_info.management_ip
+        vdu.public_ip = vm_info.management_ip
+        vdu.node_id = vm_info.user_tags.node_id
+        vdu.image_id = vm_info.image_id
+        vdu.state = 'active'
+
+        # fill the port information
+        for port in port_list:
+            c_point = vdu.connection_points.add()
+            CloudSimPlugin.fill_connection_point_info(c_point, port)
+
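+        # Report a fixed flavor for every VDU; the container CAL does not
+        # associate a real flavor with each VM here.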
+        vdu.vm_flavor.vcpu_count = 1
+        vdu.vm_flavor.memory_mb = 8 * 1024 # 8GB
+        vdu.vm_flavor.storage_gb = 10
+
+        return vdu
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        """Get information about a virtual deployment unit.
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu
+
+        Returns:
+            Object of type RwcalYang.VDUInfoParams
+        """
+        port_id_list = self.cal.get_vm_ports(vdu_id)
+        ports = [self.cal.get_port(p_id) for p_id in port_id_list]
+        vm_info = self.do_get_vm(account, vdu_id, no_rwstatus=True)
+        vdu_info = CloudSimPlugin.fill_vdu_info(vm_info, ports)
+
+        return vdu_info
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu_list(self, account):
+        """Get information about all the virtual deployment units
+
+        Arguments:
+            account     - a cloud account
+
+        Returns:
+            A list of objects of type RwcalYang.VDUInfoParams
+        """
+
+        vnf_resources = RwcalYang.VNFResources()
+
+        vm_resources = self.do_get_vm_list(account, no_rwstatus=True)
+        for vm in vm_resources.vminfo_list:
+            port_list = self.cal.get_vm_ports(vm.vm_id)
+            port_list = [self.cal.get_port(port_id) for port_id in port_list]
+            vdu = CloudSimPlugin.fill_vdu_info(vm, port_list)
+            vnf_resources.vdu_info_list.append(vdu)
+
+        return vnf_resources
diff --git a/rwcal/plugins/vala/rwcal_cloudsim/test/cloudsim_module_test.py b/rwcal/plugins/vala/rwcal_cloudsim/test/cloudsim_module_test.py
new file mode 100755 (executable)
index 0000000..64837ad
--- /dev/null
@@ -0,0 +1,222 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import logging
+import os
+import ipaddress
+import unittest
+import uuid
+import sys
+from gi.repository import RwcalYang
+
+import rift.rwcal.cloudsim.lvm as lvm
+import rift.rwcal.cloudsim.lxc as lxc
+
+sys.path.append('../')
+import rwcal_cloudsim
+
+
+logger = logging.getLogger('rwcal-cloudsim')
+
+
+class CloudsimTest(unittest.TestCase):
+    @classmethod
+    def cleanUp(cls):
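+        # Stop all containers first, then destroy them in a second pass so
+        # nothing is destroyed while still running.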
+        for container in lxc.containers():
+            lxc.stop(container)
+
+        for container in lxc.containers():
+            lxc.destroy(container)
+
+        #lvm.destroy("rift")
+
+    @classmethod
+    def create_image(cls):
+        image = RwcalYang.ImageInfoItem()
+        image.name = "rift-lxc-image"
+        image.location = "/net/sharedfiles/home1/common/vm/R0.4/rift-mano-devel-latest.qcow2"
+        image.disk_format = "qcow2"
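+        # no_rwstatus=True makes the decorated CAL method raise on error and
+        # return its raw result instead of an RwStatus tuple.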
+        image.id = cls.cal.do_create_image(cls.account, image, no_rwstatus=True)
+
+        cls.image = image
+
+    @classmethod
+    def setUpClass(cls):
+        cls.cleanUp()
+
+        lvm.create("rift")
+        cls.account = RwcalYang.CloudAccount()
+        cls.cal = rwcal_cloudsim.CloudSimPlugin()
+        cls.create_image()
+
+    def setUp(self):
+        pass
+
+    def create_vm(self, image, index):
+        vm = RwcalYang.VMInfoItem()
+        vm.vm_name = 'rift-s{}'.format(index + 1)
+        vm.image_id = image.id
+        vm.user_tags.node_id = str(uuid.uuid4())
+
+        self.cal.do_create_vm(self.account, vm, no_rwstatus=True)
+
+        return vm
+
+    def create_virtual_link(self, index):
+        link = RwcalYang.VirtualLinkReqParams()
+        link.name = 'link-{}'.format(index + 1)
+        link.subnet = '192.168.{}.0/24'.format(index + 1)
+
+        logger.debug("Creating virtual link: %s", link)
+
+        link_id = self.cal.do_create_virtual_link(self.account, link, no_rwstatus=True)
+        return link, link_id
+
+    def create_vdu(self, image, index, virtual_link_ids=None):
+        vdu_init = RwcalYang.VDUInitParams()
+        vdu_init.name = 'rift-vdu{}'.format(index + 1)
+        vdu_init.node_id = str(uuid.uuid4())
+        vdu_init.image_id = image.id
+
+        if virtual_link_ids is not None:
+            for vl_id in virtual_link_ids:
+                cp = vdu_init.connection_points.add()
+                cp.name = "{}_{}".format(vdu_init.name, vl_id)
+                cp.virtual_link_id = vl_id
+
+        vdu_id = self.cal.do_create_vdu(self.account, vdu_init, no_rwstatus=True)
+
+        return vdu_init, vdu_id
+
+    def test_create_vm(self):
+        self.create_vm(self.image, 0)
+
+    def test_create_delete_virtual_link(self):
+        link, link_id = self.create_virtual_link(0)
+        get_link = self.cal.do_get_virtual_link(self.account, link_id, no_rwstatus=True)
+        assert get_link.name == link.name
+        assert get_link.virtual_link_id == link_id
+        assert len(get_link.connection_points) == 0
+        assert get_link.state == "active"
+
+        resources = self.cal.do_get_virtual_link_list(self.account, no_rwstatus=True)
+        assert len(resources.virtual_link_info_list) == 1
+        assert resources.virtual_link_info_list[0] == get_link
+
+        self.cal.do_delete_virtual_link(self.account, link_id, no_rwstatus=True)
+        resources = self.cal.do_get_virtual_link_list(self.account, no_rwstatus=True)
+        assert len(resources.virtual_link_info_list) == 0
+
+    def test_create_delete_vdu(self):
+        vdu, vdu_id = self.create_vdu(self.image, 0)
+        get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+
+        assert get_vdu.image_id == self.image.id
+        assert get_vdu.name == vdu.name
+        assert get_vdu.node_id == vdu.node_id
+
+        assert len(get_vdu.connection_points) == 0
+
+        assert get_vdu.vm_flavor.vcpu_count >= 1
+        assert get_vdu.vm_flavor.memory_mb >= 8 * 1024
+        assert get_vdu.vm_flavor.storage_gb >= 5
+
+        resources = self.cal.do_get_vdu_list(self.account, no_rwstatus=True)
+        assert len(resources.vdu_info_list) == 1
+        assert resources.vdu_info_list[0] == get_vdu
+
+        self.cal.do_delete_vdu(self.account, vdu_id, no_rwstatus=True)
+
+        resources = self.cal.do_get_vdu_list(self.account, no_rwstatus=True)
+        assert len(resources.vdu_info_list) == 0
+
+    def test_create_vdu_single_connection_point(self):
+        link, link_id = self.create_virtual_link(0)
+        vdu, vdu_id = self.create_vdu(self.image, 0, [link_id])
+        get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+        assert len(get_vdu.connection_points) == 1
+        cp = get_vdu.connection_points[0]
+        assert (ipaddress.IPv4Address(cp.ip_address) in
+                ipaddress.IPv4Network(link.subnet))
+
+        get_link = self.cal.do_get_virtual_link(self.account, link_id, no_rwstatus=True)
+        assert len(get_link.connection_points) == 1
+        assert get_link.connection_points[0].vdu_id == vdu_id
+        assert get_link.connection_points[0].virtual_link_id == link_id
+
+        self.cal.do_delete_vdu(self.account, vdu_id, no_rwstatus=True)
+        get_link = self.cal.do_get_virtual_link(self.account, link_id, no_rwstatus=True)
+        assert len(get_link.connection_points) == 0
+
+        self.cal.do_delete_virtual_link(self.account, link_id, no_rwstatus=True)
+
+    def test_create_vdu_multiple_connection_point(self):
+        link1, link1_id = self.create_virtual_link(0)
+        link2, link2_id = self.create_virtual_link(1)
+        link3, link3_id = self.create_virtual_link(2)
+        link_id_map = {link1_id: link1, link2_id: link2, link3_id: link3}
+
+        vdu, vdu_id = self.create_vdu(self.image, 0, link_id_map.keys())
+        get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+        assert len(get_vdu.connection_points) == 3
+        for cp in get_vdu.connection_points:
+            assert cp.virtual_link_id in link_id_map
+            link = link_id_map[cp.virtual_link_id]
+
+            assert (ipaddress.IPv4Address(cp.ip_address) in
+                    ipaddress.IPv4Network(link.subnet))
+
+        self.cal.do_delete_vdu(self.account, vdu_id, no_rwstatus=True)
+
+        self.cal.do_delete_virtual_link(self.account, link1_id, no_rwstatus=True)
+        self.cal.do_delete_virtual_link(self.account, link2_id, no_rwstatus=True)
+        self.cal.do_delete_virtual_link(self.account, link3_id, no_rwstatus=True)
+
+    def test_modify_vdu_add_remove_connection_point(self):
+        vdu, vdu_id = self.create_vdu(self.image, 0)
+        link, link_id = self.create_virtual_link(0)
+
+        get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+        assert len(get_vdu.connection_points) == 0
+
+        modify_vdu = RwcalYang.VDUModifyParams()
+        modify_vdu.vdu_id = vdu_id
+        cp = modify_vdu.connection_points_add.add()
+        cp.virtual_link_id = link_id
+        cp.name = "link_1"
+        self.cal.do_modify_vdu(self.account, modify_vdu, no_rwstatus=True)
+
+        get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+        assert len(get_vdu.connection_points) == 1
+
+        modify_vdu = RwcalYang.VDUModifyParams()
+        modify_vdu.vdu_id = vdu_id
+        cp = modify_vdu.connection_points_remove.add()
+        cp.connection_point_id = get_vdu.connection_points[0].connection_point_id
+        self.cal.do_modify_vdu(self.account, modify_vdu, no_rwstatus=True)
+
+        get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+        assert len(get_vdu.connection_points) == 0
+
+        self.cal.do_delete_vdu(self.account, vdu_id, no_rwstatus=True)
+        self.cal.do_delete_virtual_link(self.account, link_id, no_rwstatus=True)
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.DEBUG)
+    unittest.main()
diff --git a/rwcal/plugins/vala/rwcal_cloudsimproxy/CMakeLists.txt b/rwcal/plugins/vala/rwcal_cloudsimproxy/CMakeLists.txt
new file mode 100644 (file)
index 0000000..66e0a3f
--- /dev/null
@@ -0,0 +1,27 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+set(PKG_NAME rwcal-cloudsimproxy)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+
+rift_install_python_plugin(rwcal_cloudsimproxy rwcal_cloudsimproxy.py)
+
diff --git a/rwcal/plugins/vala/rwcal_cloudsimproxy/Makefile b/rwcal/plugins/vala/rwcal_cloudsimproxy/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upwards(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/vala/rwcal_cloudsimproxy/rwcal_cloudsimproxy.py b/rwcal/plugins/vala/rwcal_cloudsimproxy/rwcal_cloudsimproxy.py
new file mode 100644 (file)
index 0000000..addb4d3
--- /dev/null
@@ -0,0 +1,709 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import logging
+import os
+import shutil
+import tempfile
+
+import requests
+
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang,
+    )
+
+import rw_status
+import rift.cal.rwcal_status as rwcal_status
+import rwlogger
+
+logger = logging.getLogger('rwcal.cloudsimproxy')
+
+
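+# Exceptions raised inside the plugin are translated into RwStatus codes by
+# the rwstatus/rwcalstatus decorators, using this mapping.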
+rwstatus_exception_map = { IndexError: RwTypes.RwStatus.NOTFOUND,
+                           KeyError: RwTypes.RwStatus.NOTFOUND,
+                           NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,}
+
+rwstatus = rw_status.rwstatus_from_exc_map(rwstatus_exception_map)
+rwcalstatus = rwcal_status.rwcalstatus_from_exc_map(rwstatus_exception_map)
+
+
+class CloudsimProxyError(Exception):
+    pass
+
+
+class CloudSimProxyPlugin(GObject.Object, RwCal.Cloud):
+    DEFAULT_PROXY_HOST = "localhost"
+    DEFAULT_PROXY_PORT = 9002
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self._session = None
+        self._host = None
+        self._port = CloudSimProxyPlugin.DEFAULT_PROXY_PORT
+
+    @property
+    def session(self):
+        if self._session is None:
+            self._session = requests.Session()
+
+        return self._session
+
+    @property
+    def host(self):
+        return self._host
+
+    @host.setter
+    def host(self, host):
+        if self._host is not None:
+            if host != self._host:
+                raise CloudsimProxyError("Cloudsim host changed during execution")
+
+        self._host = host
+
+    def _set_host_from_account(self, account):
+        self.host = account.cloudsim_proxy.host
+
+    def _proxy_rpc_call(self, api, **kwargs):
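+        """Invoke a CAL operation on the remote cloudsim server.
+
+        The keyword arguments are POSTed as JSON to
+        http://<host>:<port>/api/<api>; any GI objects named in the
+        response's "return_vals" entries are deserialized before returning.
+        """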
+        url = "http://{host}:{port}/api/{api}".format(
+                host=self._host,
+                port=self._port,
+                api=api,
+                )
+
+        post_dict = {}
+        for key, val in kwargs.items():
+            post_dict[key] = val
+
+        logger.debug("Sending post to url %s with json data: %s", url, post_dict)
+        r = self.session.post(url, json=post_dict)
+        r.raise_for_status()
+
+        response_dict = r.json()
+        logger.debug("Got json response: %s", response_dict)
+
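+        # Each return value may carry a "proto_type" naming a RwcalYang GI
+        # class; when present, the raw value is deserialized into that class.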
+        return_vals = []
+        for return_val in response_dict["return_vals"]:
+            value = return_val["value"]
+            proto_type = return_val["proto_type"]
+            if proto_type is not None:
+                gi_cls = getattr(RwcalYang, proto_type)
+                logger.debug("Deserializing into %s", proto_type)
+                gi_obj = gi_cls.from_dict(value)
+                value = gi_obj
+
+            return_vals.append(value)
+
+        logger.debug("Returning RPC return values: %s", return_vals)
+
+        if len(return_vals) == 0:
+            return None
+
+        elif len(return_vals) == 1:
+            return return_vals[0]
+
+        else:
+            return tuple(return_vals[1:])
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        logger.addHandler(
+            rwlogger.RwLogger(
+                category="rw-cal-log",
+                subcategory="cloudsimproxy",
+                log_hdl=rwlog_ctx,
+            )
+        )
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_management_network(self, account):
+        """Returns the management network
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            a NetworkInfo object
+
+        """
+
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_management_network")
+
+    @rwstatus
+    def do_create_tenant(self, account, name):
+        """
+        Create a new tenant.
+
+        @param name     - name to assign to the tenant.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        """
+        delete a tenant.
+
+        @param tenant_id     - id of tenant to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        """
+        List tenants.
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_role(self, account, name):
+        """
+        Create a new role.
+
+        @param name         - name to assign to the role.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        """
+        delete a role.
+
+        @param role_id     - id of role to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        """
+        List roles.
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_image(self, account, image):
+        """Create a new image
+
+        Creates a new container based upon the template and tarfile specified.
+        Only one image is currently supported for a given instance of the CAL.
+
+        Arguments:
+            account - a cloud account
+            image   - an ImageInfo object
+
+        Raises:
+            An RWErrorDuplicate is raised if create_image is called and there
+            is already an image.
+
+        Returns:
+            The UUID of the new image
+
+        """
+        self._set_host_from_account(account)
+
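+        # An open file descriptor cannot be forwarded over HTTP, so when the
+        # image arrives as a fileno, copy its contents into a temporary file
+        # and send the resulting path instead.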
+        if image.has_field("fileno"):
+            logger.debug("Got fileno for cloudsim image create")
+            new_fileno = os.dup(image.fileno)
+            read_hdl = os.fdopen(new_fileno, 'rb')
+            write_hdl = tempfile.NamedTemporaryFile()
+            image.location = write_hdl.name
+            logger.debug("Created temporary file to store the cloudsim image: %s", image.location)
+            shutil.copyfileobj(read_hdl, write_hdl)
+
+            image_dict = image.as_dict()
+            del image_dict["fileno"]
+        else:
+            image_dict = image.as_dict()
+
+        return self._proxy_rpc_call("create_image", image=image_dict)
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        """Deletes an image
+
+        This function will remove the record of the image from the CAL and
+        destroy the associated container.
+
+        Arguments:
+            account  - a cloud account
+            image_id - the UUID of the image to delete
+
+        Raises:
+            An RWErrorNotEmpty exception is raised if there are VMs based on
+            this image (the VMs need to be deleted first). An RWErrorNotFound
+            is raised if the image_id does not match any of the known images.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_image")
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        """Returns the specified image
+
+        Arguments:
+            account  - a cloud account
+            image_id - the UUID of the image to retrieve
+
+        Raises:
+            An RWErrorNotFound exception is raised if the image_id does not
+            match any of the known images.
+
+        Returns:
+            An image object
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_image", image_id=image_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        """Returns a list of images"""
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_image_list")
+
+    @rwstatus
+    def do_create_vm(self, account, vm):
+        """Create a VM
+
+        Arguments:
+            vm - the VM info used to define the desire VM
+
+        Raises:
+            An RWErrorFailure is raised if the VM cannot be created.
+
+        Returns:
+            a string containing the unique id of the created VM
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_vm", vm=vm.as_dict())
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        """Starts the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to start
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("start_vm", vm_id=vm_id)
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        """Stops the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to stop
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("stop_vm", vm_id=vm_id)
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        """Deletes the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to delete
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_vm", vm_id=vm_id)
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        """
+        Reboot a virtual machine.
+
+        @param vm_id     - Instance id of VM to be rebooted.
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("reboot_vm", vm_id=vm_id)
+
+    @rwstatus
+    def do_get_vm(self, account, vm_id):
+        """Returns the specified VM
+
+        Arguments:
+            vm_id - the id of the vm to return
+
+        Raises:
+            An RWErrorNotFound is raised if the specified vm id is not known to
+            this driver.
+
+        Returns:
+            a VMInfoItem object
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_vm", vm_id=vm_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        """Returns the a list of the VMs known to the driver
+
+        Returns:
+            a list of VMInfoItem objects
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_vm_list")
+
+    @rwstatus
+    def do_create_flavor(self, account, flavor):
+        """
+        Create a new flavor.
+
+        @param flavor   - Flavor object
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_flavor", flavor=flavor.as_dict())
+
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        """
+        Delete flavor.
+
+        @param flavor_id     - Flavor id to be deleted.
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_flavor", flavor_id=flavor_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, flavor_id):
+        """
+        Return the specified flavor
+
+        @param flavor_id - the id of the flavor to return
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_flavor", flavor_id=flavor_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        """
+        Return a list of flavors
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_flavor_list")
+
+    @rwstatus
+    def do_add_host(self, account, host):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_port(self, account, port):
+        """Create a port between a network and a virtual machine
+
+        Arguments:
+            account - a cloud account
+            port    - a description of port to create
+
+        Raises:
+            An RWErrorNotFound exception is raised if either the network or
+            the VM associated with the port cannot be found.
+
+        Returns:
+            the ID of the newly created port.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_port", port=port.as_dict())
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        """Delete the specified port
+
+        Arguments:
+            account - a cloud account
+            port_id - the ID of the port to delete
+
+        Raises:
+            A RWErrorNotFound exception is raised if the specified port cannot
+            be found.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_port", port_id=port_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        """Return the specified port
+
+        Arguments:
+            account - a cloud account
+            port_id - the ID of the port to return
+
+        Raises:
+            A RWErrorNotFound exception is raised if the specified port cannot
+            be found.
+
+        Returns:
+            The specified port.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_port", port_id=port_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        """Returns a list of ports"""
+
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_port_list")
+
+    @rwstatus
+    def do_create_network(self, account, network):
+        """Create a network
+
+        Arguments:
+            account - a cloud account
+            network - a description of the network to create
+
+        Returns:
+            The ID of the newly created network
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_network", network=network.as_dict())
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        """
+        Arguments:
+            account    - a cloud account
+            network_id - the UUID of the network to delete
+
+        Raises:
+            An RWErrorNotFound is raised if the specified network cannot be
+            found.
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_network", network_id=network_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, network_id):
+        """Returns the specified network
+
+        Arguments:
+            account    - a cloud account
+            network_id - the UUID of the network to delete
+
+        Raises:
+            An RWErrorNotFound is raised if the specified network cannot be
+            found.
+
+        Returns:
+            The specified network
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_network", network_id=network_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        """Returns a list of network objects"""
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_network_list")
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_cloud_creds(self, account):
+        """
+        Validates the cloud account credentials for the specified account.
+        If creds are not valid, returns an error code & reason string
+        Arguments:
+            account - a cloud account to validate
+
+        Returns:
+            Validation Code and Details String
+        """
+        self._set_host_from_account(account)
+
+        status = RwcalYang.CloudConnectionStatus()
+        try:
+            self._proxy_rpc_call("get_vm_list")
+        except Exception as e:
+            status.status = "failure"
+            status.details = "connection to cloudsim server failed: %s" % str(e)
+        else:
+            status.status = "success"
+            status.details = "Connection was successful"
+
+        return status
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        """Create a new virtual link
+
+        Arguments:
+            account     - a cloud account
+            link_params - information that defines the type of VDU to create
+
+        Returns:
+            The vdu_id
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_virtual_link", link_params=link_params.as_dict())
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        """Get information about virtual link.
+
+        Arguments:
+            account  - a cloud account
+            link_id  - id for the virtual-link
+
+        Returns:
+            Object of type RwcalYang.VirtualLinkInfoParams
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_virtual_link", link_id=link_id)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_virtual_link_list(self, account):
+        """Returns the a list of the Virtual links
+
+        Returns:
+            a list of RwcalYang.VirtualLinkInfoParams objects
+
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_virtual_link_list")
+
+    @rwstatus(ret_on_failure=[None])
+    def do_delete_virtual_link(self, account, link_id):
+        """Delete the virtual link
+
+        Arguments:
+            account  - a cloud account
+            link_id  - id for the virtual-link
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_virtual_link", link_id=link_id)
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        """Create a new virtual link
+
+        Arguments:
+            account     - a cloud account
+            link_params - information that defines the virtual link to create
+
+        Returns:
+            The ID of the newly created virtual link
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_virtual_link", link_params=link_params.as_dict())
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        """Create a new virtual deployment unit
+
+        Arguments:
+            account     - a cloud account
+            vdu_init  - information about VDU to create (RwcalYang.VDUInitParams)
+
+        Returns:
+            The vdu_id
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("create_vdu", vdu_params=vdu_init.as_dict())
+
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        """Modify Properties of existing virtual deployment unit
+
+        Arguments:
+            account     -  a cloud account
+            vdu_modify  -  Information about VDU Modification (RwcalYang.VDUModifyParams)
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("modify_vdu", vdu_params=vdu_modify.as_dict())
+
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        """Delete a virtual deployment unit
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu to be deleted
+
+        Returns:
+            None
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("delete_vdu", vdu_id=vdu_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        """Get information about a virtual deployment unit.
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu
+
+        Returns:
+            Object of type RwcalYang.VDUInfoParams
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_vdu", vdu_id=vdu_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu_list(self, account):
+        """Get information about all the virtual deployment units
+
+        Arguments:
+            account     - a cloud account
+
+        Returns:
+            A list of objects of type RwcalYang.VDUInfoParams
+        """
+        self._set_host_from_account(account)
+        return self._proxy_rpc_call("get_vdu_list")
diff --git a/rwcal/plugins/vala/rwcal_mock/CMakeLists.txt b/rwcal/plugins/vala/rwcal_mock/CMakeLists.txt
new file mode 100644 (file)
index 0000000..1edf187
--- /dev/null
@@ -0,0 +1,27 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+### rwcal-mock package
+set(PKG_NAME rwcal-mock)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+
+rift_install_python_plugin(rwcal_mock rwcal_mock.py)
diff --git a/rwcal/plugins/vala/rwcal_mock/Makefile b/rwcal/plugins/vala/rwcal_mock/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upwards(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/vala/rwcal_mock/rwcal_mock.py b/rwcal/plugins/vala/rwcal_mock/rwcal_mock.py
new file mode 100644 (file)
index 0000000..a1776d1
--- /dev/null
@@ -0,0 +1,616 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import random
+import socket
+import struct
+import collections
+import hashlib
+import logging
+import os
+import uuid
+
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang)
+
+import rw_status
+import rift.cal.rwcal_status as rwcal_status
+import rwlogger
+
+logger = logging.getLogger('rwcal.mock')
+
+
+class UnknownAccountError(Exception):
+    pass
+
+
+class MissingFileError(Exception):
+    pass
+
+
+class ImageLocationError(Exception):
+    pass
+
+
+rwstatus_exception_map = { IndexError: RwTypes.RwStatus.NOTFOUND,
+                           KeyError: RwTypes.RwStatus.NOTFOUND,
+                           NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,
+                           UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+                           MissingFileError: RwTypes.RwStatus.NOTFOUND,
+}
+
+rwstatus = rw_status.rwstatus_from_exc_map(rwstatus_exception_map)
+rwcalstatus = rwcal_status.rwcalstatus_from_exc_map(rwstatus_exception_map)
+
+class Resources(object):
+    def __init__(self):
+        self.images = dict()
+        self.vlinks = dict()
+        self.vdus = dict()
+        self.flavors = dict()
+
+class MockPlugin(GObject.Object, RwCal.Cloud):
+    """This class implements the abstract methods in the Cloud class.
+    Mock is used for unit testing."""
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self.resources = collections.defaultdict(Resources)
+
+    @staticmethod
+    def get_uuid(name):
+        if name is None:
+            raise ValueError("Name can not be None")
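+        # uuid3 is name-based, so identical names always map to the same UUID,
+        # keeping mock resource IDs deterministic across runs.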
+        return str(uuid.uuid3(uuid.NAMESPACE_DNS, name))
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    category="rw-cal-log",
+                    subcategory="rwcal.mock",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
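+        # Pre-populate two mock accounts so unit tests have resources to
+        # operate on immediately after init.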
+        account = RwcalYang.CloudAccount()
+        account.name = 'mock_account'
+        account.account_type = 'mock'
+        account.mock.username = 'mock_user'
+        self.create_default_resources(account)
+        account.name = 'mock_account1'
+        self.create_default_resources(account)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_cloud_creds(self, account):
+        """
+        Validates the cloud account credentials for the specified account.
+        If creds are not valid, returns an error code & reason string
+        Arguments:
+            account - a cloud account to validate
+
+        Returns:
+            Validation Code and Details String
+        """
+        status = RwcalYang.CloudConnectionStatus(
+                status="success",
+                details=""
+                )
+
+        return status
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_management_network(self, account):
+        """
+        Returns the management network
+
+        @param account - a cloud account
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_tenant(self, account, name):
+        """
+        Create a new tenant.
+
+        @param name     - name to assign to the tenant.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        """
+        delete a tenant.
+
+        @param tenant_id     - id of tenant to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        """
+        List tenants.
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_role(self, account, name):
+        """
+        Create a new role.
+
+        @param name         - name to assign to the role.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        """
+        Delete a role.
+
+        @param role_id     - id of role to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        """
+        List roles.
+
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_image(self, account, image):
+        """
+        Create a VM image
+
+        @param account - cloud account information
+        @param image   - information about the image
+        """
+        if image.location is None:
+            raise ImageLocationError("uninitialized image location")
+
+        if not os.path.exists(image.location):
+            raise MissingFileError("{} does not exist".format(image.location))
+
+        image.id = self.get_uuid(image.name)
+
+        self.resources[account.name].images[image.id] = image
+        logger.debug('created image: {}'.format(image.id))
+        return image.id
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        """
+        Delete a VM image.
+
+        @param image_id     - Instance id of VM image to be deleted.
+        """
+        if account.name not in self.resources:
+            raise UnknownAccountError()
+
+        del self.resources[account.name].images[image_id]
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        return self.resources[account.name].images[image_id]
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        """
+        Return a list of the names of all available images.
+        """
+        boxed_image_list = RwcalYang.VimResources()
+        for image in self.resources[account.name].images.values():
+            image_entry = RwcalYang.ImageInfoItem()
+            image_entry.id = image.id
+            image_entry.name = image.name
+            if image.has_field('checksum'):
+                image_entry.checksum = image.checksum
+            boxed_image_list.imageinfo_list.append(image_entry)
+
+        logger.debug("Image list for {}: {}".format(account.name, boxed_image_list.imageinfo_list))
+        return boxed_image_list
+
+    @rwstatus
+    def do_create_vm(self, account, vm):
+        """
+        Create a new virtual machine.
+
+        @param account - a cloud account
+        @param vm      - VM parameters: name (need not be unique), image,
+                         size, and launch location.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        """
+        Start a virtual machine.
+
+        @param vm_id - id of VM to start
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        """
+        Stop a virtual machine.
+
+        @param vm_id - id of VM to stop
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        """
+        Delete a virtual machine.
+
+        @param vm_id     - Instance id of VM to be deleted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        """
+        Reboot a virtual machine.
+
+        @param vm_id     - Instance id of VM to be rebooted.
+        """
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_flavor(self, account, flavor):
+        """
+        Create a new flavor.
+
+        @param flavor   - Flavor object
+        """
+        flavor_id = self.get_uuid(flavor.name)
+        self.resources[account.name].flavors[flavor_id] = flavor
+        logger.debug('Created flavor: {}'.format(flavor_id))
+        return flavor_id
+
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        """
+        Delete flavor.
+
+        @param flavor_id     - Flavor id to be deleted.
+        """
+        self.resources[account.name].flavors.pop(flavor_id)
+        logger.debug('Deleted flavor: {}'.format(flavor_id))
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, flavor_id):
+        """
+        Return the specified flavor
+
+        @param flavor_id - the id of the flavor to return
+        """
+        flavor = self.resources[account.name].flavors[flavor_id]
+        logger.debug('Returning flavor-info for : {}'.format(flavor_id))
+        return flavor
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        """
+        Return a list of flavors
+        """
+        vim_resources = RwcalYang.VimResources()
+        for flavor in self.resources[account.name].flavors.values():
+            f = RwcalYang.FlavorInfoItem()
+            f.copy_from(flavor)
+            vim_resources.flavorinfo_list.append(f)
+        logger.debug("Returning list of flavor-info of size: %d", len(vim_resources.flavorinfo_list))
+        return vim_resources
+
+
+    @rwstatus
+    def do_add_host(self, account, host):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_port(self, account, port):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_network(self, account, network):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, network_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        raise NotImplementedError()
+
+    def create_default_resources(self, account):
+        """
+        Create default resources
+        """
+        link_list = []
+        ### Add virtual links
+        #for i in range(1):
+        #    vlink = RwcalYang.VirtualLinkReqParams()
+        #    vlink.name = 'link-'+str(i)
+        #    vlink.subnet = '10.0.0.0/24'
+        #    rs, vlink_id = self.do_create_virtual_link(account, vlink)
+        #    assert vlink_id != ''
+        #    logger.debug("Creating static virtual-link with name: %s", vlink.name)
+        #    link_list.append(vlink_id)
+
+        #### Add VDUs
+        #for i in range(8):
+        #    vdu = RwcalYang.VDUInitParams()
+        #    vdu.name = 'vdu-'+str(i)
+        #    vdu.node_id = str(i)
+        #    vdu.image_id = self.get_uuid('image-'+str(i))
+        #    vdu.flavor_id = self.get_uuid('flavor'+str(i))
+        #    vdu.vm_flavor.vcpu_count = 4
+        #    vdu.vm_flavor.memory_mb = 4096*2
+        #    vdu.vm_flavor.storage_gb = 40
+        #    for j in range(2):
+        #        c = vdu.connection_points.add()
+        #        c.name = vdu.name+'-port-'+str(j)
+        #        c.virtual_link_id = link_list[j]
+        #    rs, vdu_id = self.do_create_vdu(account, vdu)
+        #    assert vdu_id != ''
+        #    logger.debug("Creating static VDU with name: %s", vdu.name)
+
+        for i in range(2):
+            flavor = RwcalYang.FlavorInfoItem()
+            flavor.name = 'flavor-'+str(i)
+            flavor.vm_flavor.vcpu_count = 4
+            flavor.vm_flavor.memory_mb = 4096*2
+            flavor.vm_flavor.storage_gb = 40
+            rc, flavor_id = self.do_create_flavor(account, flavor)
+
+        for i in range(2):
+            image = RwcalYang.ImageInfoItem()
+            image.name = "rwimage"
+            image.id = self.get_uuid('image-'+str(i))
+            image.checksum = self.get_uuid('rwimage'+str(i))
+            image.location = "/dev/null"
+            rc, image_id = self.do_create_image(account, image)
+
+        image = RwcalYang.ImageInfoItem()
+        image.name = "Fedora-x86_64-20-20131211.1-sda.qcow2"
+        image.id = self.get_uuid(image.name)
+        image.checksum = self.get_uuid(image.name)
+        image.location = "/dev/null"
+        rc, image_id = self.do_create_image(account, image)
+
+        image = RwcalYang.ImageInfoItem()
+        image.name = "Fedora-x86_64-20-20131211.1-sda-ping.qcow2"
+        image.id = self.get_uuid(image.name)
+        image.checksum = "a6ffaa77f949a9e4ebb082c6147187cf"#self.get_uuid(image.name)
+        image.location = "/dev/null"
+        rc, image_id = self.do_create_image(account, image)
+
+        image = RwcalYang.ImageInfoItem()
+        image.name = "Fedora-x86_64-20-20131211.1-sda-pong.qcow2"
+        image.id = self.get_uuid(image.name)
+        image.checksum = "977484d95575f80ef8399c9cf1d45ebd"#self.get_uuid(image.name)
+        image.location = "/dev/null"
+        rc, image_id = self.do_create_image(account, image)
+
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
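+        # Suffix the link name with the current vlink count so repeated
+        # creates of the same name still yield distinct, deterministic ids.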
+        vlink_id = self.get_uuid("%s_%s" % (link_params.name, len(self.resources[account.name].vlinks)))
+        vlink = RwcalYang.VirtualLinkInfoParams()
+        vlink.name = link_params.name
+        vlink.state = 'active'
+        vlink.virtual_link_id = vlink_id
+        vlink.subnet = link_params.subnet
+        vlink.connection_points = []
+        for field in link_params.provider_network.fields:
+            if link_params.provider_network.has_field(field):
+                setattr(vlink.provider_network, field, getattr(link_params.provider_network, field))
+
+        self.resources[account.name].vlinks[vlink_id] = vlink
+        logger.debug('created virtual-link: {}'.format(vlink_id))
+        return vlink_id
+
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        self.resources[account.name].vlinks.pop(link_id)
+        logger.debug('deleted virtual-link: {}'.format(link_id))
+
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        vlink = self.resources[account.name].vlinks[link_id]
+        logger.debug('Returning virtual-link-info for : {}'.format(link_id))
+        return vlink
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_virtual_link_list(self, account):
+        vnf_resources = RwcalYang.VNFResources()
+        for r in self.resources[account.name].vlinks.values():
+            vlink = RwcalYang.VirtualLinkInfoParams()
+            vlink.copy_from(r)
+            vnf_resources.virtual_link_info_list.append(vlink)
+        logger.debug("Returning list of virtual-link-info of size: %d", len(vnf_resources.virtual_link_info_list))
+        return vnf_resources
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        vdu_id = self.get_uuid("%s_%s" % (vdu_init.name, len(self.resources[account.name].vdus)))
+        vdu = RwcalYang.VDUInfoParams()
+        vdu.vdu_id = vdu_id
+        vdu.name = vdu_init.name
+        vdu.node_id = vdu_init.node_id
+        vdu.image_id = vdu_init.image_id
+        if vdu_init.has_field('flavor_id'):
+            vdu.flavor_id = vdu_init.flavor_id
+
+        # Deep-copy each optional EPA sub-message via its protobuf encoding.
+        for epa_field in ('vm_flavor', 'guest_epa', 'vswitch_epa',
+                          'hypervisor_epa', 'host_epa'):
+            if vdu_init.has_field(epa_field):
+                copied = getattr(vdu, epa_field).new()
+                copied.from_pbuf(getattr(vdu_init, epa_field).to_pbuf())
+                setattr(vdu, epa_field, copied)
+
+        vdu.state = 'active'
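+        # Fabricate a management address: pack a random 32-bit integer
+        # big-endian and render it as a dotted-quad IPv4 string.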
+        vdu.management_ip = socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff)))
+        vdu.public_ip = vdu.management_ip
+
+        for c in vdu_init.connection_points:
+            p = vdu.connection_points.add()
+            p.connection_point_id = self.get_uuid(c.name)
+            p.name = c.name
+            p.vdu_id = vdu_id
+            p.state = 'active'
+            p.ip_address = socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff)))
+            p.virtual_link_id = c.virtual_link_id
+            # Need to add this connection_point to virtual link
+            vlink = self.resources[account.name].vlinks[c.virtual_link_id]
+            v = vlink.connection_points.add()
+            for field in p.fields:
+                if p.has_field(field):
+                    setattr(v, field, getattr(p, field))
+
+        self.resources[account.name].vdus[vdu_id] = vdu
+        logger.debug('Created vdu: {}'.format(vdu_id))
+        return vdu_id
+
+
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        vdu = self.resources[account.name].vdus[vdu_modify.vdu_id]
+        for c in vdu_modify.connection_points_add:
+            p = vdu.connection_points.add()
+            p.connection_point_id = self.get_uuid(c.name)
+            p.name = c.name
+            p.vdu_id = vdu.vdu_id
+            p.state = 'active'
+            p.ip_address = socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff)))
+            p.virtual_link_id = c.virtual_link_id
+            # Need to add this connection_point to virtual link
+            vlink = self.resources[account.name].vlinks[c.virtual_link_id]
+            aa = RwcalYang.VirtualLinkInfoParams_ConnectionPoints()
+            aa.connection_point_id = p.connection_point_id
+            aa.name = p.name
+            aa.virtual_link_id = vlink.virtual_link_id
+            aa.state = 'active'
+            aa.ip_address = p.ip_address
+            aa.vdu_id = p.vdu_id
+            vlink.connection_points.append(aa)
+
+        for c in vdu_modify.connection_points_remove:
+            for d in vdu.connection_points:
+                if c.connection_point_id == d.connection_point_id:
+                    vdu.connection_points.remove(d)
+                    break
+            for k, vlink in self.resources[account.name].vlinks.items():
+                for z in vlink.connection_points:
+                    if z.connection_point_id == c.connection_point_id:
+                        vlink.connection_points.remove(z)
+                        break
+        logger.debug('modified vdu: {}'.format(vdu_modify.vdu_id))
+
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        vdu = self.resources[account.name].vdus.pop(vdu_id)
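+        # Detach each of the VDU's connection points from its virtual link.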
+        for c in vdu.connection_points:
+            vlink = self.resources[account.name].vlinks[c.virtual_link_id]
+            z = [p for p in vlink.connection_points if p.connection_point_id == c.connection_point_id]
+            assert len(z) == 1
+            vlink.connection_points.remove(z[0])
+
+        logger.debug('deleted vdu: {}'.format(vdu_id))
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        vdu = self.resources[account.name].vdus[vdu_id]
+        logger.debug('Returning vdu-info for : {}'.format(vdu_id))
+        return vdu.copy()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_vdu_list(self, account):
+        vnf_resources = RwcalYang.VNFResources()
+        for r in self.resources[account.name].vdus.values():
+            vdu = RwcalYang.VDUInfoParams()
+            vdu.copy_from(r)
+            vnf_resources.vdu_info_list.append(vdu)
+        logger.debug("Returning list of vdu-info of size: %d", len(vnf_resources.vdu_info_list))
+        return vnf_resources
+
diff --git a/rwcal/plugins/vala/rwcal_openmano/CMakeLists.txt b/rwcal/plugins/vala/rwcal_openmano/CMakeLists.txt
new file mode 100644 (file)
index 0000000..3218907
--- /dev/null
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwcal_openmano rwcal_openmano.py)
diff --git a/rwcal/plugins/vala/rwcal_openmano/Makefile b/rwcal/plugins/vala/rwcal_openmano/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/vala/rwcal_openmano/rwcal_openmano.py b/rwcal/plugins/vala/rwcal_openmano/rwcal_openmano.py
new file mode 100644 (file)
index 0000000..1503d64
--- /dev/null
@@ -0,0 +1,254 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import logging
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang)
+
+import rw_status
+import rwlogger
+
+logger = logging.getLogger('rwcal.openmano')
+
+class UnknownAccountError(Exception):
+    pass
+
+
+class MissingFileError(Exception):
+    pass
+
+
+class ImageLocationError(Exception):
+    pass
+
+
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+    MissingFileError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+
+class RwcalOpenmanoPlugin(GObject.Object, RwCal.Cloud):
+    """Stub implementation the CAL VALA methods for Openmano. """
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    category="rw-cal-log",
+                    subcategory="openmano",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_cloud_creds(self, account):
+        """
+        Validates the cloud account credentials for the specified account.
+        If the credentials are not valid, an error code and reason string are returned.
+        Arguments:
+            account - a cloud account to validate
+
+        Returns:
+            Validation Code and Details String
+        """
+        status = RwcalYang.CloudConnectionStatus(
+                status="success",
+                details=""
+                )
+        logger.debug("Returning status: %s", status)
+        return status
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_management_network(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_tenant(self, account, name):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_role(self, account, name):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_image(self, account, image):
+        logger.warning("Creating image on openmano not supported")
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_vm(self, account, vm):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        return RwcalYang.VimResources()
+
+    @rwstatus
+    def do_create_flavor(self, account, flavor):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, flavor_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_add_host(self, account, host):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_port(self, account, port):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        return RwcalYang.VimResources()
+
+    @rwstatus
+    def do_create_network(self, account, network):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, network_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        return RwcalYang.VimResources()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_virtual_link_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_vdu_list(self, account):
+        raise NotImplementedError()
diff --git a/rwcal/plugins/vala/rwcal_openmano_vimconnector/CMakeLists.txt b/rwcal/plugins/vala/rwcal_openmano_vimconnector/CMakeLists.txt
new file mode 100644 (file)
index 0000000..8938f0a
--- /dev/null
@@ -0,0 +1,35 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+### rwcal-openmano-vimconnector package
+set(PKG_NAME rwcal-openmano-vimconnector)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+rift_install_python_plugin(rwcal_openmano_vimconnector rwcal_openmano_vimconnector.py)
+
+rift_python_install_tree(
+  FILES
+    rift/rwcal/openmano_vimconnector/__init__.py
+    rift/rwcal/openmano_vimconnector/vimconn.py
+    rift/rwcal/openmano_vimconnector/vimconn_openvim.py
+    rift/rwcal/openmano_vimconnector/openmano_schemas.py
+  PYTHON3_ONLY
+  COMPONENT ${PKG_LONG_NAME})
diff --git a/rwcal/plugins/vala/rwcal_openmano_vimconnector/Makefile b/rwcal/plugins/vala/rwcal_openmano_vimconnector/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/README b/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/README
new file mode 100644 (file)
index 0000000..d235ad5
--- /dev/null
@@ -0,0 +1,4 @@
+Below are three files taken from the Openmano git repository at the commit referenced in these URLs:
+https://raw.githubusercontent.com/nfvlabs/openmano/71ffb2c9be4639ce2ec6179d45a2690cf6589c95/openmano/vimconn.py
+https://raw.githubusercontent.com/nfvlabs/openmano/71ffb2c9be4639ce2ec6179d45a2690cf6589c95/openmano/vimconn_openvim.py
+https://raw.githubusercontent.com/nfvlabs/openmano/71ffb2c9be4639ce2ec6179d45a2690cf6589c95/openmano/openmano_schemas.py
diff --git a/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/__init__.py b/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/__init__.py
new file mode 100644 (file)
index 0000000..e405796
--- /dev/null
@@ -0,0 +1 @@
+from .vimconn_openvim import vimconnector
diff --git a/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/openmano_schemas.py b/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/openmano_schemas.py
new file mode 100644 (file)
index 0000000..1f1bbe7
--- /dev/null
@@ -0,0 +1,752 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+JSON schemas used by the openmano httpserver.py module to parse the different files and messages sent through the API.
+'''
+__author__="Alfonso Tierno, Gerardo Garcia"
+__date__ ="$09-oct-2014 09:09:48$"
+
+#Basis schemas
+pattern_name="^[ -~]+$"
+passwd_schema={"type" : "string", "minLength":1, "maxLength":60}
+nameshort_schema={"type" : "string", "minLength":1, "maxLength":60, "pattern" : "^[^,;()'\"]+$"}
+name_schema={"type" : "string", "minLength":1, "maxLength":255, "pattern" : "^[^,;()'\"]+$"}
+xml_text_schema={"type" : "string", "minLength":1, "maxLength":1000, "pattern" : "^[^']+$"}
+description_schema={"type" : ["string","null"], "maxLength":255, "pattern" : "^[^'\"]+$"}
+id_schema_fake = {"type" : "string", "minLength":2, "maxLength":36 }  #"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
+id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
+pci_schema={"type":"string", "pattern":"^[0-9a-fA-F]{4}(:[0-9a-fA-F]{2}){2}\.[0-9a-fA-F]$"}
+http_schema={"type":"string", "pattern":"^https?://[^'\"=]+$"}
+bandwidth_schema={"type":"string", "pattern" : "^[0-9]+ *([MG]bps)?$"}
+memory_schema={"type":"string", "pattern" : "^[0-9]+ *([MG]i?[Bb])?$"}
+integer0_schema={"type":"integer","minimum":0}
+integer1_schema={"type":"integer","minimum":1}
+path_schema={"type":"string", "pattern":"^(\.){0,2}(/[^/\"':{}\(\)]+)+$"}
+vlan_schema={"type":"integer","minimum":1,"maximum":4095}
+vlan1000_schema={"type":"integer","minimum":1000,"maximum":4095}
+mac_schema={"type":"string", "pattern":"^[0-9a-fA-F][02468aceACE](:[0-9a-fA-F]{2}){5}$"}  #must be unicast: LSB of the MSB byte == 0
+#mac_schema={"type":"string", "pattern":"^([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$"}
+ip_schema={"type":"string","pattern":"^([0-9]{1,3}\.){3}[0-9]{1,3}$"}
+port_schema={"type":"integer","minimum":1,"maximum":65534}
+object_schema={"type":"object"}
+schema_version_2={"type":"integer","minimum":2,"maximum":2}
+log_level_schema={"type":"string", "enum":["DEBUG", "INFO", "WARNING","ERROR","CRITICAL"]}
+
+metadata_schema={
+    "type":"object",
+    "properties":{
+        "architecture": {"type":"string"},
+        "use_incremental": {"type":"string","enum":["yes","no"]},
+        "vpci": pci_schema,
+        "os_distro": {"type":"string"},
+        "os_type": {"type":"string"},
+        "os_version": {"type":"string"},
+        "bus": {"type":"string"},
+        "topology": {"type":"string", "enum": ["oneSocket"]}
+    }
+}
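+
+# Illustrative document accepted by metadata_schema (all keys optional):
+#   {"architecture": "x86_64", "os_type": "linux", "topology": "oneSocket"}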
+
+#Schema for the configuration file
+config_schema = {
+    "title":"configuration response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "http_port": port_schema,
+        "http_admin_port": port_schema,
+        "http_host": nameshort_schema,
+        "vnf_repository": path_schema,
+        "db_host": nameshort_schema,
+        "db_user": nameshort_schema,
+        "db_passwd": {"type":"string"},
+        "db_name": nameshort_schema,
+        # Next fields will disappear once the MANO API includes appropriate primitives
+        "vim_url": http_schema,
+        "vim_url_admin": http_schema,
+        "vim_name": nameshort_schema,
+        "vim_tenant_name": nameshort_schema,
+        "mano_tenant_name": nameshort_schema,
+        "mano_tenant_id": id_schema, 
+        "http_console_ports": {
+            "type": "array", 
+            "items": {"OneOf" : [
+                port_schema, 
+                {"type":"object", "properties":{"from": port_schema, "to": port_schema}, "required": ["from","to"]} 
+            ]}
+        },
+        "log_level": log_level_schema,
+        "log_level_db": log_level_schema,
+        "log_level_vimconn": log_level_schema
+    },
+    "required": ['db_host', 'db_user', 'db_passwd', 'db_name'],
+    "additionalProperties": False
+}
+
+tenant_schema = {
+    "title":"tenant information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "tenant":{
+            "type":"object",
+            "properties":{
+                "name": nameshort_schema,
+                "description": description_schema,
+            },
+            "required": ["name"],
+            "additionalProperties": True
+        }
+    },
+    "required": ["tenant"],
+    "additionalProperties": False
+}
+tenant_edit_schema = {
+    "title":"tenant edit information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "tenant":{
+            "type":"object",
+            "properties":{
+                "name": name_schema,
+                "description": description_schema,
+            },
+            "additionalProperties": False
+        }
+    },
+    "required": ["tenant"],
+    "additionalProperties": False
+}
+
+datacenter_schema_properties={
+                "name": name_schema,
+                "description": description_schema,
+                "type": nameshort_schema, #currently "openvim" or "openstack", can be enlarge with plugins
+                "vim_url": description_schema,
+                "vim_url_admin": description_schema,
+                "config": { "type":"object" }
+            }
+
+datacenter_schema = {
+    "title":"datacenter information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "datacenter":{
+            "type":"object",
+            "properties":datacenter_schema_properties,
+            "required": ["name", "vim_url"],
+            "additionalProperties": True
+        }
+    },
+    "required": ["datacenter"],
+    "additionalProperties": False
+}
+
+
+datacenter_edit_schema = {
+    "title":"datacenter edit nformation schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "datacenter":{
+            "type":"object",
+            "properties":datacenter_schema_properties,
+            "additionalProperties": False
+        }
+    },
+    "required": ["datacenter"],
+    "additionalProperties": False
+}
+
+
+netmap_new_schema = {
+    "title":"netmap new information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "netmap":{   #delete from datacenter
+            "type":"object",
+            "properties":{
+                "name": name_schema,  #name or uuid of net to change
+                "vim_id": id_schema,
+                "vim_name": name_schema
+            },
+            "minProperties": 1,
+            "additionalProperties": False
+        },
+    },
+    "required": ["netmap"],
+    "additionalProperties": False
+}
+
+netmap_edit_schema = {
+    "title":"netmap edit information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "netmap":{   #delete from datacenter
+            "type":"object",
+            "properties":{
+                "name": name_schema,  #name or uuid of net to change
+            },
+            "minProperties": 1,
+            "additionalProperties": False
+        },
+    },
+    "required": ["netmap"],
+    "additionalProperties": False
+}
+
+datacenter_action_schema = {
+    "title":"datacenter action information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "net-update":{"type":"null",},
+        "net-edit":{
+            "type":"object",
+            "properties":{
+                "net": name_schema,  #name or uuid of net to change
+                "name": name_schema,
+                "description": description_schema,
+                "shared": {"type": "boolean"}
+            },
+            "minProperties": 1,
+            "additionalProperties": False
+        },
+        "net-delete":{
+            "type":"object",
+            "properties":{
+                "net": name_schema,  #name or uuid of net to change
+            },
+            "required": ["net"],
+            "additionalProperties": False
+        },
+    },
+    "minProperties": 1,
+    "maxProperties": 1,
+    "additionalProperties": False
+}
+
+
+datacenter_associate_schema={
+    "title":"datacenter associate information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "datacenter":{
+            "type":"object",
+            "properties":{
+                "vim_tenant": id_schema,
+                "vim_tenant_name": nameshort_schema,
+                "vim_username": nameshort_schema,
+                "vim_password": nameshort_schema,
+            },
+#            "required": ["vim_tenant"],
+            "additionalProperties": True
+        }
+    },
+    "required": ["datacenter"],
+    "additionalProperties": False
+}
+
+internal_connection_element_schema = {
+    "type":"object",
+    "properties":{
+        "VNFC": name_schema,
+        "local_iface_name": name_schema
+    }
+}
+
+internal_connection_schema = {
+    "type":"object",
+    "properties":{
+        "name": name_schema,
+        "description":description_schema,
+        "type":{"type":"string", "enum":["bridge","data","ptp"]},
+        "elements": {"type" : "array", "items": internal_connection_element_schema, "minItems":2}
+    },
+    "required": ["name", "type", "elements"],
+    "additionalProperties": False
+}
+
+external_connection_schema = {
+    "type":"object",
+    "properties":{
+        "name": name_schema,
+        "type":{"type":"string", "enum":["mgmt","bridge","data"]},
+        "VNFC": name_schema,
+        "local_iface_name": name_schema ,
+        "description":description_schema
+    },
+    "required": ["name", "type", "VNFC", "local_iface_name"],
+    "additionalProperties": False
+}
+
+interfaces_schema={
+    "type":"array",
+    "items":{
+        "type":"object",
+        "properties":{
+            "name":name_schema,
+            "dedicated":{"type":"string","enum":["yes","no","yes:sriov"]},
+            "bandwidth":bandwidth_schema,
+            "vpci":pci_schema,
+            "mac_address": mac_schema
+        },
+        "additionalProperties": False,
+        "required": ["name","dedicated", "bandwidth"]
+    }
+}
+
+bridge_interfaces_schema={
+    "type":"array",
+    "items":{
+        "type":"object",
+        "properties":{
+            "name": name_schema,
+            "bandwidth":bandwidth_schema,
+            "vpci":pci_schema,
+            "mac_address": mac_schema,
+            "model": {"type":"string", "enum":["virtio","e1000","ne2k_pci","pcnet","rtl8139"]}
+        },
+        "additionalProperties": False,
+        "required": ["name"]
+    }
+}
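+
+# Illustrative item accepted by bridge_interfaces_schema ("name" is the only
+# required key):
+#   {"name": "mgmt0", "model": "virtio"}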
+
+devices_schema={
+    "type":"array",
+    "items":{
+        "type":"object",
+        "properties":{
+            "type":{"type":"string", "enum":["disk","cdrom","xml"] },
+            "image": path_schema,
+            "image metadata": metadata_schema, 
+            "vpci":pci_schema,
+            "xml":xml_text_schema,
+        },
+        "additionalProperties": False,
+        "required": ["type"]
+    }
+}
+
+
+numa_schema = {
+    "type": "object",
+    "properties": {
+        "memory":integer1_schema,
+        "cores":integer1_schema,
+        "paired-threads":integer1_schema,
+        "threads":integer1_schema,
+        "cores-id":{"type":"array","items":integer0_schema},
+        "paired-threads-id":{"type":"array","items":{"type":"array","minItems":2,"maxItems":2,"items":integer0_schema}},
+        "threads-id":{"type":"array","items":integer0_schema},
+        "interfaces":interfaces_schema
+    },
+    "additionalProperties": False,
+    #"required": ["memory"]
+}
+
+vnfc_schema = {
+    "type":"object",
+    "properties":{
+        "name": name_schema,
+        "description": description_schema,
+        "VNFC image": {"oneOf": [path_schema, http_schema]},
+        "image metadata": metadata_schema, 
+        "processor": {
+            "type":"object",
+            "properties":{
+                "model":description_schema,
+                "features":{"type":"array","items":nameshort_schema}
+            },
+            "required": ["model"],
+            "additionalProperties": False
+        },
+        "hypervisor": {
+            "type":"object",
+            "properties":{
+                "type":nameshort_schema,
+                "version":description_schema
+            },
+        },
+        "ram":integer0_schema,
+        "vcpus":integer0_schema,
+        "disk": integer1_schema,
+        "numas": {
+            "type": "array",
+            "items":numa_schema
+        },
+        "bridge-ifaces": bridge_interfaces_schema,
+        "devices": devices_schema
+    },
+    "required": ["name", "VNFC image"],
+    "additionalProperties": False
+}
+
+vnfd_schema_v01 = {
+    "title":"vnfd information schema v0.1",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "vnf":{
+            "type":"object",
+            "properties":{
+                "name": name_schema,
+                "description": description_schema,
+                "class": nameshort_schema,
+                "public": {"type" : "boolean"},
+                "physical": {"type" : "boolean"},
+                "tenant_id": id_schema, #only valid for admin
+                "external-connections": {"type" : "array", "items": external_connection_schema, "minItems":1},
+                "internal-connections": {"type" : "array", "items": internal_connection_schema, "minItems":1},
+                "VNFC":{"type" : "array", "items": vnfc_schema, "minItems":1}
+            },
+            "required": ["name","external-connections"],
+            "additionalProperties": True
+        }
+    },
+    "required": ["vnf"],
+    "additionalProperties": False
+}
+
+#Future VNFD schema to be defined
+vnfd_schema_v02 = {
+    "title":"vnfd information schema v0.2",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "schema_version": schema_version_2,
+        "vnf":{
+            "type":"object",
+            "properties":{
+                "name": name_schema,
+            },
+            "required": ["name"],
+            "additionalProperties": True
+        }
+    },
+    "required": ["vnf", "schema_version"],
+    "additionalProperties": False
+}
+
+#vnfd_schema = vnfd_schema_v01
+#{
+#    "title":"vnfd information schema v0.2",
+#    "$schema": "http://json-schema.org/draft-04/schema#",
+#    "oneOf": [vnfd_schema_v01, vnfd_schema_v02]
+#}
+
+graph_schema = {
+    "title":"graphical scenario descriptor information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "x":      integer0_schema,
+        "y":      integer0_schema,
+        "ifaces": {
+            "type":"object",
+            "properties":{
+                "left": {"type":"array"},
+                "right": {"type":"array"},
+                "bottom": {"type":"array"},
+            }
+        }
+    },
+    "required": ["x","y"]
+}
+
+nsd_schema_v01 = {
+    "title":"network scenario descriptor information schema v0.1",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "name":name_schema,
+        "description": description_schema,
+        "tenant_id": id_schema, #only valid for admin
+        "public": {"type": "boolean"},
+        "topology":{
+            "type":"object",
+            "properties":{
+                "nodes": {
+                    "type":"object",
+                    "patternProperties":{
+                        ".": {
+                            "type": "object",
+                            "properties":{
+                                "type":{"type":"string", "enum":["VNF", "other_network", "network", "external_network"]},
+                                "vnf_id": id_schema,
+                                "graph": graph_schema,
+                            },
+                            "patternProperties":{
+                                "^(VNF )?model$": {"type": "string"}
+                            },
+                            "required": ["type"]
+                        }
+                    }
+                },
+                "connections": {
+                    "type":"object",
+                    "patternProperties":{
+                        ".": {
+                            "type": "object",
+                            "properties":{
+                                "nodes":{"oneOf":[{"type":"object", "minProperties":2}, {"type":"array", "minLength":1}]},
+                                "type": {"type": "string", "enum":["link", "external_network", "dataplane_net", "bridge_net"]},
+                                "graph": graph_schema
+                            },
+                            "required": ["nodes"]
+                        },
+                    }
+                }
+            },
+            "required": ["nodes"],
+            "additionalProperties": False
+        }
+    },
+    "required": ["name","topology"],
+    "additionalProperties": False
+}
+
+#Future NSD schema to be defined
+nsd_schema_v02 = {
+    "title":"network scenario descriptor information schema v0.2",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "schema_version": schema_version_2,
+        "scenario":{
+            "type":"object",
+            "properties":{
+                "name":name_schema,
+                "description": description_schema,
+                "tenant_id": id_schema, #only valid for admin
+                "public": {"type": "boolean"},
+                "vnfs": {
+                    "type":"object",
+                    "patternProperties":{
+                        ".": {
+                            "type": "object",
+                            "properties":{
+                                "vnf_id": id_schema,
+                                "graph": graph_schema,
+                                "vnf_name": name_schema,
+                            },
+                        }
+                    },
+                    "minProperties": 1
+                },
+                "networks": {
+                    "type":"object",
+                    "patternProperties":{
+                        ".": {
+                            "type": "object",
+                            "properties":{
+                                "interfaces":{"type":"array", "minLength":1},
+                                "type": {"type": "string", "enum":["dataplane", "bridge"]},
+                                "external" : {"type": "boolean"},
+                                "graph": graph_schema
+                            },
+                            "required": ["interfaces"]
+                        },
+                    }
+                },
+            
+            },
+            "required": ["vnfs", "networks","name"],
+            "additionalProperties": False
+        }
+    },
+    "required": ["scenario","schema_version"],
+    "additionalProperties": False
+}
+
+#scenario_new_schema = {
+#    "title":"new scenario information schema",
+#    "$schema": "http://json-schema.org/draft-04/schema#",
+#    #"oneOf": [nsd_schema_v01, nsd_schema_v02]
+#    "oneOf": [nsd_schema_v01]
+#}
+
+scenario_edit_schema = {
+    "title":"edit scenario information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "name":name_schema,
+        "description": description_schema,
+        "topology":{
+            "type":"object",
+            "properties":{
+                "nodes": {
+                    "type":"object",
+                    "patternProperties":{
+                        "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$": {
+                            "type":"object",
+                            "properties":{
+                                "graph":{
+                                    "type": "object",
+                                    "properties":{
+                                        "x": integer0_schema,
+                                        "y": integer0_schema,
+                                        "ifaces":{ "type": "object"}
+                                    }
+                                },
+                                "description": description_schema,
+                                "name": name_schema
+                            }
+                        }
+                    }
+                }
+            },
+            "required": ["nodes"],
+            "additionalProperties": False
+        }
+    },
+    "additionalProperties": False
+}
+
+scenario_action_schema = {
+    "title":"scenario action information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "start":{
+            "type": "object",
+            "properties": {
+                "instance_name":name_schema,
+                "description":description_schema,
+                "datacenter": {"type": "string"}
+            },
+            "required": ["instance_name"]
+        },
+        "deploy":{
+            "type": "object",
+            "properties": {
+                "instance_name":name_schema,
+                "description":description_schema,
+                "datacenter": {"type": "string"}
+            },
+            "required": ["instance_name"]
+        },
+        "reserve":{
+            "type": "object",
+            "properties": {
+                "instance_name":name_schema,
+                "description":description_schema,
+                "datacenter": {"type": "string"}
+            },
+            "required": ["instance_name"]
+        },
+        "verify":{
+            "type": "object",
+            "properties": {
+                "instance_name":name_schema,
+                "description":description_schema,
+                "datacenter": {"type": "string"}
+            },
+            "required": ["instance_name"]
+        }
+    },
+    "minProperties": 1,
+    "maxProperties": 1,
+    "additionalProperties": False
+}
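+
+# Illustrative action accepted by scenario_action_schema (exactly one of the
+# four verbs may be present):
+#   {"start": {"instance_name": "demo-instance"}}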
+
+instance_scenario_create_schema = {
+    "title":"instance scenario create information schema v0.1",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "schema_version": {"type": "string", "enum": ["0.1"]},
+        "instance":{
+            "type":"object",
+            "properties":{
+                "name":name_schema,
+                "description":description_schema,
+                "datacenter": name_schema,
+                "scenario" : name_schema, #can be an UUID or name
+                "action":{"enum": ["deploy","reserve","verify" ]},
+                "connect_mgmt_interfaces": {"oneOff": [{"type":"boolean"}, {"type":"object"}]},# can be true or a dict with datacenter: net_name
+                "vnfs":{             #mapping from scenario to datacenter
+                    "type": "object",
+                    "patternProperties":{
+                        ".": {
+                            "type": "object",
+                            "properties":{
+                                "name":   name_schema,#override vnf name
+                                "datacenter": name_schema,
+                                "metadata": {"type": "object"},
+                                "user_data": {"type": "string"}
+                            }
+                        }
+                    },
+                },
+                "networks":{             #mapping from scenario to datacenter
+                    "type": "object",
+                    "patternProperties":{
+                        ".": {
+                            "type": "object",
+                            "properties":{
+                                "netmap-create": {"oneOf":[name_schema,{"type": "null"}]}, #datacenter network to use. Null if must be created as an internal net
+                                "netmap-use": name_schema,
+                                "name":   name_schema,#override network name
+                                "datacenter": name_schema,
+                            }
+                        }
+                    },
+                },
+            },
+            "additionalProperties": False,
+            "required": ["scenario", "name"]
+        },
+    },
+    "required": ["instance"],
+    "additionalProperties": False
+    
+}
+
+instance_scenario_action_schema = {
+    "title":"instance scenario action information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "start":{"type": "null"},
+        "pause":{"type": "null"},
+        "resume":{"type": "null"},
+        "shutoff":{"type": "null"},
+        "shutdown":{"type": "null"},
+        "forceOff":{"type": "null"},
+        "rebuild":{"type": "null"},
+        "reboot":{
+            "type": ["object","null"],
+        },
+        "console": {"type": ["string", "null"], "enum": ["novnc", "xvpvnc", "rdp-html5", "spice-html5", None]},
+        "vnfs":{"type": "array", "items":{"type":"string"}},
+        "vms":{"type": "array", "items":{"type":"string"}}
+    },
+    "minProperties": 1,
+    #"maxProperties": 1,
+    "additionalProperties": False
+}
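+# Illustrative usage of the schema above (payload values are assumptions):
+# a reboot of two VMs would validate as
+#   from jsonschema import validate
+#   validate({"reboot": None, "vms": ["vm-uuid-1", "vm-uuid-2"]},
+#            instance_scenario_action_schema)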
diff --git a/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/vimconn.py b/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/vimconn.py
new file mode 100644 (file)
index 0000000..3608853
--- /dev/null
@@ -0,0 +1,391 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+vimconn implements an abstract class for the VIM connector plugins,
+with the definition of the methods to be implemented.
+'''
+__author__="Alfonso Tierno"
+__date__ ="$16-oct-2015 11:09:29$"
+
+import logging
+
+#Error variables 
+HTTP_Bad_Request = 400
+HTTP_Unauthorized = 401 
+HTTP_Not_Found = 404 
+HTTP_Method_Not_Allowed = 405 
+HTTP_Request_Timeout = 408
+HTTP_Conflict = 409
+HTTP_Not_Implemented = 501
+HTTP_Service_Unavailable = 503 
+HTTP_Internal_Server_Error = 500 
+
+class vimconnException(Exception):
+    '''Common and base class Exception for all vimconnector exceptions'''
+    def __init__(self, message, http_code=HTTP_Bad_Request):
+        Exception.__init__(self, message)
+        self.http_code = http_code
+
+class vimconnConnectionException(vimconnException):
+    '''Connectivity error with the VIM'''
+    def __init__(self, message, http_code=HTTP_Service_Unavailable):
+        vimconnException.__init__(self, message, http_code)
+    
+class vimconnUnexpectedResponse(vimconnException):
+    '''Got a wrong response from the VIM'''
+    def __init__(self, message, http_code=HTTP_Service_Unavailable):
+        vimconnException.__init__(self, message, http_code)
+
+class vimconnAuthException(vimconnException):
+    '''Invalid credentials or authorization to perform this action over the VIM'''
+    def __init__(self, message, http_code=HTTP_Unauthorized):
+        vimconnException.__init__(self, message, http_code)
+
+class vimconnNotFoundException(vimconnException):
+    '''The item is not found at VIM'''
+    def __init__(self, message, http_code=HTTP_Not_Found):
+        vimconnException.__init__(self, message, http_code)
+
+class vimconnConflictException(vimconnException):
+    '''There is a conflict, e.g. more than one item found'''
+    def __init__(self, message, http_code=HTTP_Conflict):
+        vimconnException.__init__(self, message, http_code)
+
+class vimconnNotImplemented(vimconnException):
+    '''The method is not implemented by the connector'''
+    def __init__(self, message, http_code=HTTP_Not_Implemented):
+        vimconnException.__init__(self, message, http_code)
+
+class vimconnector():
+    '''Abstract base class for all the VIM connector plugins.
+    These plugins must implement a vimconnector class derived from this one
+    and provide all of these methods.
+    '''
+    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level="ERROR", config={}):
+        self.id        = uuid
+        self.name      = name
+        self.url       = url
+        self.url_admin = url_admin
+        self.tenant_id = tenant_id
+        self.tenant_name = tenant_name
+        self.user      = user
+        self.passwd    = passwd
+        self.config    = config
+        self.logger = logging.getLogger('mano.vim')
+        self.logger.setLevel( getattr(logging, log_level) )
+        if not self.url_admin:  #try to use normal url 
+            self.url_admin = self.url
+    
+    def __getitem__(self,index):
+        if index=='tenant_id':
+            return self.tenant_id
+        elif index=='tenant_name':
+            return self.tenant_name
+        elif index=='id':
+            return self.id
+        elif index=='name':
+            return self.name
+        elif index=='user':
+            return self.user
+        elif index=='passwd':
+            return self.passwd
+        elif index=='url':
+            return self.url
+        elif index=='url_admin':
+            return self.url_admin
+        elif index=="config":
+            return self.config
+        else:
+            raise KeyError("Invalid key '%s'" %str(index))
+        
+    def __setitem__(self,index, value):
+        if index=='tenant_id':
+            self.tenant_id = value
+        elif index=='tenant_name':
+            self.tenant_name = value
+        elif index=='id':
+            self.id = value
+        elif index=='name':
+            self.name = value
+        elif index=='user':
+            self.user = value
+        elif index=='passwd':
+            self.passwd = value
+        elif index=='url':
+            self.url = value
+        elif index=='url_admin':
+            self.url_admin = value
+        else:
+            raise KeyError("Invalid key '%s'" %str(index))
+        
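+    # Note: __getitem__/__setitem__ above let callers treat the connector like
+    # a dict of its connection parameters; illustrative usage (values are
+    # assumptions):
+    #   conn['tenant_id']                                    # read a parameter
+    #   conn['url_admin'] = 'http://10.0.0.1:9080/openvim'   # update one
+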
+    def new_tenant(self,tenant_name,tenant_description):
+        '''Adds a new tenant to VIM with this name and description,
+        returns the tenant identifier'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def delete_tenant(self, tenant_id):
+        '''Delete a tenant from VIM'''
+        '''Returns the tenant identifier'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def get_tenant_list(self, filter_dict={}):
+        '''Obtain tenants of VIM
+        filter_dict can contain the following keys:
+            name: filter by tenant name
+            id: filter by tenant uuid/id
+            <other VIM specific>
+        Returns the tenant list of dictionaries: 
+            [{'name': '<name>', 'id': '<id>', ...}, ...]
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def new_network(self,net_name, net_type, shared=False):
+        '''Adds a tenant network to VIM
+            net_type can be 'bridge', 'data' or 'ptp'.  TODO: this needs to be revised
+            shared is a boolean
+        Returns the network identifier'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def get_network_list(self, filter_dict={}):
+        '''Obtain tenant networks of VIM
+        Filter_dict can be:
+            name: network name
+            id: network uuid
+            shared: boolean
+            tenant_id: tenant
+            admin_state_up: boolean
+            status: 'ACTIVE'
+        Returns the network list of dictionaries:
+            [{<the fields at Filter_dict plus some VIM specific>}, ...]
+            List can be empty
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def get_network(self, net_id):
+        '''Obtain network details of the net_id VIM network.
+           Returns a dict with the fields described in get_network_list, plus some VIM-specific ones'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def delete_network(self, net_id):
+        '''Deletes a tenant network from VIM, provide the network id.
+        Returns the network identifier or raise an exception'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def refresh_nets_status(self, net_list):
+        '''Get the status of the networks
+           Params: the list of network identifiers
+           Returns a dictionary with:
+                net_id:         #VIM id of this network
+                    status:     #Mandatory. Text with one of:
+                                #  DELETED (not found at vim)
+                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 
+                                #  OTHER (Vim reported other status not understood)
+                                #  ERROR (VIM indicates an ERROR status)
+                                #  ACTIVE, INACTIVE, DOWN (admin down), 
+                                #  BUILD (on building process)
+                                #
+                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR 
+                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
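+    # Illustrative return value (network ids and messages are assumptions):
+    #   {"net-uuid-1": {"status": "ACTIVE", "vim_info": "..."},
+    #    "net-uuid-2": {"status": "VIM_ERROR",
+    #                   "error_msg": "cannot connect to VIM"}}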
+
+    def get_flavor(self, flavor_id):
+        '''Obtain flavor details from the  VIM
+            Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
+        
+    def new_flavor(self, flavor_data):
+        '''Adds a tenant flavor to VIM
+            flavor_data contains a dictionary with information, keys:
+                name: flavor name
+                ram: memory (cloud type) in MBytes
+                vcpus: cpus (cloud type)
+                extended: EPA parameters
+                  - numas: #items requested in same NUMA
+                        memory: number of 1G huge pages memory
+                        paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
+                        interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
+                          - name: interface name
+                            dedicated: yes|no|yes:sriov;  for PT, SRIOV or only one SRIOV for the physical NIC
+                            bandwidth: X Gbps; requested guarantee bandwidth
+                            vpci: requested virtual PCI address   
+                disk: disk size
+                is_public:
+                 #TODO to concrete
+        Returns the flavor identifier'''
+        raise vimconnNotImplemented( "Should have implemented this" )
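+    # Illustrative flavor_data (all values are assumptions):
+    #   {"name": "vnf.small", "ram": 4096, "vcpus": 2, "disk": 40,
+    #    "extended": {"numas": [{"memory": 4, "paired-threads": 2,
+    #                            "interfaces": [{"name": "xe0", "dedicated": "no",
+    #                                            "bandwidth": "10 Gbps"}]}]}}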
+
+    def delete_flavor(self, flavor_id):
+        '''Deletes a tenant flavor from VIM identify by its id
+        Returns the used id or raise an exception'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def new_image(self,image_dict):
+        '''
+        Adds a tenant image to VIM
+        Returns:
+            200, image-id        if the image is created
+            <0, message          if there is an error
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def delete_image(self, image_id):
+        '''Deletes a tenant image from VIM'''
+        '''Returns the HTTP response code and a message indicating details of the success or failure'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def get_image_id_from_path(self, path):
+        '''Get the image id from image path in the VIM database'''
+        '''Returns:
+             0,"Image not found"   if there are no images with that path
+             1,image-id            if there is one image with that path
+             <0,message            if there was an error (Image not found, error contacting VIM, more than 1 image with that path, etc.) 
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
+        
+    def new_vminstance(self,name,description,start,image_id,flavor_id,net_list):
+        '''Adds a VM instance to VIM
+        Params:
+            start: indicates if VM must start or boot in pause mode. Ignored
+            image_id,flavor_id: image and flavor uuid
+            net_list: list of interfaces, each one is a dictionary with:
+                name:
+                net_id: network uuid to connect
+                vpci: virtual PCI address to assign
+                model: interface model, virtio, e1000, ...
+                mac_address: 
+                use: 'data', 'bridge',  'mgmt'
+                type: 'virtual', 'PF', 'VF', 'VFnotShared'
+                vim_id: filled/added by this function
+                #TODO ip, security groups
+        Returns >=0, the instance identifier
+                <0, error_text
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
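+    # Illustrative net_list entry (the uuid is an assumption); "vim_id" is
+    # filled in by the implementation:
+    #   [{"name": "mgmt0", "net_id": "net-uuid", "use": "mgmt",
+    #     "type": "virtual", "model": "virtio"}]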
+        
+    def get_vminstance(self,vm_id):
+        '''Returns the VM instance information from VIM'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+        
+    def delete_vminstance(self, vm_id):
+        '''Removes a VM instance from VIM'''
+        '''Returns the instance identifier'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def refresh_vms_status(self, vm_list):
+        '''Get the status of the virtual machines and their interfaces/ports
+           Params: the list of VM identifiers
+           Returns a dictionary with:
+                vm_id:          #VIM id of this Virtual Machine
+                    status:     #Mandatory. Text with one of:
+                                #  DELETED (not found at vim)
+                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 
+                                #  OTHER (Vim reported other status not understood)
+                                #  ERROR (VIM indicates an ERROR status)
+                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running), 
+                                #  CREATING (on building process), ERROR
+                                #  ACTIVE:NoMgmtIP (active, but none of its interfaces has an IP address)
+                                #
+                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR 
+                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+                    interfaces:
+                     -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
+                        mac_address:      #Text format XX:XX:XX:XX:XX:XX
+                        vim_net_id:       #network id where this interface is connected
+                        vim_interface_id: #interface/port VIM id
+                        ip_address:       #null, or text with IPv4, IPv6 address
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
+    
+    def action_vminstance(self, vm_id, action_dict):
+        '''Send an action to a VM instance in the VIM
+        Returns the vm_id if the action was successfully sent to the VIM'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+    
+    def get_vminstance_console(self,vm_id, console_type="vnc"):
+        '''
+        Get a console for the virtual machine
+        Params:
+            vm_id: uuid of the VM
+            console_type, can be:
+                "novnc" (by default), "xvpvnc" for VNC types, 
+                "rdp-html5" for RDP types, "spice-html5" for SPICE types
+        Returns dict with the console parameters:
+                protocol: ssh, ftp, http, https, ...
+                server:   usually ip address 
+                port:     the http, ssh, ... port 
+                suffix:   extra text, e.g. the http path and query string   
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
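+    # Illustrative return value (values are assumptions):
+    #   {"protocol": "http", "server": "10.0.0.5", "port": 6080,
+    #    "suffix": "vnc_auto.html?token=xyz"}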
+        
+#NOT USED METHODS in current version        
+
+    def host_vim2gui(self, host, server_dict):
+        '''Transform host dictionary from VIM format to GUI format,
+        and append to the server_dict
+        '''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def get_hosts_info(self):
+        '''Get the information of deployed hosts
+        Returns the hosts content'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def get_hosts(self, vim_tenant):
+        '''Get the hosts and deployed instances
+        Returns the hosts content'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def get_processor_rankings(self):
+        '''Get the processor rankings in the VIM database'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+    
+    def new_host(self, host_data):
+        '''Adds a new host to VIM'''
+        '''Returns status code of the VIM response'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+    
+    def new_external_port(self, port_data):
+        '''Adds an external port to the VIM'''
+        '''Returns the port identifier'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+        
+    def new_external_network(self,net_name,net_type):
+        '''Adds an external network to the VIM (shared)'''
+        '''Returns the network identifier'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def connect_port_network(self, port_id, network_id, admin=False):
+        '''Connects an external port to a network'''
+        '''Returns status code of the VIM response'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
+    def new_vminstancefromJSON(self, vm_data):
+        '''Adds a VM instance to VIM'''
+        '''Returns the instance identifier'''
+        raise vimconnNotImplemented( "Should have implemented this" )
+
diff --git a/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/vimconn_openvim.py b/rwcal/plugins/vala/rwcal_openmano_vimconnector/rift/rwcal/openmano_vimconnector/vimconn_openvim.py
new file mode 100644 (file)
index 0000000..6286b6a
--- /dev/null
@@ -0,0 +1,1372 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+'''
+vimconnector implements all the methods to interact with openvim using the openvim API.
+'''
+__author__="Alfonso Tierno, Gerardo Garcia"
+__date__ ="$26-aug-2014 11:09:29$"
+
+from . import vimconn
+import requests
+import json
+import yaml
+import logging
+from .openmano_schemas import id_schema, name_schema, nameshort_schema, description_schema, \
+                            vlan1000_schema, integer0_schema
+from jsonschema import validate as js_v, exceptions as js_e
+
+'''maps the openvim virtual machine status to the openmano status'''
+vmStatus2manoFormat={'ACTIVE':'ACTIVE',
+                     'PAUSED':'PAUSED',
+                     'SUSPENDED': 'SUSPENDED',
+                     'INACTIVE':'INACTIVE',
+                     'CREATING':'BUILD',
+                     'ERROR':'ERROR',
+                     'DELETED':'DELETED'}
+netStatus2manoFormat={'ACTIVE':'ACTIVE', 'INACTIVE':'INACTIVE', 'BUILD':'BUILD',
+                      'ERROR':'ERROR', 'DELETED':'DELETED', 'DOWN':'DOWN'}
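+# Illustrative usage (a condensed form of the status lookup performed in
+# refresh_vms_status below; vim_status is an assumed variable):
+#   mano_status = vmStatus2manoFormat.get(vim_status, "OTHER")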
+
+
+host_schema = {
+    "type":"object",
+    "properties":{
+        "id": id_schema,
+        "name": name_schema,
+    },
+    "required": ["id"]
+}
+image_schema = {
+    "type":"object",
+    "properties":{
+        "id": id_schema,
+        "name": name_schema,
+    },
+    "required": ["id","name"]
+}
+flavor_schema = {
+    "type":"object",
+    "properties":{
+        "id": id_schema,
+        "name": name_schema,
+    },
+    "required": ["id","name"]
+}
+server_schema = {
+    "type":"object",
+    "properties":{
+        "id":id_schema,
+        "name": name_schema,
+    },
+    "required": ["id","name"]
+}
+new_host_response_schema = {
+    "title":"host response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "host": host_schema
+    },
+    "required": ["host"],
+    "additionalProperties": False
+}
+
+get_images_response_schema = {
+    "title":"openvim images response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "images":{
+            "type":"array",
+            "items": image_schema,
+        }
+    },
+    "required": ["images"],
+    "additionalProperties": False
+}
+
+
+get_flavors_response_schema = {
+    "title":"openvim flavors response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "flavors":{
+            "type":"array",
+            "items": flavor_schema,
+        }
+    },
+    "required": ["flavors"],
+    "additionalProperties": False
+}
+
+
+get_hosts_response_schema = {
+    "title":"openvim hosts response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "hosts":{
+            "type":"array",
+            "items": host_schema,
+        }
+    },
+    "required": ["hosts"],
+    "additionalProperties": False
+}
+
+get_host_detail_response_schema = new_host_response_schema # TODO: Content is not parsed yet
+
+get_server_response_schema = {
+    "title":"openvim server response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "servers":{
+            "type":"array",
+            "items": server_schema,
+        }
+    },
+    "required": ["servers"],
+    "additionalProperties": False
+}
+
+new_tenant_response_schema = {
+    "title":"tenant response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "tenant":{
+            "type":"object",
+            "properties":{
+                "id": id_schema,
+                "name": nameshort_schema,
+                "description":description_schema,
+                "enabled":{"type" : "boolean"}
+            },
+            "required": ["id"]
+        }
+    },
+    "required": ["tenant"],
+    "additionalProperties": False
+}
+
+new_network_response_schema = {
+    "title":"network response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "network":{
+            "type":"object",
+            "properties":{
+                "id":id_schema,
+                "name":name_schema,
+                "type":{"type":"string", "enum":["bridge_man","bridge_data","data", "ptp"]},
+                "shared":{"type":"boolean"},
+                "tenant_id":id_schema,
+                "admin_state_up":{"type":"boolean"},
+                "vlan":vlan1000_schema
+            },
+            "required": ["id"]
+        }
+    },
+    "required": ["network"],
+    "additionalProperties": False
+}
+
+
+# get_network_response_schema = {
+#     "title":"get network response information schema",
+#     "$schema": "http://json-schema.org/draft-04/schema#",
+#     "type":"object",
+#     "properties":{
+#         "network":{
+#             "type":"object",
+#             "properties":{
+#                 "id":id_schema,
+#                 "name":name_schema,
+#                 "type":{"type":"string", "enum":["bridge_man","bridge_data","data", "ptp"]},
+#                 "shared":{"type":"boolean"},
+#                 "tenant_id":id_schema,
+#                 "admin_state_up":{"type":"boolean"},
+#                 "vlan":vlan1000_schema
+#             },
+#             "required": ["id"]
+#         }
+#     },
+#     "required": ["network"],
+#     "additionalProperties": False
+# }
+
+
+new_port_response_schema = {
+    "title":"port response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "port":{
+            "type":"object",
+            "properties":{
+                "id":id_schema,
+            },
+            "required": ["id"]
+        }
+    },
+    "required": ["port"],
+    "additionalProperties": False
+}
+
+get_flavor_response_schema = {
+    "title":"openvim flavors response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "flavor":{
+            "type":"object",
+            "properties":{
+                "id":   id_schema,
+                "name": name_schema,
+                #"extended": {"type":"object"},
+            },
+            "required": ["id", "name"],
+        }
+    },
+    "required": ["flavor"],
+    "additionalProperties": False
+}
+
+new_flavor_response_schema = {
+    "title":"flavor response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "flavor":{
+            "type":"object",
+            "properties":{
+                "id":id_schema,
+            },
+            "required": ["id"]
+        }
+    },
+    "required": ["flavor"],
+    "additionalProperties": False
+}
+
+get_image_response_schema = {
+    "title":"openvim images response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "image":{
+            "type":"object",
+            "properties":{
+                "id":   id_schema,
+                "name": name_schema,
+            },
+            "required": ["id", "name"],
+        }
+    },
+    "required": ["image"],
+    "additionalProperties": False
+}
+new_image_response_schema = {
+    "title":"image response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "image":{
+            "type":"object",
+            "properties":{
+                "id":id_schema,
+            },
+            "required": ["id"]
+        }
+    },
+    "required": ["image"],
+    "additionalProperties": False
+}
+
+new_vminstance_response_schema = {
+    "title":"server response information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "server":{
+            "type":"object",
+            "properties":{
+                "id":id_schema,
+            },
+            "required": ["id"]
+        }
+    },
+    "required": ["server"],
+    "additionalProperties": False
+}
+
+get_processor_rankings_response_schema = {
+    "title":"processor rankings information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "rankings":{
+            "type":"array",
+            "items":{
+                "type":"object",
+                "properties":{
+                    "model": description_schema,
+                    "value": integer0_schema
+                },
+                "additionalProperties": False,
+                "required": ["model","value"]
+            }
+        }
+    },
+    "additionalProperties": False,
+    "required": ["rankings"]
+}
+
+class vimconnector(vimconn.vimconnector):
+    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,log_level="DEBUG",config={}):
+        vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, config)
+        self.tenant = None
+        self.headers_req = {'content-type': 'application/json'}
+        self.logger = logging.getLogger('mano.vim.openvim')
+        if tenant_id:
+            self.tenant = tenant_id
+
+    def __setitem__(self,index, value):
+        '''Set individual parameters.
+        Throws TypeError or KeyError
+        '''
+        if index=='tenant_id':
+            self.tenant = value
+        elif index=='tenant_name':
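+            # invalidate the cached tenant uuid; _get_my_tenant() will resolve it again from the new name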
+            self.tenant = None
+        vimconn.vimconnector.__setitem__(self,index, value)    
+
+    def _get_my_tenant(self):
+        '''Obtain uuid of my tenant from name
+        '''
+        if self.tenant:
+            return self.tenant
+
+        url = self.url+'/tenants?name='+ self.tenant_name
+        self.logger.info("Getting VIM tenant_id GET %s", url)
+        vim_response = requests.get(url, headers = self.headers_req)
+        self._check_http_request_response(vim_response)
+        try:
+            tenant_list = vim_response.json()["tenants"]
+            if len(tenant_list) == 0:
+                raise vimconn.vimconnNotFoundException("No tenant found for name '%s'" % str(self.tenant_name))
+            elif len(tenant_list) > 1:
+                raise vimconn.vimconnConflictException("More than one tenant found for name '%s'" % str(self.tenant_name))
+            self.tenant = tenant_list[0]["id"]
+            return self.tenant
+        except Exception as e:
+            raise vimconn.vimconnUnexpectedResponse("Get VIM tenant {} '{}'".format(type(e).__name__, str(e)))
+
+    def _format_jsonerror(self,http_response):
+        #DEPRECATED, to delete in the future
+        try:
+            data = http_response.json()
+            return data["error"]["description"]
+        except:
+            return http_response.text
+
+    def _format_in(self, http_response, schema):
+        #DEPRECATED, to delete in the future
+        try:
+            client_data = http_response.json()
+            js_v(client_data, schema)
+            #print "Input data: ", str(client_data)
+            return True, client_data
+        except js_e.ValidationError as exc:
+            print("validate_in error, jsonschema exception {} at {}",exc.message, exc.path)
+            return False, ("validate_in error, jsonschema exception ", exc.message, "at", exc.path)
+    
+    def _remove_extra_items(self, data, schema):
+        deleted=[]
+        if type(data) is tuple or type(data) is list:
+            for d in data:
+                a= self._remove_extra_items(d, schema['items'])
+                if a is not None: deleted.append(a)
+        elif type(data) is dict:
+            #for k in data.keys():
+            for k in list(data):
+                if 'properties' not in schema or k not in schema['properties'].keys():
+                    del data[k]
+                    deleted.append(k)
+                else:
+                    a = self._remove_extra_items(data[k], schema['properties'][k])
+                    if a is not None:  deleted.append({k:a})
+        if len(deleted) == 0: return None
+        elif len(deleted) == 1: return deleted[0]
+        else: return deleted
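+    # Illustrative behaviour (assumed data): with schema
+    #   {"type": "object", "properties": {"id": {"type": "string"}}}
+    # and data {"id": "x", "extra": 1}, the call deletes "extra" in place and
+    # returns "extra"; multiple removals are returned as a list.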
+        
+    def _format_request_exception(self, request_exception):
+        '''Transform a request exception into a vimconn exception'''
+        if isinstance(request_exception, js_e.ValidationError):
+            raise vimconn.vimconnUnexpectedResponse("jsonschema exception '{}' at '{}'".format(request_exception.message, request_exception.path))            
+        elif isinstance(request_exception, requests.exceptions.HTTPError):
+            raise vimconn.vimconnUnexpectedResponse(type(request_exception).__name__ + ": " + str(request_exception))
+        else:
+            raise vimconn.vimconnConnectionException(type(request_exception).__name__ + ": " + str(request_exception))
+
+    def _check_http_request_response(self, request_response):
+        '''Raise a vimconn exception if the response is not Ok'''
+        if request_response.status_code >= 200 and  request_response.status_code < 300:
+            return
+        if request_response.status_code == vimconn.HTTP_Unauthorized:
+            raise vimconn.vimconnAuthException(request_response.text)
+        elif request_response.status_code == vimconn.HTTP_Not_Found:
+            raise vimconn.vimconnNotFoundException(request_response.text)
+        elif request_response.status_code == vimconn.HTTP_Conflict:
+            raise vimconn.vimconnConflictException(request_response.text)
+        else: 
+            raise vimconn.vimconnUnexpectedResponse("VIM HTTP_response {}, {}".format(request_response.status_code, str(request_response.text)))
+
+    def new_tenant(self,tenant_name,tenant_description):
+        '''Adds a new tenant to VIM with this name and description, returns the tenant identifier'''
+        #print "VIMConnector: Adding a new tenant to VIM"
+        payload_dict = {"tenant": {"name":tenant_name,"description": tenant_description, "enabled": True}}
+        payload_req = json.dumps(payload_dict)
+        try:
+            url = self.url_admin+'/tenants'
+            self.logger.info("Adding a new tenant %s", url)
+            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, new_tenant_response_schema)
+            #r = self._remove_extra_items(response, new_tenant_response_schema)
+            #if r is not None: 
+            #    self.logger.warn("Warning: remove extra items %s", str(r))
+            tenant_id = response['tenant']['id']
+            return tenant_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def delete_tenant(self,tenant_id):
+        '''Delete a tenant from VIM. Returns the old tenant identifier'''
+        try:
+            url = self.url_admin+'/tenants/'+tenant_id
+            self.logger.info("Delete a tenant DELETE %s", url)
+            vim_response = requests.delete(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            return tenant_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def get_tenant_list(self, filter_dict={}):
+        '''Obtain tenants of VIM
+        filter_dict can contain the following keys:
+            name: filter by tenant name
+            id: filter by tenant uuid/id
+            <other VIM specific>
+        Returns the tenant list of dictionaries: [{'name': '<name>', 'id': '<id>', ...}, ...]
+        '''
+        filterquery=[]
+        filterquery_text=''
+        for k,v in filter_dict.items():
+            filterquery.append(str(k)+'='+str(v))
+        if len(filterquery)>0:
+            filterquery_text='?'+ '&'.join(filterquery)
+        try:
+            url = self.url+'/tenants'+filterquery_text
+            self.logger.info("get_tenant_list GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            return vim_response.json()["tenants"]
+        except requests.exceptions.RequestException as e:
+            self._format_request_exception(e)
+
+    def new_network(self,net_name,net_type, shared=False, **vim_specific):
+        '''Adds a tenant network to VIM'''
+        '''Returns the network identifier'''
+        try:
+            self._get_my_tenant()
+            if net_type=="bridge":
+                net_type="bridge_data"
+            payload_req = {"name": net_name, "type": net_type, "tenant_id": self.tenant, "shared": shared}
+            payload_req.update(vim_specific)
+            url = self.url+'/networks'
+            self.logger.info("Adding a new network POST: %s  DATA: %s", url, str(payload_req))
+            print(payload_req)
+            vim_response = requests.post(url, headers = self.headers_req, data=json.dumps({"network": payload_req}) )
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, new_network_response_schema)
+            #r = self._remove_extra_items(response, new_network_response_schema)
+            #if r is not None: 
+            #    self.logger.warn("Warning: remove extra items %s", str(r))
+            network_id = response['network']['id']
+            return network_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+        
+    def get_network_list(self, filter_dict={}):
+        '''Obtain tenant networks of VIM
+        Filter_dict can be:
+            name: network name
+            id: network uuid
+            public: boolean
+            tenant_id: tenant
+            admin_state_up: boolean
+            status: 'ACTIVE'
+        Returns the network list of dictionaries
+        '''
+        try:
+            if 'tenant_id' not in filter_dict:
+                filter_dict["tenant_id"] = self._get_my_tenant()
+            elif not filter_dict["tenant_id"]:
+                del filter_dict["tenant_id"]
+            filterquery=[]
+            filterquery_text=''
+            for k,v in filter_dict.items():
+                filterquery.append(str(k)+'='+str(v))
+            if len(filterquery)>0:
+                filterquery_text='?'+ '&'.join(filterquery)
+            url = self.url+'/networks'+filterquery_text
+            self.logger.info("Getting network list GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            return response['networks']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def get_network(self, net_id):
+        '''Obtain network details of network id'''
+        try:
+            url = self.url+'/networks/'+net_id
+            self.logger.info("Getting network GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            return response['network']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+            
+    def delete_network(self, net_id):
+        '''Deletes a tenant network from VIM'''
+        '''Returns the network identifier'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/networks/'+net_id
+            self.logger.info("Deleting VIM network DELETE %s", url)
+            vim_response = requests.delete(url, headers=self.headers_req)
+            self._check_http_request_response(vim_response)
+            #self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            return net_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+
+    def get_flavor_list(self):
+        '''Obtain flavor details from the  VIM'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/flavors'
+            self.logger.info("Getting flavor GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, get_flavors_response_schema)
+            r = self._remove_extra_items(response, get_flavors_response_schema)
+            if r is not None: 
+                self.logger.warn("Warning: remove extra items %s", str(r))
+            return response['flavors']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+        
+
+    def get_flavor(self, flavor_id):
+        '''Obtain flavor details from the  VIM'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/flavors/'+flavor_id
+            self.logger.info("Getting flavor GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, get_flavor_response_schema)
+            #r = self._remove_extra_items(response, get_flavor_response_schema)
+            #if r is not None: 
+            #    self.logger.warn("Warning: remove extra items %s", str(r))
+            return response['flavor']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+        
+    def new_flavor(self, flavor_data):
+        '''Adds a tenant flavor to VIM'''
+        '''Returns the flavor identifier'''
+        try:
+            self._get_my_tenant()
+            payload_req = json.dumps({'flavor': flavor_data})
+            url = self.url+'/'+self.tenant+'/flavors'
+            self.logger.info("Adding a new VIM flavor POST %s", url)
+            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, new_flavor_response_schema)
+            r = self._remove_extra_items(response, new_flavor_response_schema)
+            if r is not None: 
+                self.logger.warn("Warning: remove extra items %s", str(r))
+            flavor_id = response['flavor']['id']
+            return flavor_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def delete_flavor(self,flavor_id):
+        '''Deletes a tenant flavor from VIM'''
+        '''Returns the old flavor_id'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/flavors/'+flavor_id
+            self.logger.info("Deleting VIM flavor DELETE %s", url)
+            vim_response = requests.delete(url, headers=self.headers_req)
+            self._check_http_request_response(vim_response)
+            #self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            return flavor_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def get_image_list(self):
+        '''Obtain image details from the  VIM'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/images'
+            self.logger.info("Getting image GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, get_images_response_schema)
+            #r = self._remove_extra_items(response, get_images_response_schema)
+            #if r is not None: 
+            #    self.logger.warn("Warning: remove extra items %s", str(r))
+            return response['images']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def get_image(self, image_id):
+        '''Obtain image details from the  VIM'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/images/'+image_id
+            self.logger.info("Getting image GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, get_image_response_schema)
+            #r = self._remove_extra_items(response, get_image_response_schema)
+            #if r is not None: 
+            #    self.logger.warn("Warning: remove extra items %s", str(r))
+            return response['image']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def new_image(self,image_dict):
+        ''' Adds a tenant image to VIM, returns image_id'''
+        try:
+            self._get_my_tenant()
+            new_image_dict={'name': image_dict['name']}
+            if image_dict.get('description'):
+                new_image_dict['description'] = image_dict['description']
+            if image_dict.get('metadata'):
+                new_image_dict['metadata'] = yaml.load(image_dict['metadata'])
+            if image_dict.get('location'):
+                new_image_dict['path'] = image_dict['location']
+            payload_req = json.dumps({"image":new_image_dict})
+            url=self.url + '/' + self.tenant + '/images'
+            self.logger.info("Adding a new VIM image POST %s", url)
+            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, new_image_response_schema)
+            r = self._remove_extra_items(response, new_image_response_schema)
+            if r is not None: 
+                self.logger.warn("Warning: remove extra items %s", str(r))
+            image_id = response['image']['id']
+            return image_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+            
+    def delete_image(self, image_id):
+        '''Deletes a tenant image from VIM'''
+        '''Returns the deleted image_id'''
+        try:
+            self._get_my_tenant()
+            url = self.url + '/'+ self.tenant +'/images/'+image_id
+            self.logger.info("Deleting VIM image DELETE %s", url)
+            vim_response = requests.delete(url, headers=self.headers_req)
+            self._check_http_request_response(vim_response)
+            #self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            return image_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    
+    def get_image_id_from_path(self, path):
+        '''Get the image id from image path in the VIM database'''
+        try:
+            self._get_my_tenant()
+            url=self.url + '/' + self.tenant + '/images?path='+path
+            self.logger.info("Getting images GET %s", url)
+            vim_response = requests.get(url)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, get_images_response_schema)
+            #r = self._remove_extra_items(response, get_images_response_schema)
+            #if r is not None: 
+            #    self.logger.warn("Warning: remove extra items %s", str(r))
+            if len(response['images'])==0:
+                raise vimconn.vimconnNotFoundException("Image not found at VIM with path '%s'", path)
+            elif len(response['images'])>1:
+                raise vimconn.vimconnConflictException("More than one image found at VIM with path '%s'", path)
+            return response['images'][0]['id']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def new_vminstancefromJSON(self, vm_data):
+        '''Adds a VM instance to VIM'''
+        '''Returns the instance identifier'''
+        try:
+            self._get_my_tenant()
+        except Exception as e:
+            return -vimconn.HTTP_Not_Found, str(e)
+        print("VIMConnector: Adding a new VM instance from JSON to VIM")
+        payload_req = vm_data
+        try:
+            vim_response = requests.post(self.url+'/'+self.tenant+'/servers', headers = self.headers_req, data=payload_req)
+        except requests.exceptions.RequestException as  e:
+            print("new_vminstancefromJSON Exception: ", e.args)
+            return -vimconn.HTTP_Not_Found, str(e.args[0])
+        print(vim_response)
+        #print vim_response.status_code
+        if vim_response.status_code == 200:
+            #print vim_response.json()
+            #print json.dumps(vim_response.json(), indent=4)
+            res,http_content = self._format_in(vim_response, new_image_response_schema)
+            #print http_content
+            if res:
+                r = self._remove_extra_items(http_content, new_image_response_schema)
+                if r is not None: print("Warning: remove extra items {}".format(r))
+                #print http_content
+                vminstance_id = http_content['server']['id']
+                print("Tenant image id: ",vminstance_id)
+                return vim_response.status_code,vminstance_id
+            else: return -vimconn.HTTP_Bad_Request,http_content
+        else:
+            #print vim_response.text
+            jsonerror = self._format_jsonerror(vim_response)
+            text = 'Error in VIM "%s": not possible to add new vm instance. HTTP Response: %d. Error: %s' % (self.url, vim_response.status_code, jsonerror)
+            #print text
+            return -vim_response.status_code,text
+
+    def new_vminstance(self,name,description,start,image_id,flavor_id,net_list):
+        '''Adds a VM instance to VIM
+        Params:
+            start: indicates if VM must start or boot in pause mode. Ignored
+            image_id,flavor_id: image and flavor uuid
+            net_list: list of interfaces, each one is a dictionary with:
+                name:
+                net_id: network uuid to connect
+                vpci: virtual vcpi to assign
+                model: interface model, virtio, e2000, ...
+                mac_address: 
+                use: 'data', 'bridge',  'mgmt'
+                type: 'virtual', 'PF', 'VF', 'VFnotShared'
+                vim_id: filled/added by this function
+                #TODO ip, security groups
+        Returns the instance identifier
+        '''
+        try:
+            self._get_my_tenant()
+#            net_list = []
+#            for k,v in net_dict.items():
+#                print k,v
+#                net_list.append('{"name":"' + k + '", "uuid":"' + v + '"}')
+#            net_list_string = ', '.join(net_list) 
+            virtio_net_list=[]
+            for net in net_list:
+                if not net.get("net_id"):
+                    continue
+                net_dict={'uuid': net["net_id"]}
+                if net.get("type"):        net_dict["type"] = net["type"]
+                if net.get("name"):        net_dict["name"] = net["name"]
+                if net.get("vpci"):        net_dict["vpci"] = net["vpci"]
+                if net.get("model"):       net_dict["model"] = net["model"]
+                if net.get("mac_address"): net_dict["mac_address"] = net["mac_address"]
+                virtio_net_list.append(net_dict)
+            payload_dict={  "name":        name,
+                            "description": description,
+                            "imageRef":    image_id,
+                            "flavorRef":   flavor_id,
+                            "networks": virtio_net_list
+                        }
+            if start is not None:
+                payload_dict["start"] = start
+            payload_req = json.dumps({"server": payload_dict})
+            url = self.url+'/'+self.tenant+'/servers'
+            self.logger.info("Adding a new vm POST %s DATA %s", url, payload_req)
+            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, new_vminstance_response_schema)
+            #r = self._remove_extra_items(response, new_vminstance_response_schema)
+            #if r is not None: 
+            #    self.logger.warn("Warning: remove extra items %s", str(r))
+            vminstance_id = response['server']['id']
+
+            #connect data plane interfaces to network
+            for net in net_list:
+                if net["type"]=="virtual":
+                    if not net.get("net_id"):
+                        continue
+                    for iface in response['server']['networks']:
+                        if "name" in net:
+                            if net["name"]==iface["name"]:
+                                net["vim_id"] = iface['iface_id']
+                                break
+                        elif "net_id" in net:
+                            if net["net_id"]==iface["net_id"]:
+                                net["vim_id"] = iface['iface_id']
+                                break
+                else: #dataplane
+                    for numa in response['server'].get('extended',{}).get('numas',() ):
+                        for iface in numa.get('interfaces',() ):
+                            if net['name'] == iface['name']:
+                                net['vim_id'] = iface['iface_id']
+                                #Code below is not needed; current openvim connects dataplane interfaces
+                                #if net.get("net_id"):
+                                ##connect dataplane interface
+                                #    result, port_id = self.connect_port_network(iface['iface_id'], net["net_id"])
+                                #    if result < 0:
+                                #        error_text = "Error attaching port %s to network %s: %s." % (iface['iface_id'], net["net_id"], port_id)
+                                #        print "new_vminstance: " + error_text
+                                #        self.delete_vminstance(vminstance_id)
+                                #        return result, error_text
+                                break
+        
+            return vminstance_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+
+    def get_vminstance_list(self):
+        '''Obtain VM instance list from the  VIM'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/servers'
+            self.logger.info("Getting servers GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, get_server_response_schema)
+            r = self._remove_extra_items(response, get_server_response_schema)
+            if r is not None: 
+                self.logger.warn("Warning: remove extra items %s", str(r))
+            return response['servers']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+        
+        
+    def get_vminstance(self, vm_id):
+        '''Returns the VM instance information from VIM'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/servers/'+vm_id
+            self.logger.info("Getting vm GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, new_vminstance_response_schema)
+            #r = self._remove_extra_items(response, new_vminstance_response_schema)
+            #if r is not None: 
+            #    self.logger.warn("Warning: remove extra items %s", str(r))
+            return response['server']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+        
+    def delete_vminstance(self, vm_id):
+        '''Removes a VM instance from VIM, returns the deleted vm_id'''
+        try:
+            self._get_my_tenant()
+            url = self.url+'/'+self.tenant+'/servers/'+vm_id
+            self.logger.info("Deleting VIM vm DELETE %s", url)
+            vim_response = requests.delete(url, headers=self.headers_req)
+            self._check_http_request_response(vim_response)
+            #self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            return vm_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+    def refresh_vms_status(self, vm_list):
+        '''Refreshes the status of the virtual machines'''
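+        # Illustrative sketch only (assembled from the code below, not a
+        # formal contract): the returned vm_dict maps each requested vm_id
+        # to a status record such as
+        #   {"<vm_id>": {"status": "ACTIVE",
+        #                "vim_info": "<yaml dump of the server>",
+        #                "interfaces": [{"mac_address": "...",
+        #                                "vim_net_id": "...",
+        #                                "vim_interface_id": "...",
+        #                                "ip_address": "..."}]}}
+        # with "error_msg" added when the VIM reports an error.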
+        try:
+            self._get_my_tenant()
+        except requests.exceptions.RequestException as e:
+            self._format_request_exception(e)
+        vm_dict={}
+        for vm_id in vm_list:
+            vm={}
+            #print "VIMConnector refresh_tenant_vms and nets: Getting tenant VM instance information from VIM"
+            try:
+                url = self.url+'/'+self.tenant+'/servers/'+ vm_id
+                self.logger.info("Getting vm GET %s", url)
+                vim_response = requests.get(url, headers = self.headers_req)
+                self._check_http_request_response(vim_response)
+                response = vim_response.json()
+                js_v(response, new_vminstance_response_schema)
+                if response['server']['status'] in vmStatus2manoFormat:
+                    vm['status'] = vmStatus2manoFormat[ response['server']['status']  ]
+                else:
+                    vm['status'] = "OTHER"
+                    vm['error_msg'] = "VIM status reported " + response['server']['status']
+                if response['server'].get('last_error'):
+                    vm['error_msg'] = response['server']['last_error']
+                vm["vim_info"] = yaml.safe_dump(response['server'])
+                #get interfaces info
+                try:
+                    management_ip = False
+                    url2 = self.url+'/ports?device_id='+ vm_id
+                    self.logger.info("Getting PORTS GET %s", url2)
+                    vim_response2 = requests.get(url2, headers = self.headers_req)
+                    self._check_http_request_response(vim_response2)
+                    client_data = vim_response2.json()
+                    if isinstance(client_data.get("ports"), list):
+                        vm["interfaces"] = []
+                    for port in client_data.get("ports") or []:
+                        interface={}
+                        interface['vim_info']  = yaml.safe_dump(port)
+                        interface["mac_address"] = port.get("mac_address")
+                        interface["vim_net_id"] = port["network_id"]
+                        interface["vim_interface_id"] = port["id"]
+                        interface["ip_address"] = port.get("ip_address")
+                        if interface["ip_address"] == "0.0.0.0":
+                            interface["ip_address"] = None
+                        if interface["ip_address"]:
+                            management_ip = True
+                        vm["interfaces"].append(interface)
+                        
+                except Exception as e:
+                    self.logger.error("refresh_vms_status. Port get %s: %s", type(e).__name__, str(e))
+
+                if vm['status'] == "ACTIVE" and not management_ip:
+                    vm['status'] = "ACTIVE:NoMgmtIP"
+                    
+            except vimconn.vimconnNotFoundException as e:
+                self.logger.error("Exception getting vm status: %s", str(e))
+                vm['status'] = "DELETED"
+                vm['error_msg'] = str(e)
+            except (requests.exceptions.RequestException, js_e.ValidationError, vimconn.vimconnException) as e:
+                self.logger.error("Exception getting vm status: %s", str(e))
+                vm['status'] = "VIM_ERROR"
+                vm['error_msg'] = str(e)
+            vm_dict[vm_id] = vm
+        return vm_dict
+
+    def refresh_nets_status(self, net_list):
+        '''Get the status of the networks
+           Params: the list of network identifiers
+           Returns a dictionary with:
+                net_id:         #VIM id of this network
+                    status:     #Mandatory. Text with one of:
+                                #  DELETED (not found at vim)
+                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 
+                                #  OTHER (Vim reported other status not understood)
+                                #  ERROR (VIM indicates an ERROR status)
+                                #  ACTIVE, INACTIVE, DOWN (admin down), 
+                                #  BUILD (on building process)
+                                #
+                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR 
+                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+
+        '''
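+        # Example of a possible return value, sketched from the docstring
+        # above (illustrative values only):
+        #   {"net-uuid-1": {"status": "ACTIVE",
+        #                   "vim_info": "<yaml dump of the network>"},
+        #    "net-uuid-2": {"status": "VIM_ERROR",
+        #                   "error_msg": "Cannot connect to VIM"}}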
+        try:
+            self._get_my_tenant()
+        except requests.exceptions.RequestException as e:
+            self._format_request_exception(e)
+        
+        net_dict={}
+        for net_id in net_list:
+            net = {}
+            #print "VIMConnector refresh_tenant_vms_and_nets: Getting tenant network from VIM (tenant: " + str(self.tenant) + "): "
+            try:
+                net_vim = self.get_network(net_id)
+                if net_vim['status'] in netStatus2manoFormat:
+                    net["status"] = netStatus2manoFormat[ net_vim['status'] ]
+                else:
+                    net["status"] = "OTHER"
+                    net["error_msg"] = "VIM status reported " + net_vim['status']
+                    
+                if net["status"] == "ACTIVE" and not net_vim['admin_state_up']:
+                    net["status"] = "DOWN"
+                if net_vim.get('last_error'):
+                    net['error_msg'] = net_vim['last_error']
+                net["vim_info"] = yaml.safe_dump(net_vim)
+            except vimconn.vimconnNotFoundException as e:
+                self.logger.error("Exception getting net status: %s", str(e))
+                net['status'] = "DELETED"
+                net['error_msg'] = str(e)
+            except (requests.exceptions.RequestException, js_e.ValidationError, vimconn.vimconnException) as e:
+                self.logger.error("Exception getting net status: %s", str(e))
+                net['status'] = "VIM_ERROR"
+                net['error_msg'] = str(e)
+            net_dict[net_id] = net
+        return net_dict
+    
+    def action_vminstance(self, vm_id, action_dict):
+        '''Send an action to a VM instance in the VIM.
+        Returns the vm_id'''
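+        # Hedged example: action_dict mirrors the body of the openvim
+        # "action" REST call; a typical request might be {"start": None} or
+        # {"shutdown": None}. The exact action names are defined by openvim,
+        # not by this connector; "console" is explicitly rejected below.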
+        try:
+            self._get_my_tenant()
+            if "console" in action_dict:
+                raise vimconn.vimconnException("getting console is not available at openvim", http_code=vimconn.HTTP_Service_Unavailable)
+            url = self.url+'/'+self.tenant+'/servers/'+vm_id+"/action"
+            self.logger.info("Action over VM instance POST %s", url)
+            vim_response = requests.post(url, headers = self.headers_req, data=json.dumps(action_dict) )
+            self._check_http_request_response(vim_response)
+            return vm_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+
+# NOT USED METHODS in current version
+
+    def host_vim2gui(self, host, server_dict):
+        '''Transform host dictionary from VIM format to GUI format,
+        and append to the server_dict
+        '''
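+        # Illustrative sketch of the entry appended to server_dict by the
+        # loop below (one entry per host, keyed by NUMA socket):
+        #   server_dict["<host name>"] = {
+        #       "RAD":        {<numa_socket>: {"memory": {...},
+        #                                      "cpus": {...},
+        #                                      "ports": {...}}},
+        #       "occupation": {<numa_socket>: {"memory": "...GB",
+        #                                      "cores": [...],
+        #                                      "ports": {...}}}}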
+        if not isinstance(server_dict, dict):
+            print('vimconnector.host_vim2gui() ERROR, param server_dict must be a dictionary')
+            return
+        RAD={}
+        occupation={}
+        for numa in host['host']['numas']:
+            RAD_item={}
+            occupation_item={}
+            #memory
+            RAD_item['memory']={'size': str(numa['memory'])+'GB', 'eligible': str(numa['hugepages'])+'GB'}
+            occupation_item['memory']= str(numa['hugepages_consumed'])+'GB'
+            #cpus
+            RAD_item['cpus']={}
+            RAD_item['cpus']['cores'] = []
+            RAD_item['cpus']['eligible_cores'] = []
+            occupation_item['cores']=[]
+            for _ in range(0, len(numa['cores']) // 2):
+                RAD_item['cpus']['cores'].append( [] )
+            for core in numa['cores']:
+                RAD_item['cpus']['cores'][core['core_id']].append(core['thread_id'])
+                if 'status' not in core:
+                    RAD_item['cpus']['eligible_cores'].append(core['thread_id'])
+                if 'instance_id' in core:
+                    occupation_item['cores'].append(core['thread_id'])
+            #ports
+            RAD_item['ports']={}
+            occupation_item['ports']={}
+            for iface in numa['interfaces']:
+                RAD_item['ports'][ iface['pci'] ] = 'speed:'+str(iface['Mbps'])+'M'
+                occupation_item['ports'][ iface['pci'] ] = { 'occupied': str(100 * iface['Mbps_consumed'] // iface['Mbps']) + "%" }
+                
+            RAD[ numa['numa_socket'] ] = RAD_item
+            occupation[ numa['numa_socket'] ] = occupation_item
+        server_dict[ host['host']['name'] ] = {'RAD':RAD, 'occupation':occupation}
+
+    def get_hosts_info(self):
+        '''Get the information of deployed hosts
+        Returns the hosts content'''
+    #obtain hosts list
+        url=self.url+'/hosts'
+        try:
+            vim_response = requests.get(url)
+        except requests.exceptions.RequestException as e:
+            print("get_hosts_info Exception: {}", e.args)
+            return -vimconn.HTTP_Not_Found, str(e.args[0])
+        print("vim get {}, response:{} {}",url, vim_response.status_code, vim_response.json())
+        #print vim_response.status_code
+        #print json.dumps(vim_response.json(), indent=4)
+        if vim_response.status_code != 200:
+            #TODO: get error
+            print('vimconnector.get_hosts_info error getting host list %d %s' % (vim_response.status_code, vim_response.json()))
+            return -vim_response.status_code, "Error getting host list"
+        
+        res,hosts = self._format_in(vim_response, get_hosts_response_schema)
+            
+        if not res:
+            print("vimconnector.get_hosts_info error parsing GET HOSTS vim response", hosts)
+            return vimconn.HTTP_Internal_Server_Error, hosts
+    #obtain hosts details
+        hosts_dict={}
+        for host in hosts['hosts']:
+            url=self.url+'/hosts/'+host['id']
+            try:
+                vim_response = requests.get(url)
+            except requests.exceptions.RequestException as e:
+                print("get_hosts_info Exception: ", e.args)
+                return -vimconn.HTTP_Not_Found, str(e.args[0])
+            print("vim get {} response{} {}", url,vim_response.status_code, vim_response.json())
+            if vim_response.status_code != 200:
+                print('vimconnector.get_hosts_info error getting detailed host %d %s' % (vim_response.status_code, vim_response.json()))
+                continue
+            res,host_detail = self._format_in(vim_response, get_host_detail_response_schema)
+            if not res:
+                print("vimconnector.get_hosts_info error parsing GET HOSTS/%s vim response: %s" % (host['id'], host_detail))
+                continue
+            #print 'host id '+host['id'], json.dumps(host_detail, indent=4)
+            self.host_vim2gui(host_detail, hosts_dict)
+        return 200, hosts_dict
+
+    def get_hosts(self, vim_tenant):
+        '''Get the hosts and deployed instances
+        Returns the hosts content'''
+    #obtain hosts list
+        url=self.url+'/hosts'
+        try:
+            vim_response = requests.get(url)
+        except requests.exceptions.RequestException as e:
+            print("get_hosts Exception: ", e.args)
+            return -vimconn.HTTP_Not_Found, str(e.args[0])
+        print("vim get {} response:{} {}",   url , vim_response.status_code, vim_response.json())
+        #print vim_response.status_code
+        #print json.dumps(vim_response.json(), indent=4)
+        if vim_response.status_code != 200:
+            #TODO: get error
+            print('vimconnector.get_hosts error getting host list %d %s' % (vim_response.status_code, vim_response.json()))
+            return -vim_response.status_code, "Error getting host list"
+        
+        res,hosts = self._format_in(vim_response, get_hosts_response_schema)
+            
+        if not res:
+            print("vimconnector.get_host error parsing GET HOSTS vim response {}".format(hosts))
+            return vimconn.HTTP_Internal_Server_Error, hosts
+    #obtain instances from hosts
+        for host in hosts['hosts']:
+            url=self.url+'/' + vim_tenant + '/servers?hostId='+host['id']
+            try:
+                vim_response = requests.get(url)
+            except requests.exceptions.RequestException as e:
+                print("get_hosts Exception:{}", e.args)
+                return -vimconn.HTTP_Not_Found, str(e.args[0])
+            print("vim get {} response: {} {}",  url, vim_response.status_code, vim_response.json())
+            if vim_response.status_code != 200:
+                print('vimconnector.get_hosts error getting instances at host %d %s' % (vim_response.status_code, vim_response.json()))
+                continue
+            res,servers = self._format_in(vim_response, get_server_response_schema)
+            if not res:
+                print("vimconnector.get_host error parsing GET SERVERS/%s vim response: %s" % (host['id'], servers))
+                continue
+            #print 'host id '+host['id'], json.dumps(host_detail, indent=4)
+            host['instances'] = servers['servers']
+        return 200, hosts['hosts']
+
+    def get_processor_rankings(self):
+        '''Get the processor rankings in the VIM database'''
+        url=self.url+'/processor_ranking'
+        try:
+            vim_response = requests.get(url)
+        except requests.exceptions.RequestException as e:
+            print("get_processor_rankings Exception:{}", e.args)
+            return -vimconn.HTTP_Not_Found, str(e.args[0])
+        print("vim get {} response: {} {}", url, vim_response.status_code, vim_response.json())
+        #print vim_response.status_code
+        #print json.dumps(vim_response.json(), indent=4)
+        if vim_response.status_code != 200:
+            #TODO: get error
+            print('vimconnector.get_processor_rankings error getting processor rankings %d %s' % (vim_response.status_code, vim_response.json()))
+            return -vim_response.status_code, "Error getting processor rankings"
+        
+        res,rankings = self._format_in(vim_response, get_processor_rankings_response_schema)
+        return res, rankings['rankings']
+    
+    def new_host(self, host_data):
+        '''Adds a new host to VIM.
+        Returns the host identifier'''
+        payload_req = host_data
+        try:
+            url = self.url_admin+'/hosts'
+            self.logger.info("Adding a new host POST %s", url)
+            vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            js_v(response, new_host_response_schema)
+            r = self._remove_extra_items(response, new_host_response_schema)
+            if r is not None: 
+                self.logger.warn("Warning: remove extra items %s", str(r))
+            host_id = response['host']['id']
+            return host_id
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
+    
+    def new_external_port(self, port_data):
+        '''Adds an external port to VIM.
+        Returns the port identifier'''
+        #TODO change to logging exception code policies
+        print("VIMConnector: Adding a new external port")
+        payload_req = port_data
+        try:
+            vim_response = requests.post(self.url_admin+'/ports', headers = self.headers_req, data=payload_req)
+        except requests.exceptions.RequestException as e:
+            self.logger.error("new_external_port Exception: ", str(e))
+            return -vimconn.HTTP_Not_Found, str(e.args[0])
+        print(vim_response)
+        #print vim_response.status_code
+        if vim_response.status_code == 200:
+        #print vim_response.json()
+        #print json.dumps(vim_response.json(), indent=4)
+            res, http_content = self._format_in(vim_response, new_port_response_schema)
+        #print http_content
+            if res:
+                r = self._remove_extra_items(http_content, new_port_response_schema)
+                if r is not None: print("Warning: remove extra items ", r)
+                #print http_content
+                port_id = http_content['port']['id']
+                print("Port id: {}",port_id)
+                return vim_response.status_code,port_id
+            else: return -vimconn.HTTP_Bad_Request,http_content
+        else:
+            #print vim_response.text
+            jsonerror = self._format_jsonerror(vim_response)
+            text = 'Error in VIM "%s": not possible to add new external port. HTTP Response: %d. Error: %s' % (self.url_admin, vim_response.status_code, jsonerror)
+            #print text
+            return -vim_response.status_code,text
+        
+    def new_external_network(self,net_name,net_type):
+        '''Adds a external network to VIM (shared)'''
+        '''Returns the network identifier'''
+        #TODO change to logging exception code policies
+        print("VIMConnector: Adding external shared network to VIM (type {}:{})", net_type.net_name)
+        
+        payload_req = '{"network":{"name": "' + net_name + '","shared":true,"type": "' + net_type + '"}}'
+        try:
+            vim_response = requests.post(self.url+'/networks', headers = self.headers_req, data=payload_req)
+        except requests.exceptions.RequestException as e:
+            self.logger.error( "new_external_network Exception: ", e.args)
+            return -vimconn.HTTP_Not_Found, str(e.args[0])
+        print(vim_response)
+        #print vim_response.status_code
+        if vim_response.status_code == 200:
+            #print vim_response.json()
+            #print json.dumps(vim_response.json(), indent=4)
+            res,http_content = self._format_in(vim_response, new_network_response_schema)
+            #print http_content
+            if res:
+                r = self._remove_extra_items(http_content, new_network_response_schema)
+                if r is not None: print("Warning: remove extra items ", r)
+                #print http_content
+                network_id = http_content['network']['id']
+                print("Network id: ",network_id)
+                return vim_response.status_code,network_id
+            else: return -vimconn.HTTP_Bad_Request,http_content
+        else:
+            #print vim_response.text
+            jsonerror = self._format_jsonerror(vim_response)
+            text = 'Error in VIM "%s": not possible to add new external network. HTTP Response: %d. Error: %s' % (self.url, vim_response.status_code, jsonerror)
+            #print text
+            return -vim_response.status_code,text
+        
+    def connect_port_network(self, port_id, network_id, admin=False):
+        '''Connects an external port to a network.
+        Returns the VIM response status code and the port identifier'''
+        #TODO change to logging exception code policies
+        print("VIMConnector: Connecting external port to network")
+        
+        payload_req = '{"port":{"network_id":"' + network_id + '"}}'
+        if admin:
+            if self.url_admin is None:
+                return -vimconn.HTTP_Unauthorized, "datacenter does not contain an admin URL"
+            url= self.url_admin
+        else:
+            url= self.url
+        try:
+            vim_response = requests.put(url +'/ports/'+port_id, headers = self.headers_req, data=payload_req)
+        except requests.exceptions.RequestException as e:
+            print("connect_port_network Exception: {}", e.args)
+            return -vimconn.HTTP_Not_Found, str(e.args[0])
+        print(vim_response)
+        #print vim_response.status_code
+        if vim_response.status_code == 200:
+            #print vim_response.json()
+            #print json.dumps(vim_response.json(), indent=4)
+            res,http_content = self._format_in(vim_response, new_port_response_schema)
+            #print http_content
+            if res:
+                r = self._remove_extra_items(http_content, new_port_response_schema)
+                if r is not None: print( "Warning: remove extra items ", r)
+                #print http_content
+                port_id = http_content['port']['id']
+                print("Port id:{} ",port_id)
+                return vim_response.status_code,port_id
+            else: return -vimconn.HTTP_Bad_Request,http_content
+        else:
+            print(vim_response.text)
+            jsonerror = self._format_jsonerror(vim_response)
+            text = 'Error in VIM "%s": not possible to connect external port to network. HTTP Response: %d. Error: %s' % (self.url_admin, vim_response.status_code, jsonerror)
+            print(text)
+            return -vim_response.status_code,text
+        
+    def get_port(self, port_id):
+        '''Obtain port details of port id'''
+        try:
+            url = self.url+'/ports/'+port_id
+            self.logger.info("Getting port GET %s", url)
+            vim_response = requests.get(url, headers = self.headers_req)
+            self._check_http_request_response(vim_response)
+            self.logger.debug(vim_response.text)
+            #print json.dumps(vim_response.json(), indent=4)
+            response = vim_response.json()
+            return response['port']
+        except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+            self._format_request_exception(e)
diff --git a/rwcal/plugins/vala/rwcal_openmano_vimconnector/rwcal_openmano_vimconnector.py b/rwcal/plugins/vala/rwcal_openmano_vimconnector/rwcal_openmano_vimconnector.py
new file mode 100644 (file)
index 0000000..aa3d971
--- /dev/null
@@ -0,0 +1,664 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import logging
+from gi import require_version
+require_version('RwCal', '1.0')
+import rift.rwcal.openmano_vimconnector as vimconn_openvim
+import contextlib
+import requests
+import paramiko
+import os
+import uuid
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang)
+
+import rw_status
+import rift.cal.rwcal_status as rwcal_status
+import rwlogger
+
+logger = logging.getLogger('rwcal.openmano_vimconnector')
+
+class UnknownAccountError(Exception):
+    pass
+
+class OpenvimCALOperationFailure(Exception):
+    pass
+
+class MissingFileError(Exception):
+    pass
+
+
+class ImageLocationError(Exception):
+    pass
+
+class UninitializedPluginError(Exception):
+    pass
+
+rwstatus_exception_map = {IndexError: RwTypes.RwStatus.NOTFOUND,
+                          KeyError: RwTypes.RwStatus.NOTFOUND,
+                          UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+                          MissingFileError: RwTypes.RwStatus.NOTFOUND,
+                          } 
+
+rwstatus = rw_status.rwstatus_from_exc_map(rwstatus_exception_map)
+rwcalstatus = rwcal_status.rwcalstatus_from_exc_map(rwstatus_exception_map)
+
+
+class RwcalOpenmanoVimConnector(GObject.Object, RwCal.Cloud):
+    """Stub implementation the CAL VALA methods for Openmano. """
+
+    instance_num = 1
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self._driver_class = vimconn_openvim.vimconnector
+        self.log = logging.getLogger('rwcal.openmano_vimconnector.%s' % RwcalOpenmanoVimConnector.instance_num)
+        self.log.setLevel(logging.DEBUG)
+        self._rwlog_handler = None
+        self._tenant_name = None
+        RwcalOpenmanoVimConnector.instance_num += 1
+
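+    # Usage sketch (this is the pattern every CAL method below follows): the
+    # context manager yields a vimconnector driver bound to the account's
+    # openvim endpoint, e.g.
+    #
+    #     with self._use_driver(account) as drv:
+    #         networks = drv.get_network_list()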
+    @contextlib.contextmanager
+    def _use_driver(self, account):
+        #if self._rwlog_handler is None:
+        #    raise UninitializedPluginError("Must call init() in CAL plugin before use.")
+
+        #with rwlogger.rwlog_root_handler(self._rwlog_handler):
+            try:
+                if self._tenant_name != account.openvim.tenant_name:
+                    tmp_drv = self._driver_class(uuid = '',
+                                  name  = '',
+                                  #tenant_id  = account.openvim.tenant_id,
+                                  tenant_id  = '',
+                                  tenant_name = '',
+                                  url   ='http://{}:{}/openvim'.format(account.openvim.host,account.openvim.port),
+                                  url_admin = '')
+                    tenant_dict = {'name':account.openvim.tenant_name}
+                    tenant_list = tmp_drv.get_tenant_list(tenant_dict)
+                    if len(tenant_list) == 0:
+                        tmp_drv.new_tenant(account.openvim.tenant_name, "default tenant")
+                    self._tenant_name = account.openvim.tenant_name
+
+                drv = self._driver_class(uuid = '',
+                                  name  = '',
+                                  #tenant_id  = account.openvim.tenant_id,
+                                  tenant_id  = '',
+                                  tenant_name = account.openvim.tenant_name,
+                                  url   ='http://{}:{}/openvim'.format(account.openvim.host,account.openvim.port),
+                                  url_admin = '')
+
+            except Exception as e:
+                self.log.error("RwcalOpenmanoVimConnectorPlugin: VimConnector init failed. Exception: %s" %(str(e)))
+                raise
+
+            yield drv
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    category="rw-cal-log",
+                    subcategory="openmano_vimconnector",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_cloud_creds(self, account):
+        """
+        Validates the cloud account credentials for the specified account.
+        If creds are not valid, returns an error code & reason string
+        Arguments:
+            account - a cloud account to validate
+
+        Returns:
+            Validation Code and Details String
+        """
+        status = RwcalYang.CloudConnectionStatus()
+        url = 'http://{}:{}/openvim/'.format(account.openvim.host,account.openvim.port)
+        try:
+            r=requests.get(url,timeout=3)
+            r.raise_for_status()
+        except requests.exceptions.HTTPError as e:
+            self.log.error("OpenvimConnectorPlugin: Openvim account credential validation failed. Exception: %s", str(e))
+            status.status = "failure"
+            status.details = "Invalid Credentials: %s" % str(e)
+        except Exception as e:
+            self.log.error("OpenvimConnectorPlugin: Openvim connection failed. Exception: %s", str(e))
+            status.status = "failure"
+            status.details = "Connection Failed (Invlaid URL): %s" % str(e)
+        else:
+            self.log.debug("Openvim Successfully connected")
+            status.status = "success"
+            status.details = "Connection was successful"
+
+        return status
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_management_network(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_tenant(self, account, name):
+        with self._use_driver(account) as drv:
+            return drv.new_tenant(name, "New CAL tenant")
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        with self._use_driver(account) as drv:
+            drv.delete_tenant(tenant_id)
+
+    @staticmethod
+    def _fill_tenant_info(tenant_info):
+        """Create a GI object from tenant info dictionary
+
+        Converts tenant information dictionary object returned by openmano vimconnector
+        driver into Protobuf Gi Object
+
+        Arguments:
+            tenant_info - tenant information dictionary object
+
+        Returns:
+            The TenantInfoItem
+        """
+        tenant = RwcalYang.TenantInfoItem()
+        tenant.tenant_name = tenant_info['name']
+        tenant.tenant_id = tenant_info['id']
+        return tenant
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        response = RwcalYang.VimResources()
+        with self._use_driver(account) as drv:
+            tenants = drv.get_tenant_list()
+        for tenant in tenants:
+            response.tenantinfo_list.append(RwcalOpenmanoVimConnector._fill_tenant_info(tenant))
+        return response
+
+    @rwstatus
+    def do_create_role(self, account, name):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_image(self, account, image):
+        with self._use_driver(account) as drv:
+            try:
+                # If the user passed in a file descriptor, use that to
+                # upload the image.
+                if image.has_field("fileno"):
+                    new_fileno = os.dup(image.fileno)
+                    hdl = os.fdopen(new_fileno, 'rb')
+                else:
+                    hdl = open(image.location, "rb")
+            except Exception as e:
+                self.log.error("Could not open file for upload. Exception received: %s", str(e))
+                raise
+
+            tpt = paramiko.Transport((account.openvim.host, 22))
+            try:
+                tpt.connect(username=account.openvim.image_management.username,
+                            password=account.openvim.image_management.password)
+            except Exception as e:
+                self.log.error('Could not connect to openvim host: %s. Exception: %s', account.openvim.host, e)
+                return
+
+            sftp = paramiko.SFTPClient.from_transport(tpt)
+            destination = account.openvim.image_management.image_directory_path.rstrip('/')+'/'+image.name
+            with hdl as fd:
+                try:
+                    sftp.putfo(fd, destination)
+                except Exception as e:
+                    self.log.warn('*** Caught exception: %s: %s', e.__class__, e)
+                finally:
+                    sftp.close()
+                    tpt.close()
+
+            image_dict = {}
+            image_dict['name'] = image.name
+            image_dict['location'] = destination
+            image_id = drv.new_image(image_dict)
+        return image_id
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        with self._use_driver(account) as drv:
+            drv.delete_image(image_id)
+
+    @staticmethod
+    def _fill_image_info(img_info):
+        img = RwcalYang.ImageInfoItem()
+        img.name = img_info['name']
+        img.id = img_info['id']
+        img.location = img_info['path']
+        if img_info['status'] == 'ACTIVE':
+            img.state = 'active'
+        else:
+            img.state = 'inactive'
+        return img
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        with self._use_driver(account) as drv:
+            image = drv.get_image(image_id)
+        return RwcalOpenmanoVimConnector._fill_image_info(image)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        response = RwcalYang.VimResources()
+        with self._use_driver(account) as drv:
+            images = drv.get_image_list()
+        for img in images:
+            image_info = drv.get_image(img['id'])
+            response.imageinfo_list.append(RwcalOpenmanoVimConnector._fill_image_info(image_info))
+        return response
+
+    @rwstatus
+    def do_create_vm(self, account, vm):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        return RwcalYang.VimResources()
+
+    @staticmethod
+    def _fill_flavor_create_attributes(flavor):
+        flavor_dict = dict()
+        flavor_dict['name'] = flavor.name
+        flavor_dict['ram'] = flavor.vm_flavor.memory_mb
+        flavor_dict['disk'] = flavor.vm_flavor.storage_gb
+        flavor_dict['vcpus'] = flavor.vm_flavor.vcpu_count 
+        return flavor_dict
+
+    @rwstatus
+    def do_create_flavor(self, account, flavor):
+        with self._use_driver(account) as drv:
+            flavor_dict = RwcalOpenmanoVimConnector._fill_flavor_create_attributes(flavor) 
+            flavor_id = drv.new_flavor(flavor_dict)
+        return flavor_id
+
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        with self._use_driver(account) as drv:
+            drv.delete_flavor(flavor_id)
+
+    @staticmethod
+    def _fill_epa_attributes(flavor, flavor_info):
+        if 'ram' in flavor_info and flavor_info['ram']:
+            flavor.vm_flavor.memory_mb = flavor_info.get('ram', 0)
+        if 'disk' in flavor_info and flavor_info['disk']:
+            flavor.vm_flavor.storage_gb = flavor_info.get('disk', 0)
+        if 'vcpus' in flavor_info and flavor_info['vcpus']:
+            flavor.vm_flavor.vcpu_count = flavor_info.get('vcpus', 0)
+
+        if 'extended' not in flavor_info or flavor_info['extended'] is None:
+            return
+        flavor.guest_epa.numa_node_policy.node_cnt = len(flavor_info['extended']['numas'])
+        for attr in flavor_info['extended']['numas']:
+            numa_node = flavor.guest_epa.numa_node_policy.node.add()
+            numa_node.memory_mb = attr.get('memory',0)*1024
+            #getattr(flavor, 'host_epa').cpu_core_thread_count =
+
+    @staticmethod
+    def _fill_flavor_info(flavor_info):
+        flavor = RwcalYang.FlavorInfoItem()
+        flavor.name                       = flavor_info['name']
+        flavor.id                         = flavor_info['id']
+        RwcalOpenmanoVimConnector._fill_epa_attributes(flavor, flavor_info)
+        return flavor
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, flavor_id):
+        with self._use_driver(account) as drv:
+            flavor = drv.get_flavor(flavor_id)
+        return RwcalOpenmanoVimConnector._fill_flavor_info(flavor)
+
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        response = RwcalYang.VimResources()
+        with self._use_driver(account) as drv:
+            flavors = drv.get_flavor_list()
+        for flav in flavors:
+            flav_info = drv.get_flavor(flav['id'])
+            response.flavorinfo_list.append(RwcalOpenmanoVimConnector._fill_flavor_info(flav_info))
+        return response
+
+    @rwstatus
+    def do_add_host(self, account, host):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_port(self, account, port):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        return RwcalYang.VimResources()
+
+    @rwstatus
+    def do_create_network(self, account, network):
+        with self._use_driver(account) as drv:
+            network_id = drv.new_network(network.name,'bridge_man')
+            return network_id
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        with self._use_driver(account) as drv:
+            drv.delete_network(network_id)
+
+    def _fill_network_info(self, network_info):
+        network                  = RwcalYang.NetworkInfoItem()
+        network.network_name     = network_info['name']
+        network.network_id       = network_info['id']
+        if ('provider:physical' in network_info) and (network_info['provider:physical']):
+            network.provider_network.physical_network = network_info['provider:physical'].upper()
+        if ('provider:vlan' in network_info) and (network_info['provider:vlan']):
+            network.provider_network.segmentation_id = network_info['provider:vlan']
+            network.provider_network.overlay_type = 'vlan'
+        return network
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, network_id):
+        with self._use_driver(account) as drv:
+            network = drv.get_network(network_id)
+        return self._fill_network_info(network)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        response = RwcalYang.VimResources()
+        with self._use_driver(account) as drv:
+            networks = drv.get_network_list()
+        for network in networks:
+            response.networkinfo_list.append(self._fill_network_info(network))
+        return response
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        with self._use_driver(account) as drv:
+            net = dict()
+            if link_params.provider_network.physical_network is not None:
+                net['provider:physical'] = link_params.provider_network.physical_network
+            #else:
+            #    net['provider:physical'] = 'default'
+            if link_params.provider_network.overlay_type == 'VLAN' and link_params.provider_network.segmentation_id:
+                net['provider:vlan'] = link_params.provider_network.segmentation_id
+            network_id = drv.new_network(link_params.name,'bridge_man',shared=False,**net)
+            return network_id
+
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        with self._use_driver(account) as drv:
+            drv.delete_network(link_id)
+
+
+    @staticmethod
+    def _fill_connection_point_info(c_point, port_info):
+        c_point.name = port_info['name']
+        c_point.connection_point_id = port_info['id']
+        if 'ip_address' in port_info:
+            c_point.ip_address = port_info['ip_address']
+        if port_info['status'] == 'ACTIVE':
+            c_point.state = 'active'
+        else:
+            c_point.state = 'inactive'
+        if 'network_id' in port_info:
+            c_point.virtual_link_id = port_info['network_id']
+        if ('device_id' in port_info) and (port_info['device_id']):
+            c_point.vdu_id = port_info['device_id']
+
+    def _fill_virtual_link_info(self, drv, network_info):
+        link = RwcalYang.VirtualLinkInfoParams()
+        link.name     = network_info['name']
+        link.virtual_link_id       = network_info['id']
+        if network_info['admin_state_up']:
+            link.state = 'active'
+        else:
+            link.state = 'inactive'
+        if ('provider:physical' in network_info) and (network_info['provider:physical']):
+            link.provider_network.physical_network = network_info['provider:physical']
+        if ('provider:vlan' in network_info) and (network_info['provider:vlan']):
+            link.provider_network.segmentation_id = network_info['provider:vlan']
+            link.provider_network.overlay_type = 'VLAN'
+
+        if 'ports' in network_info:
+            for port in network_info['ports']:
+                if 'port_id' in port:
+                    port_id = port['port_id']
+                    port = drv.get_port(port_id)
+                    c_point = link.connection_points.add()
+                    RwcalOpenmanoVimConnector._fill_connection_point_info(c_point, port)
+        return link
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        with self._use_driver(account) as drv:
+            network = drv.get_network(link_id)
+        return self._fill_virtual_link_info(drv,network)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_virtual_link_list(self, account):
+        response = RwcalYang.VNFResources()
+        with self._use_driver(account) as drv:
+            networks = drv.get_network_list()
+        for network in networks:
+            network_info = drv.get_network(network['id'])
+            response.virtual_link_info_list.append(self._fill_virtual_link_info(drv,network_info))
+        return response
+
+    def _match_vm_flavor(self, required, available):
+        self.log.info("Matching VM Flavor attributes required {}, available {}".format(required, available))
+        if available.vcpu_count != required.vcpu_count:
+            return False
+        if available.memory_mb != required.memory_mb:
+            return False
+        if available.storage_gb != required.storage_gb:
+            return False
+        self.log.debug("VM Flavor match found")
+        return True
+
+
+    def _select_resource_flavor(self, account, vdu_init):
+        """ 
+            Select an existing flavor if it matches the request, or create a new flavor
+        """
+        flavor = RwcalYang.FlavorInfoItem()
+        flavor.name = str(uuid.uuid4())
+        epa_types = ['vm_flavor', 'guest_epa', 'host_epa', 'host_aggregate', 'hypervisor_epa', 'vswitch_epa']
+        epa_dict = {k: v for k, v in vdu_init.as_dict().items() if k in epa_types}
+        flavor.from_dict(epa_dict)
+        rc, response = self.do_get_flavor_list(account)
+        if rc != RwTypes.RwStatus.SUCCESS:
+            self.log.error("Get-flavor-info-list operation failed for cloud account: %s",
+                        account.name)
+            raise OpenvimCALOperationFailure("Get-flavor-info-list operation failed for cloud account: %s" %(account.name))
+
+        flavor_id = None
+        flavor_list = response.flavorinfo_list
+        self.log.debug("Received %d flavor information from RW.CAL", len(flavor_list))
+        for flv in flavor_list:
+            self.log.info("Attempting to match compute requirement for VDU: %s with flavor %s",
+                       vdu_init.name, flv)
+            if self._match_vm_flavor(flavor.vm_flavor,flv.vm_flavor):
+                self.log.info("Flavor match found for compute requirements for VDU: %s with flavor name: %s, flavor-id: %s",
+                           vdu_init.name, flv.name, flv.id)
+                return flv.id
+
+        if account.openvim.dynamic_flavor_support is False:
+            self.log.error("Unable to create flavor for compute requirement for VDU: %s. VDU instantiation failed", vdu_init.name)
+            raise OpenvimCALOperationFailure("No resource available with matching EPA attributes")
+        else:
+            rc,flavor_id = self.do_create_flavor(account,flavor)
+            if rc != RwTypes.RwStatus.SUCCESS:
+                self.log.error("Create-flavor operation failed for cloud account: %s",
+                        account.name)
+                raise OpenvimCALOperationFailure("Create-flavor operation failed for cloud account: %s" %(account.name))
+            return flavor_id
+
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        with self._use_driver(account) as drv:
+            net_list = list()
+
+            if not vdu_init.has_field('flavor_id'):
+                vdu_init.flavor_id = self._select_resource_flavor(account,vdu_init)
+
+            if account.openvim.mgmt_network:
+                mgmt_net_list = drv.get_network_list()
+                mgmt_net_id = [net['id'] for net in mgmt_net_list if net['name'] == account.openvim.mgmt_network]
+                if len(mgmt_net_id) > 0:
+                    mgmt_net_dict = {}
+                    mgmt_net_dict['name'] = account.openvim.mgmt_network
+                    mgmt_net_dict['net_id'] = mgmt_net_id[0]
+                    mgmt_net_dict['type'] = 'virtual'
+                    net_list.append(mgmt_net_dict)
+                
+            for c_point in vdu_init.connection_points:
+                net_dict = {}
+                net_dict['name'] = c_point.name
+                net_dict['net_id'] = c_point.virtual_link_id
+                net_dict['type'] = 'virtual'
+                net_list.append(net_dict)
+
+            vm_id = drv.new_vminstance(vdu_init.name, vdu_init.name, None, vdu_init.image_id, vdu_init.flavor_id, net_list)
+            return vm_id
+
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        pass
+
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        if not vdu_id:
+            self.log.error("empty vdu_id during the vdu deletion")
+            return
+
+        with self._use_driver(account) as drv:
+            drv.delete_vminstance(vdu_id)
+
+    @staticmethod
+    def _fill_vdu_info(drv,account,vm_info):
+        vdu = RwcalYang.VDUInfoParams()
+        vdu.name = vm_info['name']
+        vdu.vdu_id = vm_info['id']
+        mgmt_net_id = None
+        if ('image' in vm_info) and ('id' in vm_info['image']):
+            vdu.image_id = vm_info['image']['id']
+        if ('flavor' in vm_info) and ('id' in vm_info['flavor']):
+            vdu.flavor_id = vm_info['flavor']['id']
+        vdu.cloud_type  = 'openvim'
+
+        if account.openvim.mgmt_network:
+            net_list = drv.get_network_list()
+            mgmt_net_list = [net['id'] for net in net_list if net['name'] == account.openvim.mgmt_network]
+            if len(mgmt_net_list) > 0:
+                mgmt_net_id = mgmt_net_list[0]
+
+        if 'networks' in vm_info:
+            for network in vm_info['networks']:
+                port_id = network['iface_id']
+                port = drv.get_port(port_id)
+                if 'network_id' in port and mgmt_net_id == port['network_id'] and 'ip_address' in port:
+                    vdu.management_ip = port['ip_address']
+                    vdu.public_ip = vdu.management_ip
+                else:
+                    c_point = vdu.connection_points.add()
+                    RwcalOpenmanoVimConnector._fill_connection_point_info(c_point, port)
+
+
+        if vm_info['status'] == 'ACTIVE' and vdu.management_ip != '':
+            vdu.state = 'active'
+        elif vm_info['status'] == 'ERROR':
+            vdu.state = 'failed'
+        else:
+            vdu.state = 'inactive'
+
+        if vdu.flavor_id:
+            flavor = drv.get_flavor(vdu.flavor_id)
+            RwcalOpenmanoVimConnector._fill_epa_attributes(vdu, flavor)
+        return vdu
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        with self._use_driver(account) as drv:
+            vm_info = drv.get_vminstance(vdu_id)
+        return RwcalOpenmanoVimConnector._fill_vdu_info(drv, account, vm_info)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_vdu_list(self, account):
+        vnf_resource = RwcalYang.VNFResources()
+        with self._use_driver(account) as drv:
+            vms = drv.get_vminstance_list()
+        for vm in vms:
+            vm_info = drv.get_vminstance(vm['id'])
+            vdu = RwcalOpenmanoVimConnector._fill_vdu_info(drv,account,vm_info)
+            vnf_resource.vdu_info_list.append(vdu)
+        return vnf_resource
+
diff --git a/rwcal/plugins/vala/rwcal_openstack/CMakeLists.txt b/rwcal/plugins/vala/rwcal_openstack/CMakeLists.txt
new file mode 100644 (file)
index 0000000..af92d7d
--- /dev/null
@@ -0,0 +1,36 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+### rwcal-openstack package
+set(PKG_NAME rwcal-openstack)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+rift_install_python_plugin(rwcal_openstack rwcal_openstack.py)
+
+rift_python_install_tree(
+  FILES
+    rift/rwcal/openstack/__init__.py
+    rift/rwcal/openstack/openstack_drv.py
+    rift/rwcal/openstack/openstack_utils.py
+    rift/rwcal/openstack/prepare_vm.py
+  PYTHON3_ONLY
+  COMPONENT ${PKG_LONG_NAME})
+
diff --git a/rwcal/plugins/vala/rwcal_openstack/Makefile b/rwcal/plugins/vala/rwcal_openstack/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/__init__.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/__init__.py
new file mode 100644 (file)
index 0000000..3226655
--- /dev/null
@@ -0,0 +1,22 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from .openstack_drv import (
+        OpenstackDriver,
+        ValidationError
+        )
+from .openstack_utils import OpenstackExtraSpecUtils
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_drv.py
new file mode 100644 (file)
index 0000000..2505da3
--- /dev/null
@@ -0,0 +1,1997 @@
+#!/usr/bin/python
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import json
+import logging
+import ipaddress
+
+from keystoneclient import v3 as ksclientv3
+from keystoneclient.v2_0 import client as ksclientv2
+from novaclient import client as nova_client
+from neutronclient.neutron import client as ntclient
+from glanceclient.v2 import client as glclient
+from ceilometerclient import client as ceilo_client
+
+# Exceptions
+import novaclient.exceptions as NovaException
+import keystoneclient.exceptions as KeystoneExceptions
+import neutronclient.common.exceptions as NeutronException
+import glanceclient.exc as GlanceException
+
+logger = logging.getLogger('rwcal.openstack.drv')
+logger.setLevel(logging.DEBUG)
+
+class ValidationError(Exception):
+    pass
+
+
+class KeystoneDriver(object):
+    """
+    Driver base-class for keystoneclient APIs
+    """
+    def __init__(self, ksclient):
+        """
+        Constructor for KeystoneDriver base class
+        Arguments:
+          ksclient: the keystoneclient client class used to open connections
+        Returns: None
+        """
+        self.ksclient = ksclient
+
+    def get_username(self):
+        """
+        Returns the username associated with keystoneclient connection
+        """
+        return self._username
+
+    def get_password(self):
+        """
+        Returns the password associated with keystoneclient connection
+        """
+        return self._password
+
+    def get_tenant_name(self):
+        """
+        Returns the tenant name associated with keystoneclient connection
+        """
+        return self._tenant_name
+
+    def _get_keystone_connection(self):
+        """
+        Returns an instance of the python-keystoneclient Client class
+        """
+        if not hasattr(self, '_keystone_connection'):
+            self._keystone_connection = self.ksclient(**self._get_keystone_credentials())
+        return self._keystone_connection
+
+    def is_auth_token_valid(self, token_expiry, time_fmt):
+        """
+        Checks the validity of auth_token
+        Arguments:
+          token_expiry (string): Expiry time for token
+          time_fmt (string)    : Format for expiry string in auth_ref
+
+        Returns:
+        True/False (Boolean): True if auth_token is valid, False otherwise
+        """
+        import time
+        import datetime
+        import dateutil.parser
+        try:
+            now = datetime.datetime.timetuple(datetime.datetime.utcnow())
+            expires_at = dateutil.parser.parse(token_expiry)
+            t_now = time.mktime(now)
+            t_expiry = time.mktime(expires_at.timetuple())
+
+            if (t_expiry <= t_now) or ((t_expiry - t_now) < 300 ):
+                ### Token has expired or is about to expire (within 5 minutes)
+                delattr(self, '_keystone_connection')
+                return False
+            else:
+                return True
+        except Exception as e:
+            logger.error("Received except %s during auth_token validity check" %str(e))
+            logger.info("Can not validate the auth_token. Assuming invalid")
+            return False
+
+
+    def get_service_endpoint(self, service_type, endpoint_type):
+        """
+        Returns requested type of endpoint for requested service type
+        Arguments:
+          service_type (string): Service Type (e.g. computev3, image, network)
+          endpoint_type(string): Endpoint Type (e.g. publicURL,adminURL,internalURL)
+        Returns:
+          service_endpoint(string): Service endpoint string
+        """
+        endpoint_kwargs   = {'service_type'  : service_type,
+                             'endpoint_type' : endpoint_type}
+        try:
+            ksconn = self._get_keystone_connection()
+            service_endpoint  = ksconn.service_catalog.url_for(**endpoint_kwargs)
+        except Exception as e:
+            logger.error("OpenstackDriver: Service Catalog discovery operation failed for service_type: %s, endpoint_type: %s. Exception: %s" %(service_type, endpoint_type, str(e)))
+            raise
+        return service_endpoint
+
+
+    def get_raw_token(self):
+        """
+        Returns a valid raw_auth_token string
+
+        Returns (string): raw_auth_token string
+        """
+        ksconn = self._get_keystone_connection()
+        try:
+            raw_token = ksconn.get_raw_token_from_identity_service(auth_url = self._auth_url,
+                                                                   token    = self.get_auth_token())
+        except KeystoneExceptions.AuthorizationFailure as e:
+            logger.error("OpenstackDriver: get_raw_token_from_identity_service Failure. Exception: %s" %(str(e)))
+            return None
+
+        except Exception as e:
+            logger.error("OpenstackDriver: Could not retrieve raw_token. Exception: %s" %(str(e)))
+            return None
+
+        return raw_token
+
+    def get_tenant_id(self):
+        """
+        Returns tenant_id for the project/tenant. Tenant name is provided during
+        class instantiation
+
+        Returns (string): Tenant ID
+        """
+        ksconn = self._get_keystone_connection()
+        return ksconn.tenant_id
+
+    def get_security_mode(self):
+        """
+        Returns certificate_validation policy in case of SSL/TLS connection.
+        This policy is provided during class instantiation
+
+        Returns (boolean):
+        The boolean returned matches the "insecure" value used to instantiate the
+        nova/neutron/glance/keystone python clients.
+
+        True: No certificate validation required -- Insecure mode
+        False: Certificate validation required -- Secure mode
+        """
+        return self._insecure
+
+    def tenant_list(self):
+        """
+        Returns list of tenants
+        """
+        pass
+
+    def tenant_create(self, name):
+        """
+        Create a new tenant
+        """
+        pass
+
+    def tenant_delete(self, tenant_id):
+        """
+        Deletes a tenant identified by tenant_id
+        """
+        pass
+
+    def roles_list(self):
+        pass
+
+    def roles_create(self):
+        pass
+
+    def roles_delete(self):
+        pass
+
+class KeystoneDriverV2(KeystoneDriver):
+    """
+    Driver class for keystoneclient V2 APIs
+    """
+    def __init__(self, username, password, auth_url, tenant_name, insecure):
+        """
+        Constructor for KeystoneDriverV2 class
+        Arguments:
+        username (string)  : Username
+        password (string)  : Password
+        auth_url (string)  : Authentication URL
+        tenant_name(string): Tenant Name
+        insecure (boolean) : Skip certificate validation when True
+
+        Returns: None
+        """
+        self._username    = username
+        self._password    = password
+        self._auth_url    = auth_url
+        self._tenant_name = tenant_name
+        self._insecure    = insecure
+        super(KeystoneDriverV2, self).__init__(ksclientv2.Client)
+
+    def _get_keystone_credentials(self):
+        """
+        Returns the dictionary of kwargs required to instantiate python-keystoneclient class
+        """
+        creds                 = {}
+        #creds['user_domain'] = self._domain_name
+        creds['username']     = self._username
+        creds['password']     = self._password
+        creds['auth_url']     = self._auth_url
+        creds['tenant_name']  = self._tenant_name
+        creds['insecure']     = self.get_security_mode()
+        return creds
+
+    def get_auth_token(self):
+        """
+        Returns a valid auth_token
+
+        Returns (string): auth_token string
+        """
+        ksconn = self._get_keystone_connection()
+        return ksconn.auth_token
+
+    def is_auth_token_valid(self):
+        """
+        Checks the validity of auth_token
+        Arguments: None
+
+        Returns:
+        True/False (Boolean): True if auth_token is valid, False otherwise
+        """
+        ksconn = self._get_keystone_connection()
+        result = super(KeystoneDriverV2, self).is_auth_token_valid(ksconn.auth_ref['token']['expires'],
+                                                                   "%Y-%m-%dT%H:%M:%SZ")
+        return result
+
+
+class KeystoneDriverV3(KeystoneDriver):
+    """
+    Driver class for keystoneclient V3 APIs
+    """
+    def __init__(self, username, password, auth_url, tenant_name, insecure):
+        """
+        Constructor for KeystoneDriverV3 class
+        Arguments:
+        username (string)  : Username
+        password (string)  : Password
+        auth_url (string)  : Authentication URL
+        tenant_name(string): Tenant Name
+        insecure (boolean) : Skip certificate validation when True
+
+        Returns: None
+        """
+        self._username    = username
+        self._password    = password
+        self._auth_url    = auth_url
+        self._tenant_name = tenant_name
+        self._insecure    = insecure
+        super(KeystoneDriverV3, self).__init__(ksclientv3.Client)
+
+    def _get_keystone_credentials(self):
+        """
+        Returns the dictionary of kwargs required to instantiate python-keystoneclient class
+        """
+        creds                 = {}
+        #creds['user_domain'] = self._domain_name
+        creds['username']     = self._username
+        creds['password']     = self._password
+        creds['auth_url']     = self._auth_url
+        creds['project_name'] = self._tenant_name
+        creds['insecure']     = self._insecure
+        return creds
+
+    def get_auth_token(self):
+        """
+        Returns a valid auth_token
+
+        Returns (string): auth_token string
+        """
+        ksconn = self._get_keystone_connection()
+        return ksconn.auth_ref['auth_token']
+
+    def is_auth_token_valid(self):
+        """
+        Checks the validity of auth_token
+        Arguments: None
+
+        Returns:
+        True/False (Boolean): True if auth_token is valid, False otherwise
+        """
+        ksconn = self._get_keystone_connection()
+        result = super(KeystoneDriverV3, self).is_auth_token_valid(ksconn.auth_ref['expires_at'],
+                                                                   "%Y-%m-%dT%H:%M:%S.%fZ")
+        return result
+
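+# ---------------------------------------------------------------------------
+# Editorial sketch (added by the editor, not part of the original commit):
+# a minimal example of selecting a keystone driver from the auth URL and
+# fetching a token. All argument values are placeholders. Note that
+# is_auth_token_valid() drops the cached connection on expiry, so the
+# follow-up get_auth_token() call transparently reconnects.
+# ---------------------------------------------------------------------------
+def _example_keystone_token(username, password, auth_url, tenant_name):
+    drv_cls = KeystoneDriverV3 if '/v3' in auth_url else KeystoneDriverV2
+    ks_drv = drv_cls(username, password, auth_url, tenant_name, insecure=True)
+    token = ks_drv.get_auth_token()
+    if not ks_drv.is_auth_token_valid():
+        token = ks_drv.get_auth_token()
+    return token
+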
+class NovaDriver(object):
+    """
+    Driver for openstack nova_client
+    """
+    def __init__(self, ks_drv, service_name, version):
+        """
+        Constructor for NovaDriver
+        Arguments:
+           ks_drv (KeystoneDriver): KeystoneDriver object
+           service_name (string)  : Nova service name in the keystone catalog
+           version (string)       : novaclient API version
+        """
+        self.ks_drv = ks_drv
+        self._service_name = service_name
+        self._version = version
+
+    def _get_nova_credentials(self):
+        """
+        Returns a dictionary of kwargs required to instantiate python-novaclient class
+        """
+        creds               = {}
+        creds['version']    = self._version
+        creds['bypass_url'] = self.ks_drv.get_service_endpoint(self._service_name, "publicURL")
+        creds['username']   = self.ks_drv.get_username()
+        creds['project_id'] = self.ks_drv.get_tenant_name()
+        creds['auth_token'] = self.ks_drv.get_auth_token()
+        creds['insecure']   = self.ks_drv.get_security_mode()
+        return creds
+
+    def _get_nova_connection(self):
+        """
+        Returns an object of class python-novaclient
+        """
+        if not hasattr(self, '_nova_connection'):
+            self._nova_connection = nova_client.Client(**self._get_nova_credentials())
+        else:
+            # Reinitialize if auth_token is no longer valid
+            if not self.ks_drv.is_auth_token_valid():
+                self._nova_connection = nova_client.Client(**self._get_nova_credentials())
+        return self._nova_connection
+
+    def _flavor_get(self, flavor_id):
+        """
+        Get flavor by flavor_id
+        Arguments:
+           flavor_id(string): UUID of the flavor
+
+        Returns:
+        dictionary of flavor parameters
+        """
+        nvconn = self._get_nova_connection()
+        try:
+            flavor = nvconn.flavors.get(flavor_id)
+        except Exception as e:
+            logger.info("OpenstackDriver: Did not find flavor with flavor_id : %s. Exception: %s"%(flavor_id, str(e)))
+            raise
+
+        try:
+            extra_specs = flavor.get_keys()
+        except Exception as e:
+            logger.info("OpenstackDriver: Could not get the EPA attributes for flavor with flavor_id : %s. Exception: %s"%(flavor_id, str(e)))
+            raise
+
+        response = flavor.to_dict()
+        assert 'extra_specs' not in response, "Key extra_specs present as flavor attribute"
+        response['extra_specs'] = extra_specs
+        return response
+
+    def flavor_get(self, flavor_id):
+        """
+        Get flavor by flavor_id
+        Arguments:
+           flavor_id(string): UUID of the flavor
+
+        Returns:
+        dictionary of flavor parameters
+        """
+        return self._flavor_get(flavor_id)
+
+    def flavor_list(self):
+        """
+        Returns list of all flavors (dictionary per flavor)
+
+        Arguments:
+           None
+        Returns:
+           A list of dictionaries. Each dictionary contains attributes for a single flavor instance
+        """
+        flavors = []
+        flavor_info = []
+        nvconn =  self._get_nova_connection()
+        try:
+            flavors = nvconn.flavors.list()
+        except Exception as e:
+            logger.error("OpenstackDriver: List Flavor operation failed. Exception: %s"%(str(e)))
+            raise
+        if flavors:
+            flavor_info = [ self.flavor_get(flv.id) for flv in flavors ]
+        return flavor_info
+
+    def flavor_create(self, name, ram, vcpu, disk, extra_specs):
+        """
+        Create a new flavor
+
+        Arguments:
+           name   (string):  Name of the new flavor
+           ram    (int)   :  Memory in MB
+           vcpu   (int)   :  Number of VCPUs
+           disk   (int)   :  Secondary storage size in GB
+           extra_specs (dictionary): EPA attributes dictionary
+
+        Returns:
+           flavor_id (string): UUID of flavor created
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            flavor = nvconn.flavors.create(name        = name,
+                                           ram         = ram,
+                                           vcpus       = vcpu,
+                                           disk        = disk,
+                                           flavorid    = 'auto',
+                                           ephemeral   = 0,
+                                           swap        = 0,
+                                           rxtx_factor = 1.0,
+                                           is_public    = True)
+        except Exception as e:
+            logger.error("OpenstackDriver: Create Flavor operation failed. Exception: %s"%(str(e)))
+            raise
+
+        if extra_specs:
+            try:
+                flavor.set_keys(extra_specs)
+            except Exception as e:
+                logger.error("OpenstackDriver: Set Key operation failed for flavor: %s. Exception: %s" %(flavor.id, str(e)))
+                raise
+        return flavor.id
+
+    def flavor_delete(self, flavor_id):
+        """
+        Deletes a flavor identified by flavor_id
+
+        Arguments:
+           flavor_id (string):  UUID of flavor to be deleted
+
+        Returns: None
+        """
+        assert flavor_id == self._flavor_get(flavor_id)['id']
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.flavors.delete(flavor_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Delete flavor operation failed for flavor: %s. Exception: %s" %(flavor_id, str(e)))
+            raise
+
+
+    def server_list(self):
+        """
+        Returns a list of available VMs for the project
+
+        Arguments: None
+
+        Returns:
+           A list of dictionaries. Each dictionary contains attributes associated
+           with individual VM
+        """
+        servers     = []
+        server_info = []
+        nvconn      = self._get_nova_connection()
+        try:
+            servers     = nvconn.servers.list()
+        except Exception as e:
+            logger.error("OpenstackDriver: List Server operation failed. Exception: %s" %(str(e)))
+            raise
+        server_info = [ server.to_dict() for server in servers]
+        return server_info
+
+    def _nova_server_get(self, server_id):
+        """
+        Returns a dictionary of attributes associated with VM identified by server_id
+
+        Arguments:
+          server_id (string): UUID of the VM/server for which information is requested
+
+        Returns:
+          A dictionary object with attributes associated with VM identified by server_id
+        """
+        nvconn = self._get_nova_connection()
+        try:
+            server = nvconn.servers.get(server = server_id)
+        except Exception as e:
+            logger.info("OpenstackDriver: Get Server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
+            raise
+        else:
+            return server.to_dict()
+
+    def server_get(self, server_id):
+        """
+        Returns a dictionary of attributes associated with VM identified by server_id
+
+        Arguments:
+          server_id (string): UUID of the VM/server for which information is requested
+
+        Returns:
+          A dictionary object with attributes associated with VM identified by server_id
+        """
+        return self._nova_server_get(server_id)
+
+    def server_create(self, **kwargs):
+        """
+        Creates a new VM/server instance
+
+        Arguments:
+          A dictionary of following key-value pairs
+         {
+           name (string)              : Name of the VM/Server
+           flavor_id  (string)        : UUID of the flavor to be used for the VM
+           image_id   (string)        : UUID of the image to be used for the VM/Server instance
+           network_list(List)         : A List of network_ids. A port will be created in these networks
+           port_list (List)           : A List of port-ids. These ports will be added to VM.
+           metadata   (dict)          : A dictionary of arbitrary key-value pairs associated with VM/server
+           userdata   (string)        : A script which shall be executed during first boot of the VM
+           security_groups (List)     : A list of security-group names to be associated with the VM
+           availability_zone (string) : A name of the availability zone where instance should be launched
+           scheduler_hints (string)   : Openstack scheduler_hints to be passed to nova scheduler
+         }
+        Returns:
+          server_id (string): UUID of the VM/server created
+
+        """
+        nics = []
+        if 'network_list' in kwargs:
+            for network_id in kwargs['network_list']:
+                nics.append({'net-id': network_id})
+
+        if 'port_list' in kwargs:
+            for port_id in kwargs['port_list']:
+                nics.append({'port-id': port_id})
+
+        nvconn = self._get_nova_connection()
+
+        try:
+            server = nvconn.servers.create(kwargs['name'],
+                                           kwargs['image_id'],
+                                           kwargs['flavor_id'],
+                                           meta                 = kwargs['metadata'],
+                                           files                = None,
+                                           reservation_id       = None,
+                                           min_count            = None,
+                                           max_count            = None,
+                                           userdata             = kwargs['userdata'],
+                                           security_groups      = kwargs['security_groups'],
+                                           availability_zone    = kwargs['availability_zone'],
+                                           block_device_mapping = None,
+                                           nics                 = nics,
+                                           scheduler_hints      = kwargs['scheduler_hints'],
+                                           config_drive         = None)
+        except Exception as e:
+            logger.info("OpenstackDriver: Create Server operation failed. Exception: %s" %(str(e)))
+            raise
+        return server.to_dict()['id']
+
+    def server_delete(self, server_id):
+        """
+        Deletes a server identified by server_id
+
+        Arguments:
+           server_id (string): UUID of the server to be deleted
+
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.delete(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Delete server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+    def server_start(self, server_id):
+        """
+        Starts a server identified by server_id
+
+        Arguments:
+           server_id (string): UUID of the server to be started
+
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.start(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Start Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+    def server_stop(self, server_id):
+        """
+        Arguments:
+           server_id (string): UUID of the server to be stopped
+
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.stop(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Stop Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+    def server_pause(self, server_id):
+        """
+        Arguments:
+           server_id (string): UUID of the server to be paused
+
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.pause(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Pause Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+    def server_unpause(self, server_id):
+        """
+        Arguments:
+           server_id (string): UUID of the server to be unpaused
+
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.unpause(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Resume Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+
+    def server_suspend(self, server_id):
+        """
+        Arguments:
+           server_id (string): UUID of the server to be suspended
+
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.suspend(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Suspend Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
+
+
+    def server_resume(self, server_id):
+        """
+        Arguments:
+           server_id (string): UUID of the server to be resumed
+
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.resume(server_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Resume Server operation failed for server_id : %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+    def server_reboot(self, server_id, reboot_type):
+        """
+        Arguments:
+           server_id (string) : UUID of the server to be rebooted
+           reboot_type(string):
+                         'SOFT': Soft Reboot
+                         'HARD': Hard Reboot
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.reboot(server_id, reboot_type)
+        except Exception as e:
+            logger.error("OpenstackDriver: Reboot Server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+    def server_console(self, server_id, console_type = 'novnc'):
+        """
+        Arguments:
+           server_id (string) : UUID of the server whose console is requested
+           console_type(string):
+                               'novnc',
+                               'xvpvnc'
+        Returns:
+          A dictionary object response for console information
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            console_info = nvconn.servers.get_vnc_console(server_id, console_type)
+        except Exception as e:
+            logger.error("OpenstackDriver: Server Get-Console operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
+            raise
+        return console_info
+
+    def server_rebuild(self, server_id, image_id):
+        """
+        Arguments:
+           server_id (string) : UUID of the server to be rebuilt
+           image_id (string)  : UUID of the image to use
+        Returns: None
+        """
+
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.rebuild(server_id, image_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Rebuild Server operation failed for server_id: %s. Exception: %s" %(server_id, str(e)))
+            raise
+
+
+    def server_add_port(self, server_id, port_id):
+        """
+        Arguments:
+           server_id (string): UUID of the server
+           port_id   (string): UUID of the port to be attached
+
+        Returns: None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.interface_attach(server_id,
+                                            port_id,
+                                            net_id = None,
+                                            fixed_ip = None)
+        except Exception as e:
+            logger.error("OpenstackDriver: Server Port Add operation failed for server_id : %s, port_id : %s. Exception: %s" %(server_id, port_id, str(e)))
+            raise
+
+    def server_delete_port(self, server_id, port_id):
+        """
+        Arguments:
+           server_id (string): UUID of the server
+           port_id   (string): UUID of the port to be deleted
+        Returns: None
+
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.interface_detach(server_id, port_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Server Port Delete operation failed for server_id : %s, port_id : %s. Exception: %s" %(server_id, port_id, str(e)))
+            raise
+
+    def floating_ip_list(self):
+        """
+        Arguments:
+            None
+        Returns:
+            List of objects of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            ip_list = nvconn.floating_ips.list()
+        except Exception as e:
+            logger.error("OpenstackDriver: Floating IP List operation failed. Exception: %s" %str(e))
+            raise
+
+        return ip_list
+
+    def floating_ip_create(self, pool):
+        """
+        Arguments:
+           pool (string): Name of the pool (optional)
+        Returns:
+           An object of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            floating_ip = nvconn.floating_ips.create(pool)
+        except Exception as e:
+            logger.error("OpenstackDriver: Floating IP Create operation failed. Exception: %s"  %str(e))
+            raise
+
+        return floating_ip
+
+    def floating_ip_delete(self, floating_ip):
+        """
+        Arguments:
+           floating_ip: An object of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
+        Returns:
+           None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            floating_ip = nvconn.floating_ips.delete(floating_ip)
+        except Exception as e:
+            logger.error("OpenstackDriver: Floating IP Delete operation failed. Exception: %s"  %str(e))
+            raise
+
+    def floating_ip_assign(self, server_id, floating_ip, fixed_ip):
+        """
+        Arguments:
+           server_id (string)  : UUID of the server
+           floating_ip (string): IP address string for floating-ip
+           fixed_ip (string)   : IP address string for the fixed-ip with which floating ip will be associated
+        Returns:
+           None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.add_floating_ip(server_id, floating_ip, fixed_ip)
+        except Exception as e:
+            logger.error("OpenstackDriver: Assign Floating IP operation failed. Exception: %s"  %str(e))
+            raise
+
+    def floating_ip_release(self, server_id, floating_ip):
+        """
+        Arguments:
+           server_id (string)  : UUID of the server
+           floating_ip (string): IP address string for floating-ip
+        Returns:
+           None
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            nvconn.servers.remove_floating_ip(server_id, floating_ip)
+        except Exception as e:
+            logger.error("OpenstackDriver: Release Floating IP operation failed. Exception: %s"  %str(e))
+            raise
+
+    def group_list(self):
+        """
+        List of Server Affinity and Anti-Affinity Groups
+
+        Arguments:
+            None
+        Returns:
+           A list of dictionaries, each the representation of a server group (novaclient.v2.server_groups.ServerGroup)
+        """
+        nvconn =  self._get_nova_connection()
+        try:
+            group_list = nvconn.server_groups.list()
+        except Exception as e:
+            logger.error("OpenstackDriver: Server Group List operation failed. Exception: %s"  %str(e))
+            raise
+
+        group_info = [ group.to_dict() for group in group_list ]
+        return group_info
+
+
+
+class NovaDriverV2(NovaDriver):
+    """
+    Driver class for novaclient V2 APIs
+    """
+    def __init__(self, ks_drv):
+        """
+        Constructor for NovaDriver
+        Arguments: KeystoneDriver class object
+        """
+        super(NovaDriverV2, self).__init__(ks_drv, 'compute', '2.0')
+
+class NovaDriverV21(NovaDriver):
+    """
+    Driver class for novaclient V2.1 APIs
+    """
+    def __init__(self, ks_drv):
+        """
+        Constructor for NovaDriver
+        Arguments: KeystoneDriver class object
+        """
+        super(NovaDriverV21, self).__init__(ks_drv, 'computev21', '2.1')
+
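+# ---------------------------------------------------------------------------
+# Editorial sketch (added by the editor, not part of the original commit):
+# a minimal example of NovaDriver.flavor_create()/server_create(). The flavor
+# values and the EPA extra_spec are illustrative placeholders; server_create()
+# reads every keyword shown below unconditionally, so callers must supply all
+# of them.
+# ---------------------------------------------------------------------------
+def _example_nova_boot(nova_drv, image_id, network_id):
+    flavor_id = nova_drv.flavor_create(name='vm.small.epa',
+                                       ram=4096,   # MB
+                                       vcpu=2,
+                                       disk=20,    # GB
+                                       extra_specs={'hw:cpu_policy': 'dedicated'})
+    server_id = nova_drv.server_create(name='example-vm',
+                                       image_id=image_id,
+                                       flavor_id=flavor_id,
+                                       network_list=[network_id],
+                                       metadata={},
+                                       userdata=None,
+                                       security_groups=None,
+                                       availability_zone=None,
+                                       scheduler_hints=None)
+    return server_id
+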
+class GlanceDriver(object):
+    """
+    Driver for openstack glance-client
+    """
+    def __init__(self, ks_drv, service_name, version):
+        """
+        Constructor for GlanceDriver
+        Arguments:
+           ks_drv (KeystoneDriver): KeystoneDriver object
+           service_name (string)  : Glance service name in the keystone catalog
+           version                : glanceclient API version
+        """
+        self.ks_drv = ks_drv
+        self._service_name = service_name
+        self._version = version
+
+    def _get_glance_credentials(self):
+        """
+        Returns a dictionary of kwargs required to instantiate python-glanceclient class
+
+        Arguments: None
+
+        Returns:
+           A dictionary object of arguments
+        """
+        creds             = {}
+        creds['version']  = self._version
+        creds['endpoint'] = self.ks_drv.get_service_endpoint(self._service_name, 'publicURL')
+        creds['token']    = self.ks_drv.get_auth_token()
+        creds['insecure'] = self.ks_drv.get_security_mode()
+        return creds
+
+    def _get_glance_connection(self):
+        """
+        Returns an object of the python-glanceclient class
+        """
+        if not hasattr(self, '_glance_connection'):
+            self._glance_connection = glclient.Client(**self._get_glance_credentials())
+        else:
+            # Reinitialize if auth_token is no longer valid
+            if not self.ks_drv.is_auth_token_valid():
+                self._glance_connection = glclient.Client(**self._get_glance_credentials())
+        return self._glance_connection
+
+    def image_list(self):
+        """
+        Returns a list of dictionaries. Each dictionary contains attributes associated with
+        an image
+
+        Arguments: None
+
+        Returns: List of dictionaries.
+        """
+        glconn = self._get_glance_connection()
+        images = []
+        try:
+            image_info = glconn.images.list()
+        except Exception as e:
+            logger.error("OpenstackDriver: List Image operation failed. Exception: %s" %(str(e)))
+            raise
+        images = [ img for img in image_info ]
+        return images
+
+    def image_create(self, **kwargs):
+        """
+        Creates an image
+        Arguments:
+           A dictionary of kwargs with following keys
+           {
+              'name'(string)         : Name of the image
+              'location'(string)     : URL (http://....) where image is located
+              'disk_format'(string)  : Disk format
+                    Possible values are 'ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'
+              'container_format'(string): Container format
+                                       Possible values are 'ami', 'ari', 'aki', 'bare', 'ovf'
+              'tags'                 : A list of user tags
+              'checksum'             : The image md5 checksum
+           }
+        Returns:
+           image_id (string)  : UUID of the image
+
+        """
+        glconn = self._get_glance_connection()
+        try:
+            image = glconn.images.create(**kwargs)
+        except Exception as e:
+            logger.error("OpenstackDriver: Create Image operation failed. Exception: %s" %(str(e)))
+            raise
+
+        return image.id
+
+    def image_upload(self, image_id, fd):
+        """
+        Upload the image
+
+        Arguments:
+            image_id: UUID of the image
+            fd      : File descriptor for the image file
+        Returns: None
+        """
+        glconn = self._get_glance_connection()
+        try:
+            glconn.images.upload(image_id, fd)
+        except Exception as e:
+            logger.error("OpenstackDriver: Image upload operation failed. Exception: %s" %(str(e)))
+            raise
+
+    def image_add_location(self, image_id, location, metadata):
+        """
+        Add image URL location
+
+        Arguments:
+           image_id : UUID of the image
+           location : http URL for the image
+           metadata : A dictionary of attributes describing the location
+
+        Returns: None
+        """
+        glconn = self._get_glance_connection()
+        try:
+            image = glconn.images.add_location(image_id, location, metadata)
+        except Exception as e:
+            logger.error("OpenstackDriver: Image location add operation failed. Exception: %s" %(str(e)))
+            raise
+
+    def image_update(self):
+        pass
+
+    def image_delete(self, image_id):
+        """
+        Delete an image
+
+        Arguments:
+           image_id: UUID of the image
+
+        Returns: None
+
+        """
+        assert image_id == self._image_get(image_id)['id']
+        glconn = self._get_glance_connection()
+        try:
+            glconn.images.delete(image_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Delete Image operation failed for image_id : %s. Exception: %s" %(image_id, str(e)))
+            raise
+
+
+    def _image_get(self, image_id):
+        """
+        Returns a dictionary object of VM image attributes
+
+        Arguments:
+           image_id (string): UUID of the image
+
+        Returns:
+           A dictionary of the image attributes
+        """
+        glconn = self._get_glance_connection()
+        try:
+            image = glconn.images.get(image_id)
+        except GlanceException.HTTPBadRequest:
+            # RIFT-14241: The get image request occasionally returns the below message.  Retry in case of bad request exception.
+            # Error code 400.: Message: Bad request syntax ('0').: Error code explanation: 400 = Bad request syntax or unsupported method. (HTTP 400)
+            logger.warning("OpenstackDriver: Got bad request response during get_image request.  Retrying.")
+            image = glconn.images.get(image_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Get Image operation failed for image_id : %s. Exception: %s" %(image_id, str(e)))
+            raise
+
+        return image
+
+    def image_get(self, image_id):
+        """
+        Returns a dictionary object of VM image attributes
+
+        Arguments:
+           image_id (string): UUID of the image
+
+        Returns:
+           A dictionary of the image attributes
+        """
+        return self._image_get(image_id)
+
+class GlanceDriverV2(GlanceDriver):
+    """
+    Driver for openstack glance-client V2
+    """
+    def __init__(self, ks_drv):
+        super(GlanceDriverV2, self).__init__(ks_drv, 'image', 2)
+
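+# ---------------------------------------------------------------------------
+# Editorial sketch (added by the editor, not part of the original commit):
+# a minimal example of GlanceDriver.image_create()/image_upload(). The image
+# name and file path are placeholders; the disk/container formats used are
+# ones listed in the image_create() docstring.
+# ---------------------------------------------------------------------------
+def _example_glance_upload(glance_drv, image_path):
+    image_id = glance_drv.image_create(name='example-image',
+                                       disk_format='qcow2',
+                                       container_format='bare')
+    with open(image_path, 'rb') as fd:
+        glance_drv.image_upload(image_id, fd)
+    return image_id
+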
+class NeutronDriver(object):
+    """
+    Driver for openstack neutron-client
+    """
+    def __init__(self, ks_drv, service_name, version):
+        """
+        Constructor for NeutronDriver
+        Arguments:
+           ks_drv (KeystoneDriver): KeystoneDriver object
+           service_name (string)  : Neutron service name in the keystone catalog
+           version (string)       : neutronclient API version
+        """
+        self.ks_drv = ks_drv
+        self._service_name = service_name
+        self._version = version
+
+    def _get_neutron_credentials(self):
+        """
+        Returns a dictionary of kwargs required to instantiate python-neutronclient class
+
+        Returns:
+          Dictionary of kwargs
+        """
+        creds                 = {}
+        creds['api_version']  = self._version
+        creds['endpoint_url'] = self.ks_drv.get_service_endpoint(self._service_name, 'publicURL')
+        creds['token']        = self.ks_drv.get_auth_token()
+        creds['tenant_name']  = self.ks_drv.get_tenant_name()
+        creds['insecure']     = self.ks_drv.get_security_mode()
+        return creds
+
+    def _get_neutron_connection(self):
+        """
+        Returns an object of class python-neutronclient
+        """
+        if not hasattr(self, '_neutron_connection'):
+            self._neutron_connection = ntclient.Client(**self._get_neutron_credentials())
+        else:
+            # Reinitialize if auth_token is no longer valid
+            if not self.ks_drv.is_auth_token_valid():
+                self._neutron_connection = ntclient.Client(**self._get_neutron_credentials())
+        return self._neutron_connection
+
+    def network_list(self):
+        """
+        Returns list of dictionaries. Each dictionary contains the attributes for a network
+        under the project
+
+        Arguments: None
+
+        Returns:
+          A list of dictionaries
+        """
+        networks = []
+        ntconn   = self._get_neutron_connection()
+        try:
+            networks = ntconn.list_networks()
+        except Exception as e:
+            logger.error("OpenstackDriver: List Network operation failed. Exception: %s" %(str(e)))
+            raise
+        return networks['networks']
+
+    def network_create(self, **kwargs):
+        """
+        Creates a new network for the project
+
+        Arguments:
+          A dictionary with following key-values
+        {
+          name (string)              : Name of the network
+          admin_state_up(Boolean)    : True/False (Defaults: True)
+          external_router(Boolean)   : Connectivity with external router. True/False (Defaults: False)
+          shared(Boolean)            : Shared among tenants. True/False (Defaults: False)
+          physical_network(string)   : The physical network where this network object is implemented (optional).
+          network_type               : The type of physical network that maps to this network resource (optional).
+                                       Possible values are: 'flat', 'vlan', 'vxlan', 'gre'
+          segmentation_id            : An isolated segment on the physical network. The network_type attribute
+                                       defines the segmentation model. For example, if the network_type value
+                                       is vlan, this ID is a vlan identifier. If the network_type value is gre,
+                                       this ID is a gre key.
+        }
+        """
+        params = {'network':
+                  {'name'                 : kwargs['name'],
+                   'admin_state_up'       : kwargs['admin_state_up'],
+                   'tenant_id'            : self.ks_drv.get_tenant_id(),
+                   'shared'               : kwargs['shared'],
+                   #'port_security_enabled': port_security_enabled,
+                   'router:external'      : kwargs['external_router']}}
+
+        if 'physical_network' in kwargs:
+            params['network']['provider:physical_network'] = kwargs['physical_network']
+        if 'network_type' in kwargs:
+            params['network']['provider:network_type'] = kwargs['network_type']
+        if 'segmentation_id' in kwargs:
+            params['network']['provider:segmentation_id'] = kwargs['segmentation_id']
+
+        ntconn = self._get_neutron_connection()
+        try:
+            logger.debug("Calling neutron create_network() with params: %s", str(params))
+            net = ntconn.create_network(params)
+        except Exception as e:
+            logger.error("OpenstackDriver: Create Network operation failed. Exception: %s" %(str(e)))
+            raise
+        logger.debug("Got create_network response from neutron connection: %s", str(net))
+        network_id = net['network']['id']
+        if not network_id:
+            raise Exception("Empty network id returned from create_network. (params: %s)" % str(params))
+
+        return network_id
+
+    def network_delete(self, network_id):
+        """
+        Deletes a network identified by network_id
+
+        Arguments:
+          network_id (string): UUID of the network
+
+        Returns: None
+        """
+        assert network_id == self._network_get(network_id)['id']
+        ntconn = self._get_neutron_connection()
+        try:
+            ntconn.delete_network(network_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Delete Network operation failed. Exception: %s" %(str(e)))
+            raise
+
+    def _network_get(self, network_id):
+        """
+        Returns a dictionary object describing the attributes of the network
+
+        Arguments:
+           network_id (string): UUID of the network
+
+        Returns:
+           A dictionary object of the network attributes
+        """
+        ntconn = self._get_neutron_connection()
+        network = ntconn.list_networks(id = network_id)['networks']
+        if not network:
+            raise NeutronException.NotFound("Network with id %s not found"%(network_id))
+
+        return network[0]
+
+    def network_get(self, network_id):
+        """
+        Returns a dictionary object describing the attributes of the network
+
+        Arguments:
+           network_id (string): UUID of the network
+
+        Returns:
+           A dictionary object of the network attributes
+        """
+        return self._network_get(network_id)
+
+    def subnet_create(self, **kwargs):
+        """
+        Creates a subnet on the network
+
+        Arguments:
+        A dictionary with following key value pairs
+        {
+          network_id(string)  : UUID of the network where subnet needs to be created
+          subnet_cidr(string) : IPv4 address prefix (e.g. '1.1.1.0/24') for the subnet
+          ip_version (integer): 4 for IPv4 and 6 for IPv6
+        }
+
+        Returns:
+           subnet_id (string): UUID of the created subnet
+        """
+        params = {}
+        params['network_id'] = kwargs['network_id']
+        params['ip_version'] = kwargs['ip_version']
+
+        # if params['ip_version'] == 6:
+        #     assert 0, "IPv6 is not supported"
+        
+        if 'subnetpool_id' in kwargs:
+            params['subnetpool_id'] = kwargs['subnetpool_id']
+        else:
+            params['cidr'] = kwargs['cidr']
+
+        if 'gateway_ip' in kwargs:
+            params['gateway_ip'] = kwargs['gateway_ip']
+        else:
+            params['gateway_ip'] = None
+
+        if 'dhcp_params' in kwargs:
+            params['enable_dhcp'] = kwargs['dhcp_params']['enable_dhcp']
+            if 'start_address' in kwargs['dhcp_params'] and 'count' in kwargs['dhcp_params']:
+                end_address = (ipaddress.IPv4Address(kwargs['dhcp_params']['start_address']) + kwargs['dhcp_params']['count']).compressed
+                params['allocation_pools'] = [ {'start': kwargs['dhcp_params']['start_address'] ,
+                                                'end' : end_address} ]
+                
+        if 'dns_server' in kwargs:
+            params['dns_nameservers'] = []
+            for server in kwargs['dns_server']:
+                params['dns_nameservers'].append(server)
+
+        ntconn = self._get_neutron_connection()
+        try:
+            subnet = ntconn.create_subnet({'subnets': [params]})
+        except Exception as e:
+            logger.error("OpenstackDriver: Create Subnet operation failed. Exception: %s" %(str(e)))
+            raise
+
+        return subnet['subnets'][0]['id']
+
+    def subnet_list(self):
+        """
+        Returns a list of dictionaries. Each dictionary contains attributes describing the subnet
+
+        Arguments: None
+
+        Returns:
+           A list of dictionaries, one per subnet
+        """
+        ntconn = self._get_neutron_connection()
+        try:
+            subnets = ntconn.list_subnets()['subnets']
+        except Exception as e:
+            logger.error("OpenstackDriver: List Subnet operation failed. Exception: %s" %(str(e)))
+            raise
+        return subnets
+
+    def _subnet_get(self, subnet_id):
+        """
+        Returns a dictionary object describing the attributes of a subnet.
+
+        Arguments:
+           subnet_id (string): UUID of the subnet
+
+        Returns:
+           A dictionary object of the subnet attributes
+        """
+        ntconn = self._get_neutron_connection()
+        subnets = ntconn.list_subnets(id=subnet_id)
+        if not subnets['subnets']:
+            logger.error("OpenstackDriver: Get subnet operation failed for subnet_id: %s" %(subnet_id))
+            #raise NeutronException.NotFound("Could not find subnet_id %s" %(subnet_id))
+            return {'cidr': ''}
+        else:
+            return subnets['subnets'][0]
+
+    def subnet_get(self, subnet_id):
+        """
+        Returns a dictionary object describing the attributes of a subnet.
+
+        Arguments:
+           subnet_id (string): UUID of the subnet
+
+        Returns:
+           A dictionary object of the subnet attributes
+        """
+        return self._subnet_get(subnet_id)
+
+    def subnet_delete(self, subnet_id):
+        """
+        Deletes a subnet identified by subnet_id
+
+        Arguments:
+           subnet_id (string): UUID of the subnet to be deleted
+
+        Returns: None
+        """
+        ntconn = self._get_neutron_connection()
+        assert subnet_id == self._subnet_get(subnet_id).get('id')
+        try:
+            ntconn.delete_subnet(subnet_id)
+        except Exception as e:
+            logger.error("OpenstackDriver: Delete Subnet operation failed for subnet_id : %s. Exception: %s" %(subnet_id, str(e)))
+            raise
+
+    def port_list(self, **kwargs):
+        """
+        Returns a list of dictionaries. Each dictionary contains attributes describing the port
+
+        Arguments:
+            kwargs (dictionary): A dictionary for filters for port_list operation
+
+        Returns:
+           A list of dictionaries, one per port
+
+        """
+        ports  = []
+        ntconn = self._get_neutron_connection()
+
+        kwargs['tenant_id'] = self.ks_drv.get_tenant_id()
+
+        try:
+            ports  = ntconn.list_ports(**kwargs)
+        except Exception as e:
+            logger.info("OpenstackDriver: List Port operation failed. Exception: %s" %(str(e)))
+            raise
+        return ports['ports']
+
+    def port_create(self, **kwargs):
+        """
+        Create a port in network
+
+        Arguments:
+           A dictionary of following
+           {
+              name (string)      : Name of the port
+              network_id(string) : UUID of the network_id identifying the network to which port belongs
+              subnet_id(string)  : UUID of the subnet_id from which IP-address will be assigned to port
+              vnic_type(string)  : Possible values are "normal", "direct", "macvtap"
+           }
+        Returns:
+           port_id (string)   : UUID of the port
+        """
+        params = {
+            "port": {
+                "admin_state_up"    : kwargs['admin_state_up'],
+                "name"              : kwargs['name'],
+                "network_id"        : kwargs['network_id'],
+                "fixed_ips"         : [ {"subnet_id": kwargs['subnet_id']}],
+                "binding:vnic_type" : kwargs['port_type']}}
+
+        ntconn = self._get_neutron_connection()
+        try:
+            port  = ntconn.create_port(params)
+        except Exception as e:
+            logger.error("OpenstackDriver: Port Create operation failed. Exception: %s" %(str(e)))
+            raise
+        return port['port']['id']
+
+    def _port_get(self, port_id):
+        """
+        Returns a dictionary object describing the attributes of the port
+
+        Arguments:
+           port_id (string): UUID of the port
+
+        Returns:
+           A dictionary object of the port attributes
+        """
+        ntconn = self._get_neutron_connection()
+        port   = ntconn.list_ports(id=port_id)['ports']
+        if not port:
+            raise NeutronException.NotFound("Could not find port_id %s" %(port_id))
+        return port[0]
+
+    def port_get(self, port_id):
+        """
+        Returns a dictionary object describing the attributes of the port
+
+        Arguments:
+           port_id (string): UUID of the port
+
+        Returns:
+           A dictionary object of the port attributes
+        """
+        return self._port_get(port_id)
+
+    def port_delete(self, port_id):
+        """
+        Deletes a port identified by port_id
+
+        Arguments:
+           port_id (string) : UUID of the port
+
+        Returns: None
+        """
+        assert port_id == self._port_get(port_id)['id']
+        ntconn = self._get_neutron_connection()
+        try:
+            ntconn.delete_port(port_id)
+        except Exception as e:
+            logger.error("Port Delete operation failed for port_id : %s. Exception: %s" %(port_id, str(e)))
+            raise
+
+    def security_group_list(self):
+        """
+        Returns a list of dictionaries. Each dictionary contains attributes describing the security group
+
+        Arguments:
+           None
+
+        Returns:
+           A list of dictionaries, one per security group
+        """
+        ntconn = self._get_neutron_connection()
+        try:
+            group_list = ntconn.list_security_groups(tenant_id=self.ks_drv.get_tenant_id())
+        except Exception as e:
+            logger.error("List Security group operation, Exception: %s" %(str(e)))
+            raise
+
+        if 'security_groups' in group_list:
+            return group_list['security_groups']
+        else:
+            return []
+
+    def subnetpool_list(self, **kwargs):
+        """
+        Returns a list of dictionaries. Each dictionary contains attributes describing a subnet prefix pool
+
+        Arguments:
+           None
+
+        Returns:
+           A list of dictionaries, one per subnet prefix pool
+        """
+        ntconn = self._get_neutron_connection()
+        try:
+            pool_list = ntconn.list_subnetpools(**kwargs)
+        except Exception as e:
+            logger.error("List SubnetPool operation, Exception: %s" %(str(e)))
+            raise
+
+        if 'subnetpools' in pool_list:
+            return pool_list['subnetpools']
+        else:
+            return []
+        
+class NeutronDriverV2(NeutronDriver):
+    """
+    Driver for openstack neutron-client v2
+    """
+    def __init__(self, ks_drv):
+        """
+        Constructor for NeutronDriver
+        Arguments: KeystoneDriver class object
+        """
+        super(NeutronDriverV2, self).__init__(ks_drv, 'network', '2.0')
+
+
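+# ---------------------------------------------------------------------------
+# Editorial sketch (added by the editor, not part of the original commit):
+# a minimal example chaining network_create(), subnet_create() and
+# port_create(). All values are placeholders; the dhcp_params block yields an
+# allocation pool running from start_address to start_address + count, as
+# computed inside subnet_create().
+# ---------------------------------------------------------------------------
+def _example_neutron_topology(neutron_drv):
+    network_id = neutron_drv.network_create(name='example-net',
+                                            admin_state_up=True,
+                                            shared=False,
+                                            external_router=False)
+    subnet_id = neutron_drv.subnet_create(network_id=network_id,
+                                          ip_version=4,
+                                          cidr='10.0.0.0/24',
+                                          dhcp_params={'enable_dhcp': True,
+                                                       'start_address': '10.0.0.10',
+                                                       'count': 20})
+    port_id = neutron_drv.port_create(name='example-port',
+                                      network_id=network_id,
+                                      subnet_id=subnet_id,
+                                      admin_state_up=True,
+                                      port_type='normal')
+    return network_id, subnet_id, port_id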
+
+class CeilometerDriver(object):
+    """
+    Driver for openstack ceilometer client
+    """
+
+    def __init__(self, ks_drv, service_name, version):
+        """
+        Constructor for CeilometerDriver
+        Arguments:
+           ks_drv (KeystoneDriver): KeystoneDriver object
+           service_name (string)  : Metering service name in the keystone catalog
+           version (string)       : ceilometerclient API version
+        """
+        self.ks_drv = ks_drv
+        self._service_name = service_name
+        self._version = version
+        self._client = None
+
+    @property
+    def version(self):
+        """The version of the ceilometer client used by the driver"""
+        return self._version
+
+    @property
+    def client(self):
+        """The instance of ceilometer client used by the driver"""
+        if self._client is None or not self.ks_drv.is_auth_token_valid():
+            self._client = ceilo_client.Client(**self.credentials)
+
+        return self._client
+
+    @property
+    def auth_token(self):
+        """The authorization token for the ceilometer client"""
+        try:
+            return self.ks_drv.get_auth_token()
+        except KeystoneExceptions.EndpointNotFound as e:
+            logger.error("OpenstackDriver: unable to get authorization token for ceilometer. Exception: %s" %(str(e)))
+            raise
+
+    @property
+    def security_mode(self):
+        """The security mode for the ceilometer client"""
+        try:
+            return self.ks_drv.get_security_mode()
+        except KeystoneExceptions.EndpointNotFound as e:
+            logger.error("OpenstackDriver: unable to get security mode for ceilometer. Exception: %s" %(str(e)))
+            raise
+
+    @property
+    def endpoint(self):
+        """The service endpoint for the ceilometer client"""
+        try:
+            return self.ks_drv.get_service_endpoint(self._service_name, "publicURL")
+        except KeystoneExceptions.EndpointNotFound as e:
+            logger.error("OpenstackDriver: unable to get endpoint for ceilometer. Exception: %s" %(str(e)))
+            raise
+
+    @property
+    def credentials(self):
+        """A dictionary of credentials for the ceilometer client"""
+        return dict(
+                version=self.version,
+                endpoint=self.endpoint,
+                token=self.auth_token,
+                insecure=self.security_mode,
+                )
+
+    @property
+    def meters(self):
+        """A list of the available meters"""
+        try:
+            return self.client.meters.list()
+        except Exception as e:
+            logger.error("OpenstackDriver: List meters operation failed. Exception: %s" %(str(e)))
+            raise
+
+    @property
+    def alarms(self):
+        """The ceilometer client alarms manager"""
+        return self.client.alarms
+
+    def query_samples(self, vim_instance_id, counter_name, limit=1):
+        """Returns a list of samples
+
+        Arguments:
+            vim_instance_id - the ID of the VIM that the samples are from
+            counter_name    - the counter that the samples will come from
+            limit           - a limit on the number of samples to return
+                              (default: 1)
+
+        Returns:
+            A list of samples
+
+        """
+        try:
+            query_filter = json.dumps({
+                "and": [
+                    {"=": {"resource": vim_instance_id}},
+                    {"=": {"counter_name": counter_name}}
+                    ]
+                })
+            result = self.client.query_samples.query(filter=query_filter, limit=limit)
+            return result[-limit:]
+
+        except Exception as e:
+            logger.exception(e)
+
+        return []
+
+
+class CeilometerDriverV2(CeilometerDriver):
+    """
+    Driver for openstack ceilometer-client
+    """
+    def __init__(self, ks_drv):
+        """
+        Constructor for CeilometerDriverV2
+        Arguments: KeystoneDriver class object
+        """
+        super(CeilometerDriverV2, self).__init__(ks_drv, 'metering', '2')
+
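+# ---------------------------------------------------------------------------
+# Editorial sketch (added by the editor, not part of the original commit):
+# a minimal example of CeilometerDriver.query_samples(). 'cpu_util' is an
+# illustrative counter name and vim_instance_id a placeholder resource UUID;
+# the complex-query filter is ANDed on resource and counter_name, as shown in
+# query_samples() above.
+# ---------------------------------------------------------------------------
+def _example_latest_sample(ceilo_drv, vim_instance_id):
+    samples = ceilo_drv.query_samples(vim_instance_id, 'cpu_util', limit=1)
+    return samples[0] if samples else None
+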
+class OpenstackDriver(object):
+    """
+    Driver for openstack nova, neutron, glance, keystone, swift, cinder services
+    """
+    def __init__(self, username, password, auth_url, tenant_name, mgmt_network = None, cert_validate = False):
+        """
+        OpenstackDriver Driver constructor
+        Arguments:
+          username (string)                   : Username for project/tenant.
+          password (string)                   : Password
+          auth_url (string)                   : Keystone Authentication URL.
+          tenant_name (string)                : Openstack project name
+          mgmt_network(string, optional)      : Management network name. Each VM created with this cloud-account will
+                                                have a default interface into management network.
+          cert_validate (boolean, optional)   : In case of SSL/TLS connection if certificate validation is required or not.
+
+        """
+        insecure = not cert_validate
+        if auth_url.find('/v3') != -1:
+            self.ks_drv        = KeystoneDriverV3(username, password, auth_url, tenant_name, insecure)
+            self.glance_drv    = GlanceDriverV2(self.ks_drv)
+            self.nova_drv      = NovaDriverV21(self.ks_drv)
+            self.neutron_drv   = NeutronDriverV2(self.ks_drv)
+            self.ceilo_drv     = CeilometerDriverV2(self.ks_drv)
+        elif auth_url.find('/v2') != -1:
+            self.ks_drv        = KeystoneDriverV2(username, password, auth_url, tenant_name, insecure)
+            self.glance_drv    = GlanceDriverV2(self.ks_drv)
+            self.nova_drv      = NovaDriverV2(self.ks_drv)
+            self.neutron_drv   = NeutronDriverV2(self.ks_drv)
+            self.ceilo_drv     = CeilometerDriverV2(self.ks_drv)
+        else:
+            logger.error("Could not identity the version information for openstack service endpoints. Auth_URL should contain \"/v2\" or \"/v3\" string in it")
+            raise NotImplementedError("Auth URL is wrong or invalid. Only Keystone v2 & v3 supported")
+
+        if mgmt_network is not None:
+            self._mgmt_network = mgmt_network
+
+            networks = []
+            try:
+                ntconn   = self.neutron_drv._get_neutron_connection()
+                networks = ntconn.list_networks()
+            except Exception as e:
+                logger.error("OpenstackDriver: List Network operation failed. Exception: %s" %(str(e)))
+                raise
+
+            network_list = [ network for network in networks['networks'] if network['name'] == mgmt_network ]
+
+            if not network_list:
+                raise NeutronException.NotFound("Could not find network %s" %(mgmt_network))
+            self._mgmt_network_id = network_list[0]['id']
+
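+    # Illustrative construction (hypothetical endpoint and credentials); the
+    # "/v3" or "/v2" suffix on auth_url selects the keystone driver version:
+    #
+    #     drv = OpenstackDriver(username="admin",
+    #                           password="mypasswd",
+    #                           auth_url="http://192.0.2.10:5000/v3",
+    #                           tenant_name="demo",
+    #                           mgmt_network="private")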
+    def validate_account_creds(self):
+        try:
+            # Establishing a keystone connection is sufficient to validate the credentials
+            self.ks_drv._get_keystone_connection()
+        except KeystoneExceptions.AuthorizationFailure as e:
+            logger.error("OpenstackDriver: Unable to authenticate or validate the existing credentials. Exception: %s" %(str(e)))
+            raise ValidationError("Invalid Credentials: "+ str(e))
+        except Exception as e:
+            logger.error("OpenstackDriver: Could not connect to Openstack. Exception: %s" %(str(e)))
+            raise ValidationError("Connection Error: "+ str(e))
+
+    def get_mgmt_network_id(self):
+        return self._mgmt_network_id
+
+    def glance_image_create(self, **kwargs):
+        if 'disk_format' not in kwargs:
+            kwargs['disk_format'] = 'qcow2'
+        if 'container_format' not in kwargs:
+            kwargs['container_format'] = 'bare'
+        if 'min_disk' not in kwargs:
+            kwargs['min_disk'] = 0
+        if 'min_ram' not in kwargs:
+            kwargs['min_ram'] = 0
+        return self.glance_drv.image_create(**kwargs)
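+    # Illustrative call (hypothetical image name); any of the four fields
+    # above that are omitted fall back to the defaults (qcow2, bare, 0, 0):
+    #
+    #     image_id = drv.glance_image_create(name="fedora-base")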
+
+    def glance_image_upload(self, image_id, fd):
+        self.glance_drv.image_upload(image_id, fd)
+
+    def glance_image_add_location(self, image_id, location):
+        self.glance_drv.image_add_location(image_id, location)
+
+    def glance_image_delete(self, image_id):
+        self.glance_drv.image_delete(image_id)
+
+    def glance_image_list(self):
+        return self.glance_drv.image_list()
+
+    def glance_image_get(self, image_id):
+        return self.glance_drv.image_get(image_id)
+
+
+    def nova_flavor_list(self):
+        return self.nova_drv.flavor_list()
+
+    def nova_flavor_create(self, name, ram, vcpus, disk, epa_specs):
+        extra_specs = epa_specs if epa_specs else {}
+        return self.nova_drv.flavor_create(name,
+                                           ram         = ram,
+                                           vcpu        = vcpus,
+                                           disk        = disk,
+                                           extra_specs = extra_specs)
+
+    def nova_flavor_delete(self, flavor_id):
+        self.nova_drv.flavor_delete(flavor_id)
+
+    def nova_flavor_get(self, flavor_id):
+        return self.nova_drv.flavor_get(flavor_id)
+
+    def nova_server_create(self, **kwargs):
+        assert kwargs['flavor_id'] == self.nova_drv.flavor_get(kwargs['flavor_id'])['id']
+        image = self.glance_drv.image_get(kwargs['image_id'])
+        if image['status'] != 'active':
+            raise GlanceException.NotFound("Image with image_id: %s not found in active state. Current State: %s" %(image['id'], image['status']))
+
+        # if 'network_list' in kwargs:
+        #     kwargs['network_list'].append(self._mgmt_network_id)
+        # else:
+        #     kwargs['network_list'] = [self._mgmt_network_id]
+
+        if 'security_groups' not in kwargs:
+            nvconn = self.nova_drv._get_nova_connection()
+            sec_groups = nvconn.security_groups.list()
+            if sec_groups:
+                ## Should the VM be added to all available security groups?
+                kwargs['security_groups'] = [x.name for x in sec_groups]
+            else:
+                kwargs['security_groups'] = None
+
+        return self.nova_drv.server_create(**kwargs)
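+    # Illustrative call (hypothetical IDs); when security_groups is omitted,
+    # the server is placed in every security group visible to the tenant:
+    #
+    #     vm_id = drv.nova_server_create(name="my-vdu",
+    #                                    flavor_id="<flavor-uuid>",
+    #                                    image_id="<active-image-uuid>")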
+
+    def nova_server_add_port(self, server_id, port_id):
+        self.nova_drv.server_add_port(server_id, port_id)
+
+    def nova_server_delete_port(self, server_id, port_id):
+        self.nova_drv.server_delete_port(server_id, port_id)
+
+    def nova_server_start(self, server_id):
+        self.nova_drv.server_start(server_id)
+
+    def nova_server_stop(self, server_id):
+        self.nova_drv.server_stop(server_id)
+
+    def nova_server_delete(self, server_id):
+        self.nova_drv.server_delete(server_id)
+
+    def nova_server_reboot(self, server_id):
+        self.nova_drv.server_reboot(server_id, reboot_type='HARD')
+
+    def nova_server_rebuild(self, server_id, image_id):
+        self.nova_drv.server_rebuild(server_id, image_id)
+
+    def nova_floating_ip_list(self):
+        return self.nova_drv.floating_ip_list()
+
+    def nova_floating_ip_create(self, pool = None):
+        return self.nova_drv.floating_ip_create(pool)
+
+    def nova_floating_ip_delete(self, floating_ip):
+        self.nova_drv.floating_ip_delete(floating_ip)
+
+    def nova_floating_ip_assign(self, server_id, floating_ip, fixed_ip):
+        self.nova_drv.floating_ip_assign(server_id, floating_ip, fixed_ip)
+
+    def nova_floating_ip_release(self, server_id, floating_ip):
+        self.nova_drv.floating_ip_release(server_id, floating_ip)
+
+    def nova_server_list(self):
+        return self.nova_drv.server_list()
+
+    def nova_server_get(self, server_id):
+        return self.nova_drv.server_get(server_id)
+
+    def nova_server_console(self, server_id):
+        return self.nova_drv.server_console(server_id)
+
+    def nova_server_group_list(self):
+        return self.nova_drv.group_list()
+
+    def neutron_network_list(self):
+        return self.neutron_drv.network_list()
+
+    def neutron_network_get(self, network_id):
+        return self.neutron_drv.network_get(network_id)
+
+    def neutron_network_create(self, **kwargs):
+        return self.neutron_drv.network_create(**kwargs)
+
+    def neutron_network_delete(self, network_id):
+        self.neutron_drv.network_delete(network_id)
+
+    def neutron_subnet_list(self):
+        return self.neutron_drv.subnet_list()
+
+    def neutron_subnet_get(self, subnet_id):
+        return self.neutron_drv.subnet_get(subnet_id)
+
+    def neutron_subnet_create(self, **kwargs):
+        return self.neutron_drv.subnet_create(**kwargs)
+
+    def neutron_subnet_delete(self, subnet_id):
+        self.neutron_drv.subnet_delete(subnet_id)
+
+    # Alias preserving the original misspelled name for any existing callers
+    netruon_subnet_delete = neutron_subnet_delete
+
+    def neutron_subnetpool_list(self):
+        return self.neutron_drv.subnetpool_list()
+
+    def neutron_subnetpool_by_name(self, pool_name):
+        pool_list = self.neutron_drv.subnetpool_list(**{'name': pool_name})
+        if pool_list:
+            return pool_list[0]
+        else:
+            return None
+
+    # Alias preserving the original misspelled name for any existing callers
+    netruon_subnetpool_by_name = neutron_subnetpool_by_name
+
+    def neutron_port_list(self, **kwargs):
+        return self.neutron_drv.port_list(**kwargs)
+
+    def neutron_port_get(self, port_id):
+        return self.neutron_drv.port_get(port_id)
+
+    def neutron_port_create(self, **kwargs):
+        subnets = [subnet for subnet in self.neutron_drv.subnet_list() if subnet['network_id'] == kwargs['network_id']]
+        assert len(subnets) == 1
+        kwargs['subnet_id'] = subnets[0]['id']
+        if not 'admin_state_up' in kwargs:
+            kwargs['admin_state_up'] = True
+        port_id =  self.neutron_drv.port_create(**kwargs)
+
+        if 'vm_id' in kwargs:
+            self.nova_server_add_port(kwargs['vm_id'], port_id)
+        return port_id
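+    # Illustrative call (hypothetical IDs); subnet_id is resolved from the
+    # network's single subnet, and passing vm_id also attaches the new port
+    # to that server:
+    #
+    #     port_id = drv.neutron_port_create(name="cp0",
+    #                                       network_id="<network-uuid>",
+    #                                       vm_id="<server-uuid>")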
+
+    def neutron_security_group_list(self):
+        return self.neutron_drv.security_group_list()
+
+    def neutron_security_group_by_name(self, group_name):
+        group_list = self.neutron_drv.security_group_list()
+        groups = [group for group in group_list if group['name'] == group_name]
+        if groups:
+            return groups[0]
+        else:
+            return None
+
+    def neutron_port_delete(self, port_id):
+        self.neutron_drv.port_delete(port_id)
+
+    def ceilo_meter_endpoint(self):
+        return self.ceilo_drv.endpoint
+
+    def ceilo_meter_list(self):
+        return self.ceilo_drv.meters
+
+    def ceilo_nfvi_metrics(self, vim_id):
+        """Returns a dict of NFVI metrics for a given VM
+
+        Arguments:
+            vim_id - the VIM ID of the VM to retrieve the metrics for
+
+        Returns:
+            A dict of NFVI metrics
+
+        """
+        def query_latest_sample(counter_name):
+            try:
+                filter = json.dumps({
+                    "and": [
+                        {"=": {"resource": vim_id}},
+                        {"=": {"counter_name": counter_name}}
+                        ]
+                    })
+                orderby = json.dumps([{"timestamp": "DESC"}])
+                result = self.ceilo_drv.client.query_samples.query(
+                        filter=filter,
+                        orderby=orderby,
+                        limit=1,
+                        )
+                return result[0]
+
+            except IndexError:
+                pass
+
+            except Exception as e:
+                logger.error("Got exception while querying ceilometer, exception details:%s " %str(e))
+
+            return None
+
+        memory_usage = query_latest_sample("memory.usage")
+        disk_usage = query_latest_sample("disk.usage")
+        cpu_util = query_latest_sample("cpu_util")
+
+        metrics = dict()
+
+        if memory_usage is not None:
+            memory_usage.volume = 1e6 * memory_usage.volume
+            metrics["memory_usage"] = memory_usage.to_dict()
+
+        if disk_usage is not None:
+            metrics["disk_usage"] = disk_usage.to_dict()
+
+        if cpu_util is not None:
+            metrics["cpu_util"] = cpu_util.to_dict()
+
+        return metrics
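+    # Illustrative return value (abridged; each entry is a ceilometer sample
+    # rendered via to_dict(), with memory_usage.volume converted to bytes):
+    #
+    #     {
+    #         "memory_usage": {"volume": 524288000.0, ...},
+    #         "disk_usage":   {"volume": 1073741824.0, ...},
+    #         "cpu_util":     {"volume": 12.5, ...},
+    #     }
+    #
+    # Counters with no samples yet are simply absent from the dict.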
+
+    def ceilo_alarm_list(self):
+        """Returns a list of ceilometer alarms"""
+        return self.ceilo_drv.client.alarms.list()
+
+    def ceilo_alarm_create(self,
+                           name,
+                           meter,
+                           statistic,
+                           operation,
+                           threshold,
+                           period,
+                           evaluations,
+                           severity='low',
+                           repeat=True,
+                           enabled=True,
+                           actions=None,
+                           **kwargs):
+        """Create a new Alarm
+
+        Arguments:
+            name        - the name of the alarm
+            meter       - the name of the meter to measure
+            statistic   - the type of statistic used to trigger the alarm
+                          ('avg', 'min', 'max', 'count', 'sum')
+            operation   - the relational operator that, combined with the
+                          threshold value, determines  when the alarm is
+                          triggered ('lt', 'le', 'eq', 'ge', 'gt')
+            threshold   - the value of the statistic that will trigger the
+                          alarm
+            period      - the duration (seconds) over which to evaluate the
+                          specified statistic
+            evaluations - the number of samples of the meter statistic to
+                          collect when evaluating the threshold
+            severity    - a measure of the urgency or importance of the alarm
+                          ('low', 'moderate', 'critical')
+            repeat      - a flag that indicates whether the alarm should be
+                          triggered once (False) or repeatedly while the alarm
+                          condition is true (True)
+            enabled     - a flag that indicates whether the alarm is enabled
+                          (True) or disabled (False)
+            actions     - a dict specifying the URLs for webhooks. The dict can
+                          have up to 3 keys: 'insufficient_data', 'alarm',
+                          'ok'. Each key is associated with a list of URLs to
+                          webhooks that will be invoked when one of the 3
+                          actions is taken.
+            kwargs      - an arbitrary dict of keyword arguments that are
+                          passed to the ceilometer client
+
+        """
+        ok_actions = actions.get('ok') if actions is not None else None
+        alarm_actions = actions.get('alarm') if actions is not None else None
+        insufficient_data_actions = actions.get('insufficient_data') if actions is not None else None
+
+        return self.ceilo_drv.client.alarms.create(
+                name=name,
+                meter_name=meter,
+                statistic=statistic,
+                comparison_operator=operation,
+                threshold=threshold,
+                period=period,
+                evaluation_periods=evaluations,
+                severity=severity,
+                repeat_actions=repeat,
+                enabled=enabled,
+                ok_actions=ok_actions,
+                alarm_actions=alarm_actions,
+                insufficient_data_actions=insufficient_data_actions,
+                **kwargs
+                )
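+    # Illustrative call (hypothetical webhook URL): trigger while the average
+    # cpu_util over three consecutive 60-second periods exceeds 80:
+    #
+    #     alarm = drv.ceilo_alarm_create(name="high-cpu",
+    #                                    meter="cpu_util",
+    #                                    statistic="avg",
+    #                                    operation="gt",
+    #                                    threshold=80,
+    #                                    period=60,
+    #                                    evaluations=3,
+    #                                    actions={"alarm": ["http://198.51.100.5/hook"]})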
+
+    def ceilo_alarm_update(self, alarm_id, **kwargs):
+        """Updates an existing alarm
+
+        Arguments:
+            alarm_id - the identifier of the alarm to update
+            kwargs   - a dict of the alarm attributes to update
+
+        """
+        return self.ceilo_drv.client.alarms.update(alarm_id, **kwargs)
+
+    def ceilo_alarm_delete(self, alarm_id):
+        self.ceilo_drv.client.alarms.delete(alarm_id)
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_utils.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/openstack_utils.py
new file mode 100644 (file)
index 0000000..eda3ccb
--- /dev/null
@@ -0,0 +1,552 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+import re
+
+class OpenstackGuestEPAUtils(object):
+    """
+    Utility class for Guest EPA to Openstack flavor extra_specs conversion routines
+    """
+    def __init__(self):
+        self._mano_to_espec_cpu_pinning_policy = {
+            'DEDICATED' : 'dedicated',
+            'SHARED'    : 'shared',
+            'ANY'       : 'any',
+        }
+
+        self._espec_to_mano_cpu_pinning_policy = {
+            'dedicated' : 'DEDICATED',
+            'shared'    : 'SHARED',
+            'any'       : 'ANY',
+        }
+        
+        self._mano_to_espec_mempage_size = {
+            'LARGE'        : 'large', 
+            'SMALL'        : 'small',
+            'SIZE_2MB'     :  2048,
+            'SIZE_1GB'     :  1048576,
+            'PREFER_LARGE' : 'large',
+        }
+
+        # Note: both 'LARGE' and 'PREFER_LARGE' serialize to 'large', so the
+        # reverse mapping resolves 'large' to 'LARGE'
+        self._espec_to_mano_mempage_size = {
+            'large'        : 'LARGE',
+            'small'        : 'SMALL',
+             2048          : 'SIZE_2MB',
+             1048576       : 'SIZE_1GB',
+        }
+
+        self._mano_to_espec_cpu_thread_pinning_policy = {
+            'AVOID'    : 'avoid',
+            'SEPARATE' : 'separate',
+            'ISOLATE'  : 'isolate',
+            'PREFER'   : 'prefer',
+        }
+
+        self._espec_to_mano_cpu_thread_pinning_policy = {
+            'avoid'    : 'AVOID',
+            'separate' : 'SEPARATE',
+            'isolate'  : 'ISOLATE',
+            'prefer'   : 'PREFER',
+        }
+
+        self._espec_to_mano_numa_memory_policy = {
+            'strict'   : 'STRICT',
+            'preferred': 'PREFERRED'
+        }
+
+        self._mano_to_espec_numa_memory_policy = {
+            'STRICT'   : 'strict',
+            'PREFERRED': 'preferred'
+        }
+
+    def mano_to_extra_spec_cpu_pinning_policy(self, cpu_pinning_policy):
+        if cpu_pinning_policy in self._mano_to_espec_cpu_pinning_policy:
+            return self._mano_to_espec_cpu_pinning_policy[cpu_pinning_policy]
+        else:
+            return None
+
+    def extra_spec_to_mano_cpu_pinning_policy(self, cpu_pinning_policy):
+        if cpu_pinning_policy in self._espec_to_mano_cpu_pinning_policy:
+            return self._espec_to_mano_cpu_pinning_policy[cpu_pinning_policy]
+        else:
+            return None
+
+    def mano_to_extra_spec_mempage_size(self, mempage_size):
+        if mempage_size in self._mano_to_espec_mempage_size:
+            return self._mano_to_espec_mempage_size[mempage_size]
+        else:
+            return None
+        
+    def extra_spec_to_mano_mempage_size(self, mempage_size):
+        if mempage_size in self._espec_to_mano_mempage_size:
+            return self._espec_to_mano_mempage_size[mempage_size]
+        else:
+            return None
+
+    def mano_to_extra_spec_cpu_thread_pinning_policy(self, cpu_thread_pinning_policy):
+        if cpu_thread_pinning_policy in self._mano_to_espec_cpu_thread_pinning_policy:
+            return self._mano_to_espec_cpu_thread_pinning_policy[cpu_thread_pinning_policy]
+        else:
+            return None
+
+    def extra_spec_to_mano_cpu_thread_pinning_policy(self, cpu_thread_pinning_policy):
+        if cpu_thread_pinning_policy in self._espec_to_mano_cpu_thread_pinning_policy:
+            return self._espec_to_mano_cpu_thread_pinning_policy[cpu_thread_pinning_policy]
+        else:
+            return None
+
+    def mano_to_extra_spec_trusted_execution(self, trusted_execution):
+        if trusted_execution:
+            return 'trusted'
+        else:
+            return 'untrusted'
+
+    def extra_spec_to_mano_trusted_execution(self, trusted_execution):
+        if trusted_execution == 'trusted':
+            return True
+        elif trusted_execution == 'untrusted':
+            return False
+        else:
+            return None
+        
+    def mano_to_extra_spec_numa_node_count(self, numa_node_count):
+        return numa_node_count
+
+    def extra_specs_to_mano_numa_node_count(self, numa_node_count):
+        return int(numa_node_count)
+    
+    def mano_to_extra_spec_numa_memory_policy(self, numa_memory_policy):
+        if numa_memory_policy in self._mano_to_espec_numa_memory_policy:
+            return self._mano_to_espec_numa_memory_policy[numa_memory_policy]
+        else:
+            return None
+
+    def extra_to_mano_spec_numa_memory_policy(self, numa_memory_policy):
+        if numa_memory_policy in self._espec_to_mano_numa_memory_policy:
+            return self._espec_to_mano_numa_memory_policy[numa_memory_policy]
+        else:
+            return None
+
+
+class OpenstackHostEPAUtils(object):
+    """
+    Utility class for Host EPA to Openstack flavor extra_specs conversion routines
+    """
+    def __init__(self):
+        self._mano_to_espec_cpumodel = {
+            "PREFER_WESTMERE"     : "Westmere",
+            "REQUIRE_WESTMERE"    : "Westmere",
+            "PREFER_SANDYBRIDGE"  : "SandyBridge",
+            "REQUIRE_SANDYBRIDGE" : "SandyBridge",
+            "PREFER_IVYBRIDGE"    : "IvyBridge",
+            "REQUIRE_IVYBRIDGE"   : "IvyBridge",
+            "PREFER_HASWELL"      : "Haswell",
+            "REQUIRE_HASWELL"     : "Haswell",
+            "PREFER_BROADWELL"    : "Broadwell",
+            "REQUIRE_BROADWELL"   : "Broadwell",
+            "PREFER_NEHALEM"      : "Nehalem",
+            "REQUIRE_NEHALEM"     : "Nehalem",
+            "PREFER_PENRYN"       : "Penryn",
+            "REQUIRE_PENRYN"      : "Penryn",
+            "PREFER_CONROE"       : "Conroe",
+            "REQUIRE_CONROE"      : "Conroe",
+            "PREFER_CORE2DUO"     : "Core2Duo",
+            "REQUIRE_CORE2DUO"    : "Core2Duo",
+        }
+
+        self._espec_to_mano_cpumodel = {
+            "Westmere"     : "REQUIRE_WESTMERE",
+            "SandyBridge"  : "REQUIRE_SANDYBRIDGE",
+            "IvyBridge"    : "REQUIRE_IVYBRIDGE",
+            "Haswell"      : "REQUIRE_HASWELL",
+            "Broadwell"    : "REQUIRE_BROADWELL",
+            "Nehalem"      : "REQUIRE_NEHALEM",
+            "Penryn"       : "REQUIRE_PENRYN",
+            "Conroe"       : "REQUIRE_CONROE",
+            "Core2Duo"     : "REQUIRE_CORE2DUO",
+        }
+
+        self._mano_to_espec_cpuarch = {
+            "PREFER_X86"     : "x86",
+            "REQUIRE_X86"    : "x86",
+            "PREFER_X86_64"  : "x86_64",
+            "REQUIRE_X86_64" : "x86_64",
+            "PREFER_I686"    : "i686",
+            "REQUIRE_I686"   : "i686",
+            "PREFER_IA64"    : "ia64",
+            "REQUIRE_IA64"   : "ia64",
+            "PREFER_ARMV7"   : "ARMv7",
+            "REQUIRE_ARMV7"  : "ARMv7",
+            "PREFER_ARMV8"   : "ARMv8-A",
+            "REQUIRE_ARMV8"  : "ARMv8-A",
+        }
+
+        # Accept both "ARMv7" (as emitted by the forward mapping above) and
+        # the "ARMv7-A" spelling
+        self._espec_to_mano_cpuarch = {
+            "x86"     : "REQUIRE_X86",
+            "x86_64"  : "REQUIRE_X86_64",
+            "i686"    : "REQUIRE_I686",
+            "ia64"    : "REQUIRE_IA64",
+            "ARMv7"   : "REQUIRE_ARMV7",
+            "ARMv7-A" : "REQUIRE_ARMV7",
+            "ARMv8-A" : "REQUIRE_ARMV8",
+        }
+
+        self._mano_to_espec_cpuvendor = {
+            "PREFER_INTEL"  : "Intel",
+            "REQUIRE_INTEL" : "Intel",
+            "PREFER_AMD"    : "AMD",
+            "REQUIRE_AMD"   : "AMD",
+        }
+
+        self._espec_to_mano_cpuvendor = {
+            "Intel" : "REQUIRE_INTEL",
+            "AMD"   : "REQUIRE_AMD",
+        }
+
+        self._mano_to_espec_cpufeatures = {
+            "PREFER_AES"       : "aes",
+            "REQUIRE_AES"      : "aes",
+            "REQUIRE_VME"      : "vme",
+            "PREFER_VME"       : "vme",
+            "REQUIRE_DE"       : "de",
+            "PREFER_DE"        : "de",
+            "REQUIRE_PSE"      : "pse",
+            "PREFER_PSE"       : "pse",
+            "REQUIRE_TSC"      : "tsc",
+            "PREFER_TSC"       : "tsc",
+            "REQUIRE_MSR"      : "msr",
+            "PREFER_MSR"       : "msr",
+            "REQUIRE_PAE"      : "pae",
+            "PREFER_PAE"       : "pae",
+            "REQUIRE_MCE"      : "mce",
+            "PREFER_MCE"       : "mce",
+            "REQUIRE_CX8"      : "cx8",
+            "PREFER_CX8"       : "cx8",
+            "REQUIRE_APIC"     : "apic",
+            "PREFER_APIC"      : "apic",
+            "REQUIRE_SEP"      : "sep",
+            "PREFER_SEP"       : "sep",
+            "REQUIRE_MTRR"     : "mtrr",
+            "PREFER_MTRR"      : "mtrr",
+            "REQUIRE_PGE"      : "pge",
+            "PREFER_PGE"       : "pge",
+            "REQUIRE_MCA"      : "mca",
+            "PREFER_MCA"       : "mca",
+            "REQUIRE_CMOV"     : "cmov",
+            "PREFER_CMOV"      : "cmov",
+            "REQUIRE_PAT"      : "pat",
+            "PREFER_PAT"       : "pat",
+            "REQUIRE_PSE36"    : "pse36",
+            "PREFER_PSE36"     : "pse36",
+            "REQUIRE_CLFLUSH"  : "clflush",
+            "PREFER_CLFLUSH"   : "clflush",
+            "REQUIRE_DTS"      : "dts",
+            "PREFER_DTS"       : "dts",
+            "REQUIRE_ACPI"     : "acpi",
+            "PREFER_ACPI"      : "acpi",
+            "REQUIRE_MMX"      : "mmx",
+            "PREFER_MMX"       : "mmx",
+            "REQUIRE_FXSR"     : "fxsr",
+            "PREFER_FXSR"      : "fxsr",
+            "REQUIRE_SSE"      : "sse",
+            "PREFER_SSE"       : "sse",
+            "REQUIRE_SSE2"     : "sse2",
+            "PREFER_SSE2"      : "sse2",
+            "REQUIRE_SS"       : "ss",
+            "PREFER_SS"        : "ss",
+            "REQUIRE_HT"       : "ht",
+            "PREFER_HT"        : "ht",
+            "REQUIRE_TM"       : "tm",
+            "PREFER_TM"        : "tm",
+            "REQUIRE_IA64"     : "ia64",
+            "PREFER_IA64"      : "ia64",
+            "REQUIRE_PBE"      : "pbe",
+            "PREFER_PBE"       : "pbe",
+            "REQUIRE_RDTSCP"   : "rdtscp",
+            "PREFER_RDTSCP"    : "rdtscp",
+            "REQUIRE_PNI"      : "pni",
+            "PREFER_PNI"       : "pni",
+            "REQUIRE_PCLMULQDQ": "pclmulqdq",
+            "PREFER_PCLMULQDQ" : "pclmulqdq",
+            "REQUIRE_DTES64"   : "dtes64",
+            "PREFER_DTES64"    : "dtes64",
+            "REQUIRE_MONITOR"  : "monitor",
+            "PREFER_MONITOR"   : "monitor",
+            "REQUIRE_DS_CPL"   : "ds_cpl",
+            "PREFER_DS_CPL"    : "ds_cpl",
+            "REQUIRE_VMX"      : "vmx",
+            "PREFER_VMX"       : "vmx",
+            "REQUIRE_SMX"      : "smx",
+            "PREFER_SMX"       : "smx",
+            "REQUIRE_EST"      : "est",
+            "PREFER_EST"       : "est",
+            "REQUIRE_TM2"      : "tm2",
+            "PREFER_TM2"       : "tm2",
+            "REQUIRE_SSSE3"    : "ssse3",
+            "PREFER_SSSE3"     : "ssse3",
+            "REQUIRE_CID"      : "cid",
+            "PREFER_CID"       : "cid",
+            "REQUIRE_FMA"      : "fma",
+            "PREFER_FMA"       : "fma",
+            "REQUIRE_CX16"     : "cx16",
+            "PREFER_CX16"      : "cx16",
+            "REQUIRE_XTPR"     : "xtpr",
+            "PREFER_XTPR"      : "xtpr",
+            "REQUIRE_PDCM"     : "pdcm",
+            "PREFER_PDCM"      : "pdcm",
+            "REQUIRE_PCID"     : "pcid",
+            "PREFER_PCID"      : "pcid",
+            "REQUIRE_DCA"      : "dca",
+            "PREFER_DCA"       : "dca",
+            "REQUIRE_SSE4_1"   : "sse4_1",
+            "PREFER_SSE4_1"    : "sse4_1",
+            "REQUIRE_SSE4_2"   : "sse4_2",
+            "PREFER_SSE4_2"    : "sse4_2",
+            "REQUIRE_X2APIC"   : "x2apic",
+            "PREFER_X2APIC"    : "x2apic",
+            "REQUIRE_MOVBE"    : "movbe",
+            "PREFER_MOVBE"     : "movbe",
+            "REQUIRE_POPCNT"   : "popcnt",
+            "PREFER_POPCNT"    : "popcnt",
+            "REQUIRE_TSC_DEADLINE_TIMER"   : "tsc_deadline_timer",
+            "PREFER_TSC_DEADLINE_TIMER"    : "tsc_deadline_timer",
+            "REQUIRE_XSAVE"    : "xsave",
+            "PREFER_XSAVE"     : "xsave",
+            "REQUIRE_AVX"      : "avx",
+            "PREFER_AVX"       : "avx",
+            "REQUIRE_F16C"     : "f16c",
+            "PREFER_F16C"      : "f16c",
+            "REQUIRE_RDRAND"   : "rdrand",
+            "PREFER_RDRAND"    : "rdrand",
+            "REQUIRE_FSGSBASE" : "fsgsbase",
+            "PREFER_FSGSBASE"  : "fsgsbase",
+            "REQUIRE_BMI1"     : "bmi1",
+            "PREFER_BMI1"      : "bmi1",
+            "REQUIRE_HLE"      : "hle",
+            "PREFER_HLE"       : "hle",
+            "REQUIRE_AVX2"     : "avx2",
+            "PREFER_AVX2"      : "avx2",
+            "REQUIRE_SMEP"     : "smep",
+            "PREFER_SMEP"      : "smep",
+            "REQUIRE_BMI2"     : "bmi2",
+            "PREFER_BMI2"      : "bmi2",
+            "REQUIRE_ERMS"     : "erms",
+            "PREFER_ERMS"      : "erms",
+            "REQUIRE_INVPCID"  : "invpcid",
+            "PREFER_INVPCID"   : "invpcid",
+            "REQUIRE_RTM"      : "rtm",
+            "PREFER_RTM"       : "rtm",
+            "REQUIRE_MPX"      : "mpx",
+            "PREFER_MPX"       : "mpx",
+            "REQUIRE_RDSEED"   : "rdseed",
+            "PREFER_RDSEED"    : "rdseed",
+            "REQUIRE_ADX"      : "adx",
+            "PREFER_ADX"       : "adx",
+            "REQUIRE_SMAP"     : "smap",
+            "PREFER_SMAP"      : "smap",
+        }
+
+        self._espec_to_mano_cpufeatures = {
+            "aes"      : "REQUIRE_AES",
+            "vme"      : "REQUIRE_VME",
+            "de"       : "REQUIRE_DE",
+            "pse"      : "REQUIRE_PSE",
+            "tsc"      : "REQUIRE_TSC",
+            "msr"      : "REQUIRE_MSR",
+            "pae"      : "REQUIRE_PAE",
+            "mce"      : "REQUIRE_MCE",
+            "cx8"      : "REQUIRE_CX8",
+            "apic"     : "REQUIRE_APIC",
+            "sep"      : "REQUIRE_SEP",
+            "mtrr"     : "REQUIRE_MTRR",
+            "pge"      : "REQUIRE_PGE",
+            "mca"      : "REQUIRE_MCA",
+            "cmov"     : "REQUIRE_CMOV",
+            "pat"      : "REQUIRE_PAT",
+            "pse36"    : "REQUIRE_PSE36",
+            "clflush"  : "REQUIRE_CLFLUSH",
+            "dts"      : "REQUIRE_DTS",
+            "acpi"     : "REQUIRE_ACPI",
+            "mmx"      : "REQUIRE_MMX",
+            "fxsr"     : "REQUIRE_FXSR",
+            "sse"      : "REQUIRE_SSE",
+            "sse2"     : "REQUIRE_SSE2",
+            "ss"       : "REQUIRE_SS",
+            "ht"       : "REQUIRE_HT",
+            "tm"       : "REQUIRE_TM",
+            "ia64"     : "REQUIRE_IA64",
+            "pbe"      : "REQUIRE_PBE",
+            "rdtscp"   : "REQUIRE_RDTSCP",
+            "pni"      : "REQUIRE_PNI",
+            "pclmulqdq": "REQUIRE_PCLMULQDQ",
+            "dtes64"   : "REQUIRE_DTES64",
+            "monitor"  : "REQUIRE_MONITOR",
+            "ds_cpl"   : "REQUIRE_DS_CPL",
+            "vmx"      : "REQUIRE_VMX",
+            "smx"      : "REQUIRE_SMX",
+            "est"      : "REQUIRE_EST",
+            "tm2"      : "REQUIRE_TM2",
+            "ssse3"    : "REQUIRE_SSSE3",
+            "cid"      : "REQUIRE_CID",
+            "fma"      : "REQUIRE_FMA",
+            "cx16"     : "REQUIRE_CX16",
+            "xtpr"     : "REQUIRE_XTPR",
+            "pdcm"     : "REQUIRE_PDCM",
+            "pcid"     : "REQUIRE_PCID",
+            "dca"      : "REQUIRE_DCA",
+            "sse4_1"   : "REQUIRE_SSE4_1",
+            "sse4_2"   : "REQUIRE_SSE4_2",
+            "x2apic"   : "REQUIRE_X2APIC",
+            "movbe"    : "REQUIRE_MOVBE",
+            "popcnt"   : "REQUIRE_POPCNT",
+            "tsc_deadline_timer"   : "REQUIRE_TSC_DEADLINE_TIMER",
+            "xsave"    : "REQUIRE_XSAVE",
+            "avx"      : "REQUIRE_AVX",
+            "f16c"     : "REQUIRE_F16C",
+            "rdrand"   : "REQUIRE_RDRAND",
+            "fsgsbase" : "REQUIRE_FSGSBASE",
+            "bmi1"     : "REQUIRE_BMI1",
+            "hle"      : "REQUIRE_HLE",
+            "avx2"     : "REQUIRE_AVX2",
+            "smep"     : "REQUIRE_SMEP",
+            "bmi2"     : "REQUIRE_BMI2",
+            "erms"     : "REQUIRE_ERMS",
+            "invpcid"  : "REQUIRE_INVPCID",
+            "rtm"      : "REQUIRE_RTM",
+            "mpx"      : "REQUIRE_MPX",
+            "rdseed"   : "REQUIRE_RDSEED",
+            "adx"      : "REQUIRE_ADX",
+            "smap"     : "REQUIRE_SMAP",
+        }
+
+    def mano_to_extra_spec_cpu_model(self, cpu_model):
+        if cpu_model in self._mano_to_espec_cpumodel:
+            return self._mano_to_espec_cpumodel[cpu_model]
+        else:
+            return None
+            
+    def extra_specs_to_mano_cpu_model(self, cpu_model):
+        if cpu_model in self._espec_to_mano_cpumodel:
+            return self._espec_to_mano_cpumodel[cpu_model]
+        else:
+            return None
+        
+    def mano_to_extra_spec_cpu_arch(self, cpu_arch):
+        if cpu_arch in self._mano_to_espec_cpuarch:
+            return self._mano_to_espec_cpuarch[cpu_arch]
+        else:
+            return None
+        
+    def extra_specs_to_mano_cpu_arch(self, cpu_arch):
+        if cpu_arch in self._espec_to_mano_cpuarch:
+            return self._espec_to_mano_cpuarch[cpu_arch]
+        else:
+            return None
+    
+    def mano_to_extra_spec_cpu_vendor(self, cpu_vendor):
+        if cpu_vendor in self._mano_to_espec_cpuvendor:
+            return self._mano_to_espec_cpuvendor[cpu_vendor]
+        else:
+            return None
+
+    def extra_spec_to_mano_cpu_vendor(self, cpu_vendor):
+        if cpu_vendor in self._espec_to_mano_cpuvendor:
+            return self._espec_to_mano_cpuvendor[cpu_vendor]
+        else:
+            return None
+    
+    def mano_to_extra_spec_cpu_socket_count(self, cpu_sockets):
+        return cpu_sockets
+
+    def extra_spec_to_mano_cpu_socket_count(self, cpu_sockets):
+        return int(cpu_sockets)
+    
+    def mano_to_extra_spec_cpu_core_count(self, cpu_core_count):
+        return cpu_core_count
+
+    def extra_spec_to_mano_cpu_core_count(self, cpu_core_count):
+        return int(cpu_core_count)
+    
+    def mano_to_extra_spec_cpu_core_thread_count(self, core_thread_count):
+        return core_thread_count
+
+    def extra_spec_to_mano_cpu_core_thread_count(self, core_thread_count):
+        return int(core_thread_count)
+
+    def mano_to_extra_spec_cpu_features(self, features):
+        cpu_features = []
+        epa_feature_str = None
+        for f in features:
+            if f in self._mano_to_espec_cpufeatures:
+                cpu_features.append(self._mano_to_espec_cpufeatures[f])
+                
+        if len(cpu_features) > 1:
+            epa_feature_str =  '<all-in> '+ " ".join(cpu_features)
+        elif len(cpu_features) == 1:
+            epa_feature_str = " ".join(cpu_features)
+
+        return epa_feature_str
+
+    def extra_spec_to_mano_cpu_features(self, features):
+        oper_symbols = ['=', '<in>', '<all-in>', '==', '!=', '>=', '<=', 's==', 's!=', 's<', 's<=', 's>', 's>=']
+        cpu_features = []
+        result = None
+        for oper in oper_symbols:
+            regex = '^'+oper+' (.*?)$'
+            result = re.search(regex, features)
+            if result is not None:
+                break
+            
+        if result is not None:
+            feature_list = result.group(1)
+        else:
+            feature_list = features
+
+        for f in feature_list.split():
+            if f in self._espec_to_mano_cpufeatures:
+                cpu_features.append(self._espec_to_mano_cpufeatures[f])
+
+        return cpu_features
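+    # Illustrative round trip through the two feature converters above:
+    #
+    #     utils = OpenstackHostEPAUtils()
+    #     utils.mano_to_extra_spec_cpu_features(["REQUIRE_AES", "REQUIRE_SSE"])
+    #         -> '<all-in> aes sse'
+    #     utils.extra_spec_to_mano_cpu_features('<all-in> aes sse')
+    #         -> ['REQUIRE_AES', 'REQUIRE_SSE']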
+    
+
+class OpenstackExtraSpecUtils(object):
+    """
+    General utility class for flavor Extra Specs processing
+    """
+    def __init__(self):
+        self.host = OpenstackHostEPAUtils()
+        self.guest = OpenstackGuestEPAUtils()
+        self.extra_specs_keywords = [ 'hw:cpu_policy',
+                                      'hw:cpu_threads_policy',
+                                      'hw:mem_page_size',
+                                      'hw:numa_nodes',
+                                      'hw:numa_mempolicy',
+                                      'hw:numa_cpus',
+                                      'hw:numa_mem',
+                                      'trust:trusted_host',
+                                      'pci_passthrough:alias',
+                                      'capabilities:cpu_info:model',
+                                      'capabilities:cpu_info:arch',
+                                      'capabilities:cpu_info:vendor',
+                                      'capabilities:cpu_info:topology:sockets',
+                                      'capabilities:cpu_info:topology:cores',
+                                      'capabilities:cpu_info:topology:threads',
+                                      'capabilities:cpu_info:features',
+                                ]
+        self.extra_specs_regex = re.compile("^"+"|^".join(self.extra_specs_keywords))
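+    # Illustrative use of the regex to filter a flavor's extra_specs down to
+    # the EPA-related keys (hypothetical flavor dict):
+    #
+    #     utils = OpenstackExtraSpecUtils()
+    #     epa_specs = {k: v for k, v in flavor['extra_specs'].items()
+    #                  if utils.extra_specs_regex.match(k)}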
diff --git a/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/prepare_vm.py b/rwcal/plugins/vala/rwcal_openstack/rift/rwcal/openstack/prepare_vm.py
new file mode 100644 (file)
index 0000000..7acf0fd
--- /dev/null
@@ -0,0 +1,233 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import rift.rwcal.openstack as openstack_drv
+import logging
+import argparse
+import sys, os, time
+import rwlogger
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger()
+
+rwlog_handler = rwlogger.RwLogger(category="rw-cal-log",
+                                  subcategory="openstack",)
+logger.addHandler(rwlog_handler)
+#logger.setLevel(logging.DEBUG)
+
+
+def assign_floating_ip_address(drv, argument):
+    if not argument.floating_ip:
+        return
+
+    server = drv.nova_server_get(argument.server_id)
+    logger.info("Assigning the floating_ip: %s to VM: %s" %(argument.floating_ip, server['name']))
+    
+    for i in range(120):
+        server = drv.nova_server_get(argument.server_id)
+        for network_name,network_info in server['addresses'].items():
+            if network_info:
+                if network_name == argument.mgmt_network:
+                    for n_info in network_info:
+                        if 'OS-EXT-IPS:type' in n_info and n_info['OS-EXT-IPS:type'] == 'fixed':
+                            management_ip = n_info['addr']
+                            drv.nova_floating_ip_assign(argument.server_id,
+                                                        argument.floating_ip,
+                                                        management_ip)
+                            logger.info("Assigned floating_ip: %s to management_ip: %s" %(argument.floating_ip, management_ip))
+                            return
+        logger.info("Waiting for management_ip to be assigned to server: %s" %(server['name']))
+        time.sleep(1)
+    else:
+        logger.info("No management_ip IP available to associate floating_ip for server: %s" %(server['name']))
+    return
+
+
+def create_port_metadata(drv, argument):
+    if not argument.port_metadata:
+        return
+
+    ### Get Management Network ID
+    network_list = drv.neutron_network_list()
+    mgmt_network_id = [net['id'] for net in network_list if net['name'] == argument.mgmt_network][0]
+    port_list = [ port for port in drv.neutron_port_list(**{'device_id': argument.server_id})
+                  if port['network_id'] != mgmt_network_id ]
+    meta_data = {}
+
+    meta_data['rift-meta-ports'] = str(len(port_list))
+    port_id = 0
+    for port in port_list:
+        info = []
+        info.append('"port_name":"'+port['name']+'"')
+        if 'mac_address' in port:
+            info.append('"hw_addr":"'+port['mac_address']+'"')
+        if 'network_id' in port:
+            #info.append('"network_id":"'+port['network_id']+'"')
+            net_name = [net['name'] for net in network_list if net['id'] == port['network_id']]
+            if net_name:
+                info.append('"network_name":"'+net_name[0]+'"')
+        if 'fixed_ips' in port:
+            ip_address = port['fixed_ips'][0]['ip_address']
+            info.append('"ip":"'+ip_address+'"')
+            
+        meta_data['rift-meta-port-'+str(port_id)] = '{' + ','.join(info) + '}'
+        port_id += 1
+        
+    nvconn = drv.nova_drv._get_nova_connection()
+    nvconn.servers.set_meta(argument.server_id, meta_data)
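+    # Illustrative metadata written to the server (hypothetical values):
+    #
+    #     rift-meta-ports : "2"
+    #     rift-meta-port-0: '{"port_name":"cp0","hw_addr":"fa:16:3e:00:00:01","network_name":"data0","ip":"10.0.0.5"}'
+    #     rift-meta-port-1: '{"port_name":"cp1","hw_addr":"fa:16:3e:00:00:02","network_name":"data1","ip":"10.0.1.5"}'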
+
+
+def prepare_vm_after_boot(drv, argument):
+    ### Important to call create_port_metadata before assign_floating_ip_address
+    ### since assign_floating_ip_address can wait thus delaying port_metadata creation
+
+    ### Wait for 2 minutes for server to come up -- needs fine tuning
+    wait_time = 120 
+    sleep_time = 1
+    for i in range(int(wait_time/sleep_time)):
+        server = drv.nova_server_get(argument.server_id)
+        if server['status'] == 'ACTIVE':
+            logger.info("Server %s to reached active state" %(server['name']))
+            break
+        elif server['status'] == 'BUILD':
+            logger.info("Waiting for server: %s to build. Current state: %s" %(server['name'], server['status']))
+            time.sleep(sleep_time)
+        else:
+            logger.info("Server %s reached state: %s" %(server['name'], server['status']))
+            sys.exit(3)
+    else:
+        logger.error("Server %s did not reach active state in %d seconds. Current state: %s" %(server['name'], wait_time, server['status']))
+        sys.exit(4)
+    
+    #create_port_metadata(drv, argument)
+    assign_floating_ip_address(drv, argument)
+    
+
+def main():
+    """
+    Main routine
+    """
+    parser = argparse.ArgumentParser(description='Script to create openstack resources')
+    parser.add_argument('--auth_url',
+                        action = "store",
+                        dest = "auth_url",
+                        type = str,
+                        help='Keystone Auth URL')
+
+    parser.add_argument('--username',
+                        action = "store",
+                        dest = "username",
+                        type = str,
+                        help = "Username for openstack installation")
+
+    parser.add_argument('--password',
+                        action = "store",
+                        dest = "password",
+                        type = str,
+                        help = "Password for openstack installation")
+
+    parser.add_argument('--tenant_name',
+                        action = "store",
+                        dest = "tenant_name",
+                        type = str,
+                        help = "Tenant name openstack installation")
+
+    parser.add_argument('--mgmt_network',
+                        action = "store",
+                        dest = "mgmt_network",
+                        type = str,
+                        help = "mgmt_network")
+    
+    parser.add_argument('--server_id',
+                        action = "store",
+                        dest = "server_id",
+                        type = str,
+                        help = "Server ID on which boot operations needs to be performed")
+    
+    parser.add_argument('--floating_ip',
+                        action = "store",
+                        dest = "floating_ip",
+                        type = str,
+                        help = "Floating IP to be assigned")
+
+    parser.add_argument('--port_metadata',
+                        action = "store_true",
+                        dest = "port_metadata",
+                        default = False,
+                        help = "Create Port Metadata")
+
+    argument = parser.parse_args()
+
+    if not argument.auth_url:
+        logger.error("ERROR: AuthURL is not configured")
+        sys.exit(1)
+    else:
+        logger.info("Using AuthURL: %s" %(argument.auth_url))
+
+    if not argument.username:
+        logger.error("ERROR: Username is not configured")
+        sys.exit(1)
+    else:
+        logger.info("Using Username: %s" %(argument.username))
+
+    if not argument.password:
+        logger.error("ERROR: Password is not configured")
+        sys.exit(1)
+    else:
+        logger.info("Using Password: %s" %(argument.password))
+
+    if not argument.tenant_name:
+        logger.error("ERROR: Tenant Name is not configured")
+        sys.exit(1)
+    else:
+        logger.info("Using Tenant Name: %s" %(argument.tenant_name))
+
+    if not argument.mgmt_network:
+        logger.error("ERROR: Management Network Name is not configured")
+        sys.exit(1)
+    else:
+        logger.info("Using Management Network: %s" %(argument.mgmt_network))
+        
+    if not argument.server_id:
+        logger.error("ERROR: Server ID is not configured")
+        sys.exit(1)
+    else:
+        logger.info("Using Server ID : %s" %(argument.server_id))
+        
+        
+    try:
+        pid = os.fork()
+        if pid > 0:
+            # exit for parent
+            sys.exit(0)
+    except OSError as e:
+        logger.error("fork failed: %d (%s)\n" % (e.errno, e.strerror))
+        sys.exit(2)
+        
+    drv = openstack_drv.OpenstackDriver(username = argument.username,
+                                        password = argument.password,
+                                        auth_url = argument.auth_url,
+                                        tenant_name = argument.tenant_name,
+                                        mgmt_network = argument.mgmt_network)
+    prepare_vm_after_boot(drv, argument)
+    sys.exit(0)
+    
+if __name__ == "__main__":
+    main()
+
+
diff --git a/rwcal/plugins/vala/rwcal_openstack/rwcal_openstack.py b/rwcal/plugins/vala/rwcal_openstack/rwcal_openstack.py
new file mode 100644 (file)
index 0000000..8e0c710
--- /dev/null
@@ -0,0 +1,2122 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import contextlib
+import logging
+import os
+import subprocess
+import uuid
+
+import rift.rwcal.openstack as openstack_drv
+import rw_status
+import rift.cal.rwcal_status as rwcal_status
+import rwlogger
+import neutronclient.common.exceptions as NeutronException
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang)
+
+PREPARE_VM_CMD = "prepare_vm.py --auth_url {auth_url} --username {username} --password {password} --tenant_name {tenant_name} --mgmt_network {mgmt_network} --server_id {server_id} --port_metadata"
+
+rwstatus_exception_map = { IndexError: RwTypes.RwStatus.NOTFOUND,
+                           KeyError: RwTypes.RwStatus.NOTFOUND,
+                           NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,}
+
+rwstatus = rw_status.rwstatus_from_exc_map(rwstatus_exception_map)
+rwcalstatus = rwcal_status.rwcalstatus_from_exc_map(rwstatus_exception_map)
+
+
+espec_utils = openstack_drv.OpenstackExtraSpecUtils()
+
+class OpenstackCALOperationFailure(Exception):
+    pass
+
+class UninitializedPluginError(Exception):
+    pass
+
+
+class OpenstackServerGroupError(Exception):
+    pass
+
+
+class ImageUploadError(Exception):
+    pass
+
+
+class RwcalOpenstackPlugin(GObject.Object, RwCal.Cloud):
+    """This class implements the CAL VALA methods for openstack."""
+
+    instance_num = 1
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self._driver_class = openstack_drv.OpenstackDriver
+        self.log = logging.getLogger('rwcal.openstack.%s' % RwcalOpenstackPlugin.instance_num)
+        self.log.setLevel(logging.DEBUG)
+
+        self._rwlog_handler = None
+        RwcalOpenstackPlugin.instance_num += 1
+
+
+    @contextlib.contextmanager
+    def _use_driver(self, account):
+        if self._rwlog_handler is None:
+            raise UninitializedPluginError("Must call init() in CAL plugin before use.")
+
+        with rwlogger.rwlog_root_handler(self._rwlog_handler):
+            try:
+                drv = self._driver_class(username      = account.openstack.key,
+                                         password      = account.openstack.secret,
+                                         auth_url      = account.openstack.auth_url,
+                                         tenant_name   = account.openstack.tenant,
+                                         mgmt_network  = account.openstack.mgmt_network,
+                                         cert_validate = account.openstack.cert_validate )
+            except Exception as e:
+                self.log.error("RwcalOpenstackPlugin: OpenstackDriver init failed. Exception: %s" %(str(e)))
+                raise
+
+            yield drv
+
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        self._rwlog_handler = rwlogger.RwLogger(
+                category="rw-cal-log",
+                subcategory="openstack",
+                log_hdl=rwlog_ctx,
+                )
+        self.log.addHandler(self._rwlog_handler)
+        self.log.propagate = False
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_cloud_creds(self, account):
+        """
+        Validates the cloud account credentials for the specified account.
+        Performs an access to the resources using Keystone API. If creds
+        are not valid, returns an error code & reason string
+        Arguments:
+            account - a cloud account to validate
+
+        Returns:
+            Validation Code and Details String
+        """
+        status = RwcalYang.CloudConnectionStatus()
+
+        try:
+            with self._use_driver(account) as drv:
+                drv.validate_account_creds()
+
+        except openstack_drv.ValidationError as e:
+            self.log.error("RwcalOpenstackPlugin: OpenstackDriver credential validation failed. Exception: %s", str(e))
+            status.status = "failure"
+            status.details = "Invalid Credentials: %s" % str(e)
+
+        except Exception as e:
+            msg = "RwcalOpenstackPlugin: OpenstackDriver connection failed. Exception: %s" %(str(e))
+            self.log.error(msg)
+            status.status = "failure"
+            status.details = msg
+
+        else:
+            status.status = "success"
+            status.details = "Connection was successful"
+
+        return status
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_management_network(self, account):
+        """
+        Returns the management network associated with the specified account.
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            The management network
+        """
+        return account.openstack.mgmt_network
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_tenant(self, account, name):
+        """Create a new tenant.
+
+        Arguments:
+            account - a cloud account
+            name - name of the tenant
+
+        Returns:
+            The tenant id
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        """delete a tenant.
+
+        Arguments:
+            account - a cloud account
+            tenant_id - id of the tenant
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        """List tenants.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of tenants
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_role(self, account, name):
+        """Create a new user.
+
+        Arguments:
+            account - a cloud account
+            name - name of the user
+
+        Returns:
+            The user id
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        """Delete a user.
+
+        Arguments:
+            account - a cloud account
+            role_id - id of the user
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        """List roles.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of roles
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_image(self, account, image):
+        """Create an image
+
+        Arguments:
+            account - a cloud account
+            image - a description of the image to create
+
+        Returns:
+            The image id
+        """
+
+        try:
+            # If the user passed in a file descriptor, use that to
+            # upload the image.
+            if image.has_field("fileno"):
+                new_fileno = os.dup(image.fileno)
+                hdl = os.fdopen(new_fileno, 'rb')
+            else:
+                hdl = open(image.location, "rb")
+        except Exception as e:
+            self.log.error("Could not open file for upload. Exception received: %s", str(e))
+            raise
+
+        with hdl as fd:
+            kwargs = {}
+            kwargs['name'] = image.name
+
+            if image.disk_format:
+                kwargs['disk_format'] = image.disk_format
+            if image.container_format:
+                kwargs['container_format'] = image.container_format
+
+            with self._use_driver(account) as drv:
+                # Create Image
+                image_id = drv.glance_image_create(**kwargs)
+                # Upload the Image
+                drv.glance_image_upload(image_id, fd)
+
+                if image.checksum:
+                    stored_image = drv.glance_image_get(image_id)
+                    if stored_image.checksum != image.checksum:
+                        drv.glance_image_delete(image_id=image_id)
+                        raise ImageUploadError(
+                                "image checksum did not match (actual: %s, expected: %s). Deleting." %
+                                (stored_image.checksum, image.checksum)
+                                )
+
+        return image_id
+
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        """Delete a vm image.
+
+        Arguments:
+            account - a cloud account
+            image_id - id of the image to delete
+        """
+        with self._use_driver(account) as drv:
+            drv.glance_image_delete(image_id=image_id)
+
+
+    @staticmethod
+    def _fill_image_info(img_info):
+        """Create a GI object from image info dictionary
+
+        Converts image information dictionary object returned by openstack
+        driver into Protobuf Gi Object
+
+        Arguments:
+            img_info - image information dictionary object from openstack
+
+        Returns:
+            The ImageInfoItem
+        """
+        img = RwcalYang.ImageInfoItem()
+        img.name = img_info['name']
+        img.id = img_info['id']
+        img.checksum = img_info['checksum']
+        img.disk_format = img_info['disk_format']
+        img.container_format = img_info['container_format']
+        if img_info['status'] == 'active':
+            img.state = 'active'
+        else:
+            img.state = 'inactive'
+        return img
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        """Return a list of the names of all available images.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            The list of images in a VimResources object
+        """
+        response = RwcalYang.VimResources()
+        with self._use_driver(account) as drv:
+            images = drv.glance_image_list()
+        for img in images:
+            response.imageinfo_list.append(RwcalOpenstackPlugin._fill_image_info(img))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        """Return a image information.
+
+        Arguments:
+            account - a cloud account
+            image_id - an id of the image
+
+        Returns:
+            ImageInfoItem object containing image information.
+        """
+        with self._use_driver(account) as drv:
+            image = drv.glance_image_get(image_id)
+        return RwcalOpenstackPlugin._fill_image_info(image)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_vm(self, account, vminfo):
+        """Create a new virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vminfo - information that defines the type of VM to create
+
+        Returns:
+            The VM id
+        """
+        kwargs = {}
+        kwargs['name']      = vminfo.vm_name
+        kwargs['flavor_id'] = vminfo.flavor_id
+        kwargs['image_id']  = vminfo.image_id
+
+        with self._use_driver(account) as drv:
+            ### If a floating IP is required, allocate it up front so we fail before any further allocation
+            if vminfo.has_field('allocate_public_address') and vminfo.allocate_public_address:
+                if account.openstack.has_field('floating_ip_pool'):
+                    pool_name = account.openstack.floating_ip_pool
+                else:
+                    pool_name = None
+                floating_ip = self._allocate_floating_ip(drv, pool_name)
+            else:
+                floating_ip = None
+
+        if vminfo.has_field('cloud_init') and vminfo.cloud_init.has_field('userdata'):
+            kwargs['userdata']  = vminfo.cloud_init.userdata
+        else:
+            kwargs['userdata'] = ''
+
+        if account.openstack.security_groups:
+            kwargs['security_groups'] = account.openstack.security_groups
+
+        port_list = []
+        for port in vminfo.port_list:
+            port_list.append(port.port_id)
+
+        if port_list:
+            kwargs['port_list'] = port_list
+
+        network_list = []
+        for network in vminfo.network_list:
+            network_list.append(network.network_id)
+
+        if network_list:
+            kwargs['network_list'] = network_list
+
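+        # Copy any user_tags into the Nova server metadata; _fill_vm_info()
+        # reads the same metadata back into user_tags on retrieval.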
+        metadata = {}
+        for field in vminfo.user_tags.fields:
+            if vminfo.user_tags.has_field(field):
+                metadata[field] = getattr(vminfo.user_tags, field)
+        kwargs['metadata']  = metadata
+
+        if vminfo.has_field('availability_zone'):
+            kwargs['availability_zone']  = vminfo.availability_zone
+        else:
+            kwargs['availability_zone'] = None
+
+        if vminfo.has_field('server_group'):
+            kwargs['scheduler_hints'] = {'group': vminfo.server_group }
+        else:
+            kwargs['scheduler_hints'] = None
+
+        with self._use_driver(account) as drv:
+            vm_id = drv.nova_server_create(**kwargs)
+            if floating_ip:
+                self.prepare_vdu_on_boot(account, vm_id, floating_ip)
+
+        return vm_id
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        """Start an existing virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        with self._use_driver(account) as drv:
+            drv.nova_server_start(vm_id)
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        """Stop a running virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        with self._use_driver(account) as drv:
+            drv.nova_server_stop(vm_id)
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        """Delete a virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        with self._use_driver(account) as drv:
+            drv.nova_server_delete(vm_id)
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        """Reboot a virtual machine.
+
+        Arguments:
+            account - a cloud account
+            vm_id - an id of the VM
+        """
+        with self._use_driver(account) as drv:
+            drv.nova_server_reboot(vm_id)
+
+    @staticmethod
+    def _fill_vm_info(vm_info, mgmt_network):
+        """Create a GI object from vm info dictionary
+
+        Converts VM information dictionary object returned by openstack
+        driver into Protobuf Gi Object
+
+        Arguments:
+            vm_info - VM information from openstack
+            mgmt_network - Management network
+
+        Returns:
+            Protobuf Gi object for VM
+        """
+        vm = RwcalYang.VMInfoItem()
+        vm.vm_id     = vm_info['id']
+        vm.vm_name   = vm_info['name']
+        vm.image_id  = vm_info['image']['id']
+        vm.flavor_id = vm_info['flavor']['id']
+        vm.state     = vm_info['status']
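+        # Addresses on the management network populate management_ip (first
+        # address) and public_ip (floating address if one exists, otherwise the
+        # first address); addresses on other networks go to private_ip_list.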
+        for network_name, network_info in vm_info['addresses'].items():
+            if network_info:
+                if network_name == mgmt_network:
+                    vm.public_ip = next((item['addr']
+                                            for item in network_info
+                                            if item['OS-EXT-IPS:type'] == 'floating'),
+                                        network_info[0]['addr'])
+                    vm.management_ip = network_info[0]['addr']
+                else:
+                    for interface in network_info:
+                        addr = vm.private_ip_list.add()
+                        addr.ip_address = interface['addr']
+
+        for network_name, network_info in vm_info['addresses'].items():
+            if network_info and network_name == mgmt_network and not vm.public_ip:
+                for interface in network_info:
+                    if 'OS-EXT-IPS:type' in interface and interface['OS-EXT-IPS:type'] == 'floating':
+                        vm.public_ip = interface['addr']
+
+        # Look for any metadata
+        for key, value in vm_info['metadata'].items():
+            if key in vm.user_tags.fields:
+                setattr(vm.user_tags, key, value)
+        if 'OS-EXT-SRV-ATTR:host' in vm_info:
+            if vm_info['OS-EXT-SRV-ATTR:host'] is not None:
+                vm.host_name = vm_info['OS-EXT-SRV-ATTR:host']
+        if 'OS-EXT-AZ:availability_zone' in vm_info:
+            if vm_info['OS-EXT-AZ:availability_zone'] is not None:
+                vm.availability_zone = vm_info['OS-EXT-AZ:availability_zone']
+        return vm
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        """Return a list of the VMs as vala boxed objects
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List containing VM information
+        """
+        response = RwcalYang.VimResources()
+        with self._use_driver(account) as drv:
+            vms = drv.nova_server_list()
+        for vm in vms:
+            response.vminfo_list.append(RwcalOpenstackPlugin._fill_vm_info(vm, account.openstack.mgmt_network))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vm(self, account, id):
+        """Return vm information.
+
+        Arguments:
+            account - a cloud account
+            id - an id for the VM
+
+        Returns:
+            VM information
+        """
+        with self._use_driver(account) as drv:
+            vm = drv.nova_server_get(id)
+        return RwcalOpenstackPlugin._fill_vm_info(vm, account.openstack.mgmt_network)
+
+    @staticmethod
+    def _get_guest_epa_specs(guest_epa):
+        """
+        Returns EPA Specs dictionary for guest_epa attributes
+        """
+        epa_specs = {}
+        if guest_epa.has_field('mempage_size'):
+            mempage_size = espec_utils.guest.mano_to_extra_spec_mempage_size(guest_epa.mempage_size)
+            if mempage_size is not None:
+                epa_specs['hw:mem_page_size'] = mempage_size
+
+        if guest_epa.has_field('cpu_pinning_policy'):
+            cpu_pinning_policy = espec_utils.guest.mano_to_extra_spec_cpu_pinning_policy(guest_epa.cpu_pinning_policy)
+            if cpu_pinning_policy is not None:
+                epa_specs['hw:cpu_policy'] = cpu_pinning_policy
+
+        if guest_epa.has_field('cpu_thread_pinning_policy'):
+            cpu_thread_pinning_policy = espec_utils.guest.mano_to_extra_spec_cpu_thread_pinning_policy(guest_epa.cpu_thread_pinning_policy)
+            if cpu_thread_pinning_policy is not None:
+                epa_specs['hw:cpu_threads_policy'] = cpu_thread_pinning_policy
+
+        if guest_epa.has_field('trusted_execution'):
+            trusted_execution = espec_utils.guest.mano_to_extra_spec_trusted_execution(guest_epa.trusted_execution)
+            if trusted_execution is not None:
+                epa_specs['trust:trusted_host'] = trusted_execution
+
+        if guest_epa.has_field('numa_node_policy'):
+            if guest_epa.numa_node_policy.has_field('node_cnt'):
+                numa_node_count = espec_utils.guest.mano_to_extra_spec_numa_node_count(guest_epa.numa_node_policy.node_cnt)
+                if numa_node_count is not None:
+                    epa_specs['hw:numa_nodes'] = numa_node_count
+
+            if guest_epa.numa_node_policy.has_field('mem_policy'):
+                numa_memory_policy = espec_utils.guest.mano_to_extra_spec_numa_memory_policy(guest_epa.numa_node_policy.mem_policy)
+                if numa_memory_policy is not None:
+                    epa_specs['hw:numa_mempolicy'] = numa_memory_policy
+
+            if guest_epa.numa_node_policy.has_field('node'):
+                for node in guest_epa.numa_node_policy.node:
+                    if node.has_field('vcpu') and node.vcpu:
+                        epa_specs['hw:numa_cpus.'+str(node.id)] = ','.join([str(j) for j in node.vcpu])
+                    if node.memory_mb:
+                        epa_specs['hw:numa_mem.'+str(node.id)] = str(node.memory_mb)
+
+        if guest_epa.has_field('pcie_device'):
+            pci_devices = []
+            for device in guest_epa.pcie_device:
+                pci_devices.append(device.device_id +':'+str(device.count))
+            epa_specs['pci_passthrough:alias'] = ','.join(pci_devices)
+
+        return epa_specs
+
+    @staticmethod
+    def _get_host_epa_specs(host_epa):
+        """
+        Returns EPA Specs dictionary for host_epa attributes
+        """
+
+        epa_specs = {}
+
+        if host_epa.has_field('cpu_model'):
+            cpu_model = espec_utils.host.mano_to_extra_spec_cpu_model(host_epa.cpu_model)
+            if cpu_model is not None:
+                epa_specs['capabilities:cpu_info:model'] = cpu_model
+
+        if host_epa.has_field('cpu_arch'):
+            cpu_arch = espec_utils.host.mano_to_extra_spec_cpu_arch(host_epa.cpu_arch)
+            if cpu_arch is not None:
+                epa_specs['capabilities:cpu_info:arch'] = cpu_arch
+
+        if host_epa.has_field('cpu_vendor'):
+            cpu_vendor = espec_utils.host.mano_to_extra_spec_cpu_vendor(host_epa.cpu_vendor)
+            if cpu_vendor is not None:
+                epa_specs['capabilities:cpu_info:vendor'] = cpu_vendor
+
+        if host_epa.has_field('cpu_socket_count'):
+            cpu_socket_count = espec_utils.host.mano_to_extra_spec_cpu_socket_count(host_epa.cpu_socket_count)
+            if cpu_socket_count is not None:
+                epa_specs['capabilities:cpu_info:topology:sockets'] = cpu_socket_count
+
+        if host_epa.has_field('cpu_core_count'):
+            cpu_core_count = espec_utils.host.mano_to_extra_spec_cpu_core_count(host_epa.cpu_core_count)
+            if cpu_core_count is not None:
+                epa_specs['capabilities:cpu_info:topology:cores'] = cpu_core_count
+
+        if host_epa.has_field('cpu_core_thread_count'):
+            cpu_core_thread_count = espec_utils.host.mano_to_extra_spec_cpu_core_thread_count(host_epa.cpu_core_thread_count)
+            if cpu_core_thread_count is not None:
+                epa_specs['capabilities:cpu_info:topology:threads'] = cpu_core_thread_count
+
+        if host_epa.has_field('cpu_feature'):
+            cpu_features = []
+            espec_cpu_features = []
+            for feature in host_epa.cpu_feature:
+                cpu_features.append(feature)
+            espec_cpu_features = espec_utils.host.mano_to_extra_spec_cpu_features(cpu_features)
+            if espec_cpu_features is not None:
+                epa_specs['capabilities:cpu_info:features'] = espec_cpu_features
+        return epa_specs
+
+    @staticmethod
+    def _get_hypervisor_epa_specs(guest_epa):
+        """
+        Returns EPA Specs dictionary for hypervisor_epa attributes (none are currently mapped)
+        """
+        hypervisor_epa = {}
+        return hypervisor_epa
+
+    @staticmethod
+    def _get_vswitch_epa_specs(guest_epa):
+        """
+        Returns EPA Specs dictionary for vswitch_epa attributes (none are currently mapped)
+        """
+        vswitch_epa = {}
+        return vswitch_epa
+
+    @staticmethod
+    def _get_host_aggregate_epa_specs(host_aggregate):
+        """
+        Returns EPA Specs dictionary for host aggregates
+        """
+        epa_specs = {}
+        for aggregate in host_aggregate:
+            epa_specs['aggregate_instance_extra_specs:'+aggregate.metadata_key] = aggregate.metadata_value
+
+        return epa_specs
+
+    @staticmethod
+    def _get_epa_specs(flavor):
+        """
+        Returns epa_specs dictionary based on flavor information
+        """
+        epa_specs = {}
+        if flavor.has_field('guest_epa'):
+            guest_epa = RwcalOpenstackPlugin._get_guest_epa_specs(flavor.guest_epa)
+            epa_specs.update(guest_epa)
+        if flavor.has_field('host_epa'):
+            host_epa = RwcalOpenstackPlugin._get_host_epa_specs(flavor.host_epa)
+            epa_specs.update(host_epa)
+        if flavor.has_field('hypervisor_epa'):
+            hypervisor_epa = RwcalOpenstackPlugin._get_hypervisor_epa_specs(flavor.hypervisor_epa)
+            epa_specs.update(hypervisor_epa)
+        if flavor.has_field('vswitch_epa'):
+            vswitch_epa = RwcalOpenstackPlugin._get_vswitch_epa_specs(flavor.vswitch_epa)
+            epa_specs.update(vswitch_epa)
+        if flavor.has_field('host_aggregate'):
+            host_aggregate = RwcalOpenstackPlugin._get_host_aggregate_epa_specs(flavor.host_aggregate)
+            epa_specs.update(host_aggregate)
+        return epa_specs
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_flavor(self, account, flavor):
+        """Create new flavor.
+
+        Arguments:
+            account - a cloud account
+            flavor - flavor of the VM
+
+        Returns:
+            flavor id
+        """
+        epa_specs = RwcalOpenstackPlugin._get_epa_specs(flavor)
+        with self._use_driver(account) as drv:
+            return drv.nova_flavor_create(name      = flavor.name,
+                                          ram       = flavor.vm_flavor.memory_mb,
+                                          vcpus     = flavor.vm_flavor.vcpu_count,
+                                          disk      = flavor.vm_flavor.storage_gb,
+                                          epa_specs = epa_specs)
+
+
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        """Delete flavor.
+
+        Arguments:
+            account - a cloud account
+            flavor_id - id of the flavor to delete
+        """
+        with self._use_driver(account) as drv:
+            drv.nova_flavor_delete(flavor_id)
+
+    @staticmethod
+    def _fill_epa_attributes(flavor, flavor_info):
+        """Helper function to populate the EPA attributes
+
+        Arguments:
+              flavor     : Object with EPA attributes
+              flavor_info: A dictionary of flavor_info received from openstack
+        Returns:
+              None
+        """
+        getattr(flavor, 'vm_flavor').vcpu_count  = flavor_info['vcpus']
+        getattr(flavor, 'vm_flavor').memory_mb   = flavor_info['ram']
+        getattr(flavor, 'vm_flavor').storage_gb  = flavor_info['disk']
+
+        ### Nothing more to fill if the flavor carries no extra_specs
+        if 'extra_specs' not in flavor_info:
+            return
+
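+        # Reverse of _get_epa_specs(): walk the flavor's extra_specs and map
+        # each known key back onto the EPA fields. The NUMA keys carry the node
+        # index in the key itself (hw:numa_cpus.<id>, hw:numa_mem.<id>).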
+        for attr in flavor_info['extra_specs']:
+            if attr == 'hw:cpu_policy':
+                cpu_pinning_policy = espec_utils.guest.extra_spec_to_mano_cpu_pinning_policy(flavor_info['extra_specs']['hw:cpu_policy'])
+                if cpu_pinning_policy is not None:
+                    getattr(flavor, 'guest_epa').cpu_pinning_policy = cpu_pinning_policy
+
+            elif attr == 'hw:cpu_threads_policy':
+                cpu_thread_pinning_policy = espec_utils.guest.extra_spec_to_mano_cpu_thread_pinning_policy(flavor_info['extra_specs']['hw:cpu_threads_policy'])
+                if cpu_thread_pinning_policy is not None:
+                    getattr(flavor, 'guest_epa').cpu_thread_pinning_policy = cpu_thread_pinning_policy
+
+            elif attr == 'hw:mem_page_size':
+                mempage_size = espec_utils.guest.extra_spec_to_mano_mempage_size(flavor_info['extra_specs']['hw:mem_page_size'])
+                if mempage_size is not None:
+                    getattr(flavor, 'guest_epa').mempage_size = mempage_size
+
+
+            elif attr == 'hw:numa_nodes':
+                numa_node_count = espec_utils.guest.extra_specs_to_mano_numa_node_count(flavor_info['extra_specs']['hw:numa_nodes'])
+                if numa_node_count is not None:
+                    getattr(flavor,'guest_epa').numa_node_policy.node_cnt = numa_node_count
+
+            elif attr.startswith('hw:numa_cpus.'):
+                node_id = attr.split('.')[1]
+                nodes = [ n for n in flavor.guest_epa.numa_node_policy.node if n.id == int(node_id) ]
+                if nodes:
+                    numa_node = nodes[0]
+                else:
+                    numa_node = getattr(flavor,'guest_epa').numa_node_policy.node.add()
+                    numa_node.id = int(node_id)
+
+                numa_node.vcpu = [ int(x) for x in flavor_info['extra_specs'][attr].split(',') ]
+
+            elif attr.startswith('hw:numa_mem.'):
+                node_id = attr.split('.')[1]
+                nodes = [ n for n in flavor.guest_epa.numa_node_policy.node if n.id == int(node_id) ]
+                if nodes:
+                    numa_node = nodes[0]
+                else:
+                    numa_node = getattr(flavor,'guest_epa').numa_node_policy.node.add()
+                    numa_node.id = int(node_id)
+
+                numa_node.memory_mb =  int(flavor_info['extra_specs'][attr])
+
+            elif attr == 'hw:numa_mempolicy':
+                numa_memory_policy = espec_utils.guest.extra_to_mano_spec_numa_memory_policy(flavor_info['extra_specs']['hw:numa_mempolicy'])
+                if numa_memory_policy is not None:
+                    getattr(flavor,'guest_epa').numa_node_policy.mem_policy = numa_memory_policy
+
+            elif attr == 'trust:trusted_host':
+                trusted_execution = espec_utils.guest.extra_spec_to_mano_trusted_execution(flavor_info['extra_specs']['trust:trusted_host'])
+                if trusted_execution is not None:
+                    getattr(flavor,'guest_epa').trusted_execution = trusted_execution
+
+            elif attr == 'pci_passthrough:alias':
+                device_types = flavor_info['extra_specs']['pci_passthrough:alias']
+                for device in device_types.split(','):
+                    dev = getattr(flavor,'guest_epa').pcie_device.add()
+                    dev.device_id = device.split(':')[0]
+                    dev.count = int(device.split(':')[1])
+
+            elif attr == 'capabilities:cpu_info:model':
+                cpu_model = espec_utils.host.extra_specs_to_mano_cpu_model(flavor_info['extra_specs']['capabilities:cpu_info:model'])
+                if cpu_model is not None:
+                    getattr(flavor, 'host_epa').cpu_model = cpu_model
+
+            elif attr == 'capabilities:cpu_info:arch':
+                cpu_arch = espec_utils.host.extra_specs_to_mano_cpu_arch(flavor_info['extra_specs']['capabilities:cpu_info:arch'])
+                if cpu_arch is not None:
+                    getattr(flavor, 'host_epa').cpu_arch = cpu_arch
+
+            elif attr == 'capabilities:cpu_info:vendor':
+                cpu_vendor = espec_utils.host.extra_spec_to_mano_cpu_vendor(flavor_info['extra_specs']['capabilities:cpu_info:vendor'])
+                if cpu_vendor is not None:
+                    getattr(flavor, 'host_epa').cpu_vendor = cpu_vendor
+
+            elif attr == 'capabilities:cpu_info:topology:sockets':
+                cpu_sockets = espec_utils.host.extra_spec_to_mano_cpu_socket_count(flavor_info['extra_specs']['capabilities:cpu_info:topology:sockets'])
+                if cpu_sockets is not None:
+                    getattr(flavor, 'host_epa').cpu_socket_count = cpu_sockets
+
+            elif attr == 'capabilities:cpu_info:topology:cores':
+                cpu_cores = espec_utils.host.extra_spec_to_mano_cpu_core_count(flavor_info['extra_specs']['capabilities:cpu_info:topology:cores'])
+                if cpu_cores is not None:
+                    getattr(flavor, 'host_epa').cpu_core_count = cpu_cores
+
+            elif attr == 'capabilities:cpu_info:topology:threads':
+                cpu_threads = espec_utils.host.extra_spec_to_mano_cpu_core_thread_count(flavor_info['extra_specs']['capabilities:cpu_info:topology:threads'])
+                if cpu_threads is not None:
+                    getattr(flavor, 'host_epa').cpu_core_thread_count = cpu_threads
+
+            elif attr == 'capabilities:cpu_info:features':
+                cpu_features = espec_utils.host.extra_spec_to_mano_cpu_features(flavor_info['extra_specs']['capabilities:cpu_info:features'])
+                if cpu_features is not None:
+                    for feature in cpu_features:
+                        getattr(flavor, 'host_epa').cpu_feature.append(feature)
+            elif attr.startswith('aggregate_instance_extra_specs:'):
+                aggregate = getattr(flavor, 'host_aggregate').add()
+                aggregate.metadata_key = ":".join(attr.split(':')[1:])
+                aggregate.metadata_value = flavor_info['extra_specs'][attr]
+
+    @staticmethod
+    def _fill_flavor_info(flavor_info):
+        """Create a GI object from flavor info dictionary
+
+        Converts Flavor information dictionary object returned by openstack
+        driver into Protobuf Gi Object
+
+        Arguments:
+            flavor_info: Flavor information from openstack
+
+        Returns:
+             Object of class FlavorInfoItem
+        """
+        flavor = RwcalYang.FlavorInfoItem()
+        flavor.name                       = flavor_info['name']
+        flavor.id                         = flavor_info['id']
+        RwcalOpenstackPlugin._fill_epa_attributes(flavor, flavor_info)
+        return flavor
+
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        """Return flavor information.
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of flavors
+        """
+        response = RwcalYang.VimResources()
+        with self._use_driver(account) as drv:
+            flavors = drv.nova_flavor_list()
+        for flv in flavors:
+            response.flavorinfo_list.append(RwcalOpenstackPlugin._fill_flavor_info(flv))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, id):
+        """Return flavor information.
+
+        Arguments:
+            account - a cloud account
+            id - an id for the flavor
+
+        Returns:
+            Flavor info item
+        """
+        with self._use_driver(account) as drv:
+            flavor = drv.nova_flavor_get(id)
+        return RwcalOpenstackPlugin._fill_flavor_info(flavor)
+
+
+    def _fill_network_info(self, network_info, account):
+        """Create a GI object from network info dictionary
+
+        Converts Network information dictionary object returned by openstack
+        driver into Protobuf Gi Object
+
+        Arguments:
+            network_info - Network information from openstack
+            account - a cloud account
+
+        Returns:
+            Network info item
+        """
+        network                  = RwcalYang.NetworkInfoItem()
+        network.network_name     = network_info['name']
+        network.network_id       = network_info['id']
+        if ('provider:network_type' in network_info) and (network_info['provider:network_type'] is not None):
+            network.provider_network.overlay_type = network_info['provider:network_type'].upper()
+        if ('provider:segmentation_id' in network_info) and (network_info['provider:segmentation_id']):
+            network.provider_network.segmentation_id = network_info['provider:segmentation_id']
+        if ('provider:physical_network' in network_info) and (network_info['provider:physical_network']):
+            network.provider_network.physical_network = network_info['provider:physical_network'].upper()
+
+        if 'subnets' in network_info and network_info['subnets']:
+            subnet_id = network_info['subnets'][0]
+            with self._use_driver(account) as drv:
+                subnet = drv.neutron_subnet_get(subnet_id)
+            network.subnet = subnet['cidr']
+        return network
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        """Return a list of networks
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of networks
+        """
+        response = RwcalYang.VimResources()
+        with self._use_driver(account) as drv:
+            networks = drv.neutron_network_list()
+        for network in networks:
+            response.networkinfo_list.append(self._fill_network_info(network, account))
+        return response
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, id):
+        """Return a network
+
+        Arguments:
+            account - a cloud account
+            id - an id for the network
+
+        Returns:
+            Network info item
+        """
+        with self._use_driver(account) as drv:
+            network = drv.neutron_network_get(id)
+        return self._fill_network_info(network, account)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_network(self, account, network):
+        """Create a new network
+
+        Arguments:
+            account - a cloud account
+            network - Network object
+
+        Returns:
+            Network id
+        """
+        kwargs = {}
+        kwargs['name']            = network.network_name
+        kwargs['admin_state_up']  = True
+        kwargs['external_router'] = False
+        kwargs['shared']          = False
+
+        if network.has_field('provider_network'):
+            if network.provider_network.has_field('physical_network'):
+                kwargs['physical_network'] = network.provider_network.physical_network
+            if network.provider_network.has_field('overlay_type'):
+                kwargs['network_type'] = network.provider_network.overlay_type.lower()
+            if network.provider_network.has_field('segmentation_id'):
+                kwargs['segmentation_id'] = network.provider_network.segmentation_id
+
+        with self._use_driver(account) as drv:
+            network_id = drv.neutron_network_create(**kwargs)
+            drv.neutron_subnet_create(network_id = network_id,
+                                      cidr = network.subnet)
+        return network_id
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        """Delete a network
+
+        Arguments:
+            account - a cloud account
+            network_id - an id for the network
+        """
+        with self._use_driver(account) as drv:
+            drv.neutron_network_delete(network_id)
+
+    @staticmethod
+    def _fill_port_info(port_info):
+        """Create a GI object from port info dictionary
+
+        Converts Port information dictionary object returned by openstack
+        driver into Protobuf Gi Object
+
+        Arguments:
+            port_info - Port information from openstack
+
+        Returns:
+            Port info item
+        """
+        port = RwcalYang.PortInfoItem()
+
+        port.port_name  = port_info['name']
+        port.port_id    = port_info['id']
+        port.network_id = port_info['network_id']
+        port.port_state = port_info['status']
+        if 'device_id' in port_info:
+            port.vm_id = port_info['device_id']
+        if 'fixed_ips' in port_info:
+            port.ip_address = port_info['fixed_ips'][0]['ip_address']
+        return port
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        """Return a port
+
+        Arguments:
+            account - a cloud account
+            port_id - an id for the port
+
+        Returns:
+            Port info item
+        """
+        with self._use_driver(account) as drv:
+            port = drv.neutron_port_get(port_id)
+
+        return RwcalOpenstackPlugin._fill_port_info(port)
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        """Return a list of ports
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            Port info list
+        """
+        response = RwcalYang.VimResources()
+        with self._use_driver(account) as drv:
+            ports = drv.neutron_port_list()
+        for port in ports:
+            response.portinfo_list.append(RwcalOpenstackPlugin._fill_port_info(port))
+        return response
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_port(self, account, port):
+        """Create a new port
+
+        Arguments:
+            account - a cloud account
+            port - port object
+
+        Returns:
+            Port id
+        """
+        kwargs = {}
+        kwargs['name'] = port.port_name
+        kwargs['network_id'] = port.network_id
+        kwargs['admin_state_up'] = True
+        if port.has_field('vm_id'):
+            kwargs['vm_id'] = port.vm_id
+        if port.has_field('port_type'):
+            kwargs['port_type'] = port.port_type
+        else:
+            kwargs['port_type'] = "normal"
+
+        with self._use_driver(account) as drv:
+            return drv.neutron_port_create(**kwargs)
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        """Delete a port
+
+        Arguments:
+            account - a cloud account
+            port_id - an id for port
+        """
+        with self._use_driver(account) as drv:
+            drv.neutron_port_delete(port_id)
+
+    @rwstatus(ret_on_failure=[""])
+    def do_add_host(self, account, host):
+        """Add a new host
+
+        Arguments:
+            account - a cloud account
+            host - a host object
+
+        Returns:
+            An id for the host
+        """
+        raise NotImplementedError
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        """Remove a host
+
+        Arguments:
+            account - a cloud account
+            host_id - an id for the host
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        """Return a host
+
+        Arguments:
+            account - a cloud account
+            host_id - an id for host
+
+        Returns:
+            Host info item
+        """
+        raise NotImplementedError
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        """Return a list of hosts
+
+        Arguments:
+            account - a cloud account
+
+        Returns:
+            List of hosts
+        """
+        raise NotImplementedError
+
+    @staticmethod
+    def _fill_connection_point_info(c_point, port_info):
+        """Create a GI object for RwcalYang.VDUInfoParams_ConnectionPoints()
+
+        Converts Port information dictionary object returned by openstack
+        driver into Protobuf Gi Object
+
+        Arguments:
+            port_info - Port information from openstack
+        Returns:
+            Protobuf Gi object for RwcalYang.VDUInfoParams_ConnectionPoints
+        """
+        c_point.name = port_info['name']
+        c_point.connection_point_id = port_info['id']
+        if ('fixed_ips' in port_info) and (len(port_info['fixed_ips']) >= 1):
+            if 'ip_address' in port_info['fixed_ips'][0]:
+                c_point.ip_address = port_info['fixed_ips'][0]['ip_address']
+        if port_info['status'] == 'ACTIVE':
+            c_point.state = 'active'
+        else:
+            c_point.state = 'inactive'
+        if 'network_id' in port_info:
+            c_point.virtual_link_id = port_info['network_id']
+        if ('device_id' in port_info) and (port_info['device_id']):
+            c_point.vdu_id = port_info['device_id']
+
+    @staticmethod
+    def _fill_virtual_link_info(network_info, port_list, subnet):
+        """Create a GI object for VirtualLinkInfoParams
+
+        Converts Network and Port information dictionary object
+        returned by openstack driver into Protobuf Gi Object
+
+        Arguments:
+            network_info - Network information from openstack
+            port_list - A list of port information from openstack
+            subnet: Subnet information from openstack
+        Returns:
+            Protobuf Gi object for VirtualLinkInfoParams
+        """
+        link = RwcalYang.VirtualLinkInfoParams()
+        link.name  = network_info['name']
+        if network_info['status'] == 'ACTIVE':
+            link.state = 'active'
+        else:
+            link.state = 'inactive'
+        link.virtual_link_id = network_info['id']
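+        # Only ports owned by compute instances ('compute:None') are exposed as
+        # connection points on the virtual link.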
+        for port in port_list:
+            if port['device_owner'] == 'compute:None':
+                c_point = link.connection_points.add()
+                RwcalOpenstackPlugin._fill_connection_point_info(c_point, port)
+
+        if subnet is not None:
+            link.subnet = subnet['cidr']
+
+        if ('provider:network_type' in network_info) and (network_info['provider:network_type'] is not None):
+            link.provider_network.overlay_type = network_info['provider:network_type'].upper()
+        if ('provider:segmentation_id' in network_info) and (network_info['provider:segmentation_id']):
+            link.provider_network.segmentation_id = network_info['provider:segmentation_id']
+        if ('provider:physical_network' in network_info) and (network_info['provider:physical_network']):
+            link.provider_network.physical_network = network_info['provider:physical_network'].upper()
+
+        return link
+
+    @staticmethod
+    def _fill_vdu_info(vm_info, flavor_info, mgmt_network, port_list, server_group):
+        """Create a GI object for VDUInfoParams
+
+        Converts VM information dictionary object returned by openstack
+        driver into Protobuf Gi Object
+
+        Arguments:
+            vm_info - VM information from openstack
+            flavor_info - VM Flavor information from openstack
+            mgmt_network - Management network
+            port_list - A list of port information from openstack
+            server_group - A list (empty, or with a single element) naming the server group to which this VM belongs
+        Returns:
+            Protobuf Gi object for VDUInfoParams
+        """
+        vdu = RwcalYang.VDUInfoParams()
+        vdu.name = vm_info['name']
+        vdu.vdu_id = vm_info['id']
+        for network_name, network_info in vm_info['addresses'].items():
+            if network_info and network_name == mgmt_network:
+                for interface in network_info:
+                    if 'OS-EXT-IPS:type' in interface:
+                        if interface['OS-EXT-IPS:type'] == 'fixed':
+                            vdu.management_ip = interface['addr']
+                        elif interface['OS-EXT-IPS:type'] == 'floating':
+                            vdu.public_ip = interface['addr']
+
+        # Look for any metadata
+        for key, value in vm_info['metadata'].items():
+            if key == 'node_id':
+                vdu.node_id = value
+        if ('image' in vm_info) and ('id' in vm_info['image']):
+            vdu.image_id = vm_info['image']['id']
+        if ('flavor' in vm_info) and ('id' in vm_info['flavor']):
+            vdu.flavor_id = vm_info['flavor']['id']
+
+        if vm_info['status'] == 'ACTIVE':
+            vdu.state = 'active'
+        elif vm_info['status'] == 'ERROR':
+            vdu.state = 'failed'
+        else:
+            vdu.state = 'inactive'
+
+        if 'availability_zone' in vm_info:
+            vdu.availability_zone = vm_info['availability_zone']
+
+        if server_group:
+            vdu.server_group.name = server_group[0]
+
+        vdu.cloud_type  = 'openstack'
+        # Fill the port information
+        for port in port_list:
+            c_point = vdu.connection_points.add()
+            RwcalOpenstackPlugin._fill_connection_point_info(c_point, port)
+
+        if flavor_info is not None:
+            RwcalOpenstackPlugin._fill_epa_attributes(vdu, flavor_info)
+        return vdu
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        """Create a new virtual link
+
+        Arguments:
+            account     - a cloud account
+            link_params - information that defines the virtual link to create
+
+        Returns:
+            The network_id of the created virtual link
+        """
+        kwargs = {}
+        kwargs['name']            = link_params.name
+        kwargs['admin_state_up']  = True
+        kwargs['external_router'] = False
+        kwargs['shared']          = False
+
+        if link_params.has_field('provider_network'):
+            if link_params.provider_network.has_field('physical_network'):
+                kwargs['physical_network'] = link_params.provider_network.physical_network
+            if link_params.provider_network.has_field('overlay_type'):
+                kwargs['network_type'] = link_params.provider_network.overlay_type.lower()
+            if link_params.provider_network.has_field('segmentation_id'):
+                kwargs['segmentation_id'] = link_params.provider_network.segmentation_id
+
+
+        with self._use_driver(account) as drv:
+            try:
+                network_id = drv.neutron_network_create(**kwargs)
+            except Exception as e:
+                self.log.error("Encountered exceptions during network creation. Exception: %s", str(e))
+                raise
+            
+            kwargs = {'network_id' : network_id,
+                      'dhcp_params': {'enable_dhcp': True},
+                      'gateway_ip' : None,}
+            
+            if link_params.ip_profile_params.has_field('ip_version'):
+                kwargs['ip_version'] = 6 if link_params.ip_profile_params.ip_version == 'ipv6' else 4
+            else:
+                kwargs['ip_version'] = 4
+
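+            # CIDR selection precedence: explicit subnet_address, then a named
+            # subnet prefix pool, then the legacy link_params.subnet field.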
+            if link_params.ip_profile_params.has_field('subnet_address'):
+                kwargs['cidr'] = link_params.ip_profile_params.subnet_address
+            elif link_params.ip_profile_params.has_field('subnet_prefix_pool'):
+                subnet_pool = drv.netruon_subnetpool_by_name(link_params.ip_profile_params.subnet_prefix_pool)
+                if subnet_pool is None:
+                    self.log.error("Could not find subnet pool with name :%s to be used for network: %s",
+                                   link_params.ip_profile_params.subnet_prefix_pool,
+                                   link_params.name)
+                    raise NeutronException.NotFound("SubnetPool with name %s not found"%(link_params.ip_profile_params.subnet_prefix_pool))
+                
+                kwargs['subnetpool_id'] = subnet_pool['id']
+            elif link_params.has_field('subnet'):
+                kwargs['cidr'] = link_params.subnet
+            else:
+                raise ValueError("No IP Prefix or Pool name specified")
+
+            if link_params.ip_profile_params.has_field('dhcp_params'):
+                if link_params.ip_profile_params.dhcp_params.has_field('enabled'):
+                    kwargs['dhcp_params']['enable_dhcp'] = link_params.ip_profile_params.dhcp_params.enabled
+                if link_params.ip_profile_params.dhcp_params.has_field('start_address'):
+                    kwargs['dhcp_params']['start_address']  = link_params.ip_profile_params.dhcp_params.start_address
+                if link_params.ip_profile_params.dhcp_params.has_field('count'):
+                    kwargs['dhcp_params']['count']  = link_params.ip_profile_params.dhcp_params.count
+    
+            if link_params.ip_profile_params.has_field('dns_server'):
+                kwargs['dns_server'] = []
+                for server in link_params.ip_profile_params.dns_server:
+                    kwargs['dns_server'].append(server)
+
+            if link_params.ip_profile_params.has_field('gateway_address'):
+                kwargs['gateway_ip'] = link_params.ip_profile_params.gateway_address
+                
+            drv.neutron_subnet_create(**kwargs)
+            
+        return network_id
+
+
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        """Delete a virtual link
+
+        Arguments:
+            account - a cloud account
+            link_id - id for the virtual-link to be deleted
+
+        Returns:
+            None
+        """
+        if not link_id:
+            self.log.error("Empty link_id during the virtual link deletion")
+            raise Exception("Empty link_id during the virtual link deletion")
+
+        with self._use_driver(account) as drv:
+            port_list = drv.neutron_port_list(**{'network_id': link_id})
+
+        for port in port_list:
+            if port['device_owner'] in ('compute:None', ''):
+                self.do_delete_port(account, port['id'], no_rwstatus=True)
+        self.do_delete_network(account, link_id, no_rwstatus=True)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        """Get information about virtual link.
+
+        Arguments:
+            account  - a cloud account
+            link_id  - id for the virtual-link
+
+        Returns:
+            Object of type RwcalYang.VirtualLinkInfoParams
+        """
+        if not link_id:
+            self.log.error("Empty link_id during the virtual link get request")
+            raise Exception("Empty link_id during the virtual link get request")
+
+        with self._use_driver(account) as drv:
+            network = drv.neutron_network_get(link_id)
+            if network:
+                port_list = drv.neutron_port_list(**{'network_id': network['id']})
+                if 'subnets' in network:
+                    subnet = drv.neutron_subnet_get(network['subnets'][0])
+                else:
+                    subnet = None
+                virtual_link = RwcalOpenstackPlugin._fill_virtual_link_info(network, port_list, subnet)
+            else:
+                virtual_link = None
+            return virtual_link
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link_list(self, account):
+        """Get information about all the virtual links
+
+        Arguments:
+            account  - a cloud account
+
+        Returns:
+            A VNFResources object containing a list of RwcalYang.VirtualLinkInfoParams
+        """
+        vnf_resources = RwcalYang.VNFResources()
+        with self._use_driver(account) as drv:
+            networks = drv.neutron_network_list()
+            for network in networks:
+                port_list = drv.neutron_port_list(**{'network_id': network['id']})
+                if ('subnets' in network) and (network['subnets']):
+                    subnet = drv.neutron_subnet_get(network['subnets'][0])
+                else:
+                    subnet = None
+                virtual_link = RwcalOpenstackPlugin._fill_virtual_link_info(network, port_list, subnet)
+                vnf_resources.virtual_link_info_list.append(virtual_link)
+            return vnf_resources
+
+    def _create_connection_point(self, account, c_point):
+        """
+        Create a connection point
+        Arguments:
+           account  - a cloud account
+           c_point  - a connection point
+        """
+        kwargs = {}
+        kwargs['name'] = c_point.name
+        kwargs['network_id'] = c_point.virtual_link_id
+        kwargs['admin_state_up'] = True
+
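+        # Map the connection point type onto the driver's port_type:
+        # VIRTIO -> 'normal', SR_IOV -> 'direct' (SR-IOV passthrough).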
+        if c_point.type_yang == 'VIRTIO':
+            kwargs['port_type'] = 'normal'
+        elif c_point.type_yang == 'SR_IOV':
+            kwargs['port_type'] = 'direct'
+        else:
+            raise NotImplementedError("Port Type: %s not supported" %(c_point.port_type))
+
+        with self._use_driver(account) as drv:
+            if c_point.has_field('security_group'):
+                group = drv.neutron_security_group_by_name(c_point.security_group)
+                if group is not None:
+                    kwargs['security_groups'] = [group['id']]
+            return drv.neutron_port_create(**kwargs)
+
+    def _allocate_floating_ip(self, drv, pool_name):
+        """
+        Allocate a floating IP. Reuse of an unused floating IP is currently
+        disabled (see the commented-out block below); a new IP is always created.
+        Arguments:
+          drv:       OpenstackDriver instance
+          pool_name: Floating IP pool name
+
+        Returns:
+          An object of floating IP nova class (novaclient.v2.floating_ips.FloatingIP)
+        """
+
+        # available_ip = [ ip for ip in drv.nova_floating_ip_list() if ip.instance_id == None ]
+
+        # if pool_name is not None:
+        #     ### Filter further based on IP address
+        #     available_ip = [ ip for ip in available_ip if ip.pool == pool_name ]
+
+        # if not available_ip:
+        #     floating_ip = drv.nova_floating_ip_create(pool_name)
+        # else:
+        #     floating_ip = available_ip[0]
+
+        floating_ip = drv.nova_floating_ip_create(pool_name)
+        return floating_ip
+
+    def _match_vm_flavor(self, required, available):
+        self.log.info("Matching VM Flavor attributes")
+        if available.vcpu_count != required.vcpu_count:
+            self.log.debug("VCPU requirement mismatch. Required: %d, Available: %d",
+                            required.vcpu_count,
+                            available.vcpu_count)
+            return False
+        if available.memory_mb != required.memory_mb:
+            self.log.debug("Memory requirement mismatch. Required: %d MB, Available: %d MB",
+                            required.memory_mb,
+                            available.memory_mb)
+            return False
+        if available.storage_gb != required.storage_gb:
+            self.log.debug("Storage requirement mismatch. Required: %d GB, Available: %d GB",
+                            required.storage_gb,
+                            available.storage_gb)
+            return False
+        self.log.debug("VM Flavor match found")
+        return True
+
+    def _match_guest_epa(self, required, available):
+        self.log.info("Matching Guest EPA attributes")
+        if required.has_field('pcie_device'):
+            self.log.debug("Matching pcie_device")
+            if available.has_field('pcie_device') == False:
+                self.log.debug("Matching pcie_device failed. Not available in flavor")
+                return False
+            else:
+                for dev in required.pcie_device:
+                    if not [ d for d in available.pcie_device
+                             if ((d.device_id == dev.device_id) and (d.count == dev.count)) ]:
+                        self.log.debug("Matching pcie_device failed. Required: %s, Available: %s", required.pcie_device, available.pcie_device)
+                        return False
+        elif available.has_field('pcie_device'):
+            self.log.debug("Rejecting available flavor because pcie_device not required but available")
+            return False
+
+        if required.has_field('mempage_size'):
+            self.log.debug("Matching mempage_size")
+            if available.has_field('mempage_size') == False:
+                self.log.debug("Matching mempage_size failed. Not available in flavor")
+                return False
+            else:
+                if required.mempage_size != available.mempage_size:
+                    self.log.debug("Matching mempage_size failed. Required: %s, Available: %s", required.mempage_size, available.mempage_size)
+                    return False
+        elif available.has_field('mempage_size'):
+            self.log.debug("Rejecting available flavor because mempage_size not required but available")
+            return False
+        
+        if required.has_field('cpu_pinning_policy'):
+            self.log.debug("Matching cpu_pinning_policy")
+            if required.cpu_pinning_policy != 'ANY':
+                if available.has_field('cpu_pinning_policy') == False:
+                    self.log.debug("Matching cpu_pinning_policy failed. Not available in flavor")
+                    return False
+                else:
+                    if required.cpu_pinning_policy != available.cpu_pinning_policy:
+                        self.log.debug("Matching cpu_pinning_policy failed. Required: %s, Available: %s", required.cpu_pinning_policy, available.cpu_pinning_policy)
+                        return False
+        elif available.has_field('cpu_pinning_policy'):
+            self.log.debug("Rejecting available flavor because cpu_pinning_policy not required but available")
+            return False
+        
+        if required.has_field('cpu_thread_pinning_policy'):
+            self.log.debug("Matching cpu_thread_pinning_policy")
+            if available.has_field('cpu_thread_pinning_policy') == False:
+                self.log.debug("Matching cpu_thread_pinning_policy failed. Not available in flavor")
+                return False
+            else:
+                if required.cpu_thread_pinning_policy != available.cpu_thread_pinning_policy:
+                    self.log.debug("Matching cpu_thread_pinning_policy failed. Required: %s, Available: %s", required.cpu_thread_pinning_policy, available.cpu_thread_pinning_policy)
+                    return False
+        elif available.has_field('cpu_thread_pinning_policy'):
+            self.log.debug("Rejecting available flavor because cpu_thread_pinning_policy not required but available")
+            return False
+
+        if required.has_field('trusted_execution'):
+            self.log.debug("Matching trusted_execution")
+            if required.trusted_execution == True:
+                if available.has_field('trusted_execution') == False:
+                    self.log.debug("Matching trusted_execution failed. Not available in flavor")
+                    return False
+                else:
+                    if required.trusted_execution != available.trusted_execution:
+                        self.log.debug("Matching trusted_execution failed. Required: %s, Available: %s", required.trusted_execution, available.trusted_execution)
+                        return False
+        elif available.has_field('trusted_execution'):
+            self.log.debug("Rejecting available flavor because trusted_execution not required but available")
+            return False
+        
+        if required.has_field('numa_node_policy'):
+            self.log.debug("Matching numa_node_policy")
+            if available.has_field('numa_node_policy') == False:
+                self.log.debug("Matching numa_node_policy failed. Not available in flavor")
+                return False
+            else:
+                if required.numa_node_policy.has_field('node_cnt'):
+                    self.log.debug("Matching numa_node_policy node_cnt")
+                    if available.numa_node_policy.has_field('node_cnt') == False:
+                        self.log.debug("Matching numa_node_policy node_cnt failed. Not available in flavor")
+                        return False
+                    else:
+                        if required.numa_node_policy.node_cnt != available.numa_node_policy.node_cnt:
+                            self.log.debug("Matching numa_node_policy node_cnt failed. Required: %s, Available: %s",required.numa_node_policy.node_cnt, available.numa_node_policy.node_cnt)
+                            return False
+                elif available.numa_node_policy.has_field('node_cnt'):
+                    self.log.debug("Rejecting available flavor because numa node count not required but available")
+                    return False
+                
+                if required.numa_node_policy.has_field('mem_policy'):
+                    self.log.debug("Matching numa_node_policy mem_policy")
+                    if available.numa_node_policy.has_field('mem_policy') == False:
+                        self.log.debug("Matching numa_node_policy mem_policy failed. Not available in flavor")
+                        return False
+                    else:
+                        if required.numa_node_policy.mem_policy != available.numa_node_policy.mem_policy:
+                            self.log.debug("Matching numa_node_policy mem_policy failed. Required: %s, Available: %s", required.numa_node_policy.mem_policy, available.numa_node_policy.mem_policy)
+                            return False
+                elif available.numa_node_policy.has_field('mem_policy'):
+                    self.log.debug("Rejecting available flavor because num node mem_policy not required but available")
+                    return False
+
+                if required.numa_node_policy.has_field('node'):
+                    self.log.debug("Matching numa_node_policy nodes configuration")
+                    if available.numa_node_policy.has_field('node') == False:
+                        self.log.debug("Matching numa_node_policy nodes configuration failed. Not available in flavor")
+                        return False
+                    for required_node in required.numa_node_policy.node:
+                        self.log.debug("Matching numa_node_policy nodes configuration for node %s", required_node)
+                        numa_match = False
+                        for available_node in available.numa_node_policy.node:
+                            if required_node.id != available_node.id:
+                                self.log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node)
+                                continue
+                            if required_node.vcpu != available_node.vcpu:
+                                self.log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node)
+                                continue
+                            if required_node.memory_mb != available_node.memory_mb:
+                                self.log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node)
+                                continue
+                            numa_match = True
+                        if numa_match == False:
+                            return False
+                elif available.numa_node_policy.has_field('node'):
+                    self.log.debug("Rejecting available flavor because numa nodes not required but available")
+                    return False
+        elif available.has_field('numa_node_policy'):
+            self.log.debug("Rejecting available flavor because numa_node_policy not required but available")
+            return False
+        self.log.info("Successful match for Guest EPA attributes")
+        return True
+
+    def _match_vswitch_epa(self, required, available):
+        self.log.debug("VSwitch EPA match found")
+        return True
+
+    def _match_hypervisor_epa(self, required, available):
+        self.log.debug("Hypervisor EPA match found")
+        return True
+
+    def _match_host_epa(self, required, available):
+        self.log.info("Matching Host EPA attributes")
+        if required.has_field('cpu_model'):
+            self.log.debug("Matching CPU model")
+            if available.has_field('cpu_model') == False:
+                self.log.debug("Matching CPU model failed. Not available in flavor")
+                return False
+            else:
+                #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
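+                #### e.g. a requested 'PREFER_WESTMERE' is compared as 'REQUIRE_WESTMERE'
+                #### (assumes the mano-types enums use PREFER_*/REQUIRE_* prefixes)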
+                if required.cpu_model.replace('PREFER', 'REQUIRE') != available.cpu_model:
+                    self.log.debug("Matching CPU model failed. Required: %s, Available: %s", required.cpu_model, available.cpu_model)
+                    return False
+        elif available.has_field('cpu_model'):
+            self.log.debug("Rejecting available flavor because cpu_model not required but available")
+            return False
+        
+        if required.has_field('cpu_arch'):
+            self.log.debug("Matching CPU architecture")
+            if not available.has_field('cpu_arch'):
+                self.log.debug("Matching CPU architecture failed. Not available in flavor")
+                return False
+            else:
+                #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
+                if required.cpu_arch.replace('PREFER', 'REQUIRE') != available.cpu_arch:
+                    self.log.debug("Matching CPU architecture failed. Required: %s, Available: %s", required.cpu_arch, available.cpu_arch)
+                    return False
+        elif available.has_field('cpu_arch'):
+            self.log.debug("Rejecting available flavor because cpu_arch not required but available")
+            return False
+        
+        if required.has_field('cpu_vendor'):
+            self.log.debug("Matching CPU vendor")
+            if not available.has_field('cpu_vendor'):
+                self.log.debug("Matching CPU vendor failed. Not available in flavor")
+                return False
+            else:
+                #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
+                if required.cpu_vendor.replace('PREFER', 'REQUIRE') != available.cpu_vendor:
+                    self.log.debug("Matching CPU vendor failed. Required: %s, Available: %s", required.cpu_vendor, available.cpu_vendor)
+                    return False
+        elif available.has_field('cpu_vendor'):
+            self.log.debug("Rejecting available flavor because cpu_vendor not required but available")
+            return False
+
+        if required.has_field('cpu_socket_count'):
+            self.log.debug("Matching CPU socket count")
+            if not available.has_field('cpu_socket_count'):
+                self.log.debug("Matching CPU socket count failed. Not available in flavor")
+                return False
+            else:
+                if required.cpu_socket_count != available.cpu_socket_count:
+                    self.log.debug("Matching CPU socket count failed. Required: %s, Available: %s", required.cpu_socket_count, available.cpu_socket_count)
+                    return False
+        elif available.has_field('cpu_socket_count'):
+            self.log.debug("Rejecting available flavor because cpu_socket_count not required but available")
+            return False
+        
+        if required.has_field('cpu_core_count'):
+            self.log.debug("Matching CPU core count")
+            if not available.has_field('cpu_core_count'):
+                self.log.debug("Matching CPU core count failed. Not available in flavor")
+                return False
+            else:
+                if required.cpu_core_count != available.cpu_core_count:
+                    self.log.debug("Matching CPU core count failed. Required: %s, Available: %s", required.cpu_core_count, available.cpu_core_count)
+                    return False
+        elif available.has_field('cpu_core_count'):
+            self.log.debug("Rejecting available flavor because cpu_core_count not required but available")
+            return False
+        
+        if required.has_field('cpu_core_thread_count'):
+            self.log.debug("Matching CPU core thread count")
+            if not available.has_field('cpu_core_thread_count'):
+                self.log.debug("Matching CPU core thread count failed. Not available in flavor")
+                return False
+            else:
+                if required.cpu_core_thread_count != available.cpu_core_thread_count:
+                    self.log.debug("Matching CPU core thread count failed. Required: %s, Available: %s", required.cpu_core_thread_count, available.cpu_core_thread_count)
+                    return False
+        elif available.has_field('cpu_core_thread_count'):
+            self.log.debug("Rejecting available flavor because cpu_core_thread_count not required but available")
+            return False
+    
+        if required.has_field('cpu_feature'):
+            self.log.debug("Matching CPU feature list")
+            if not available.has_field('cpu_feature'):
+                self.log.debug("Matching CPU feature list failed. Not available in flavor")
+                return False
+            else:
+                for feature in required.cpu_feature:
+                    if feature not in available.cpu_feature:
+                        self.log.debug("Matching CPU feature list failed. Required feature: %s is not present. Available features: %s", feature, available.cpu_feature)
+                        return False
+        elif available.has_field('cpu_feature'):
+            self.log.debug("Rejecting available flavor because cpu_feature not required but available")
+            return False
+        self.log.info("Successful match for Host EPA attributes")            
+        return True
+
+
+    def _match_placement_group_inputs(self, required, available):
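+        """Match requested placement-group (host aggregate) inputs against the
+        ones recorded on the flavor; every required entry must also be present
+        in the available list."""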
+        self.log.info("Matching Host aggregate attributes")
+        
+        if not required and not available:
+            # Host aggregate not required and not available => success
+            self.log.info("Successful match for Host Aggregate attributes")
+            return True
+        if required and available:
+            # Host aggregate requested and available => Do a match and decide
+            required_dicts = [ x.as_dict() for x in required ]
+            available_dicts = [ y.as_dict() for y in available ]
+            for item in required_dicts:
+                if item not in available_dicts:
+                    self.log.debug("Rejecting available flavor because host Aggregate mismatch. Required: %s, Available: %s ", required, available)
+                    return False
+            self.log.info("Successful match for Host Aggregate attributes")
+            return True
+        else:
+            # Either of following conditions => Failure
+            #  - Host aggregate required but not available
+            #  - Host aggregate not required but available
+            self.log.debug("Rejecting available flavor because host Aggregate mismatch. Required: %s, Available: %s ", required, available)
+            return False
+                    
+    def match_epa_params(self, resource_info, request_params):
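+        """Match the EPA attributes of an available flavor against a resource request.
+
+        Arguments:
+            resource_info  - the available flavor record (flavorinfo-list entry)
+            request_params - the requested EPA attributes (e.g. VDUInitParams)
+
+        Returns:
+            True if all EPA categories match, False otherwise.
+        """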
+        result = self._match_vm_flavor(getattr(request_params, 'vm_flavor'),
+                                       getattr(resource_info, 'vm_flavor'))
+        if not result:
+            self.log.debug("VM Flavor mismatched")
+            return False
+
+        result = self._match_guest_epa(getattr(request_params, 'guest_epa'),
+                                       getattr(resource_info, 'guest_epa'))
+        if not result:
+            self.log.debug("Guest EPA mismatched")
+            return False
+
+        result = self._match_vswitch_epa(getattr(request_params, 'vswitch_epa'),
+                                         getattr(resource_info, 'vswitch_epa'))
+        if not result:
+            self.log.debug("Vswitch EPA mismatched")
+            return False
+
+        result = self._match_hypervisor_epa(getattr(request_params, 'hypervisor_epa'),
+                                            getattr(resource_info, 'hypervisor_epa'))
+        if not result:
+            self.log.debug("Hypervisor EPA mismatched")
+            return False
+
+        result = self._match_host_epa(getattr(request_params, 'host_epa'),
+                                      getattr(resource_info, 'host_epa'))
+        if not result:
+            self.log.debug("Host EPA mismatched")
+            return False
+
+        result = self._match_placement_group_inputs(getattr(request_params, 'host_aggregate'),
+                                                    getattr(resource_info, 'host_aggregate'))
+        if not result:
+            self.log.debug("Host Aggregate mismatched")
+            return False
+
+        return True
+
+    def _select_resource_flavor(self, account, vdu_init):
+        """ 
+            Select a existing flavor if it matches the request or create new flavor
+        """
+        flavor = RwcalYang.FlavorInfoItem()
+        flavor.name = str(uuid.uuid4())
+        epa_types = ['vm_flavor', 'guest_epa', 'host_epa', 'host_aggregate', 'hypervisor_epa', 'vswitch_epa']
+        epa_dict = {k: v for k, v in vdu_init.as_dict().items() if k in epa_types}
+        flavor.from_dict(epa_dict)
+        rc, response = self.do_get_flavor_list(account)
+        if rc != RwTypes.RwStatus.SUCCESS:
+            self.log.error("Get-flavor-info-list operation failed for cloud account: %s",
+                        account.name)
+            raise OpenstackCALOperationFailure("Get-flavor-info-list operation failed for cloud account: %s" %(account.name))
+
+        flavor_id = None
+        flavor_list = response.flavorinfo_list
+        self.log.debug("Received %d flavor information from RW.CAL", len(flavor_list))
+        for flv in flavor_list:
+            self.log.info("Attempting to match compute requirement for VDU: %s with flavor %s",
+                       vdu_init.name, flv)
+            if self.match_epa_params(flv, vdu_init):
+                self.log.info("Flavor match found for compute requirements for VDU: %s with flavor name: %s, flavor-id: %s",
+                           vdu_init.name, flv.name, flv.id)
+                return flv.id
+
+        if account.openstack.dynamic_flavor_support is False:
+            self.log.error("Unable to create flavor for compute requirement for VDU: %s. VDU instantiation failed", vdu_init.name)
+            raise OpenstackCALOperationFailure("No resource available with matching EPA attributes")
+        else:
+            rc, flavor_id = self.do_create_flavor(account, flavor)
+            if rc != RwTypes.RwStatus.SUCCESS:
+                self.log.error("Create-flavor operation failed for cloud account: %s",
+                               account.name)
+                raise OpenstackCALOperationFailure("Create-flavor operation failed for cloud account: %s" % (account.name))
+            return flavor_id
+
+    @rwcalstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        """Create a new virtual deployment unit
+
+        Arguments:
+            account     - a cloud account
+            vdu_init  - information about VDU to create (RwcalYang.VDUInitParams)
+
+        Returns:
+            The vdu_id
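+
+        A minimal caller sketch (illustrative only; assumes the decorator
+        supports the no_rwstatus convention used for do_create_vm below):
+            vdu = RwcalYang.VDUInitParams()
+            vdu.name = 'my-vdu'
+            vdu.image_id = image_id
+            cp = vdu.connection_points.add()
+            cp.name = 'cp0'
+            cp.virtual_link_id = virtual_link_id
+            vdu_id = plugin.do_create_vdu(account, vdu, no_rwstatus=True)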
+        """
+        ### First create required number of ports aka connection points
+        with self._use_driver(account) as drv:
+            ### If floating_ip is required and we don't have one, better fail before any further allocation
+            if vdu_init.has_field('allocate_public_address') and vdu_init.allocate_public_address:
+                if account.openstack.has_field('floating_ip_pool'):
+                    pool_name = account.openstack.floating_ip_pool
+                else:
+                    pool_name = None
+                floating_ip = self._allocate_floating_ip(drv, pool_name)
+            else:
+                floating_ip = None
+
+        port_list = []
+        network_list = []
+        for c_point in vdu_init.connection_points:
+            assert c_point.virtual_link_id not in network_list, \
+                "Only one port per network supported. Refer: http://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/nfv-multiple-if-1-net.html"
+            network_list.append(c_point.virtual_link_id)
+            port_id = self._create_connection_point(account, c_point)
+            port_list.append(port_id)
+
+        if not vdu_init.has_field('flavor_id'):
+            vdu_init.flavor_id = self._select_resource_flavor(account,vdu_init)
+
+        with self._use_driver(account) as drv:
+            ### Now Create VM
+            vm           = RwcalYang.VMInfoItem()
+            vm.vm_name   = vdu_init.name
+            vm.flavor_id = vdu_init.flavor_id
+            vm.image_id  = vdu_init.image_id
+            vm_network   = vm.network_list.add()
+            vm_network.network_id = drv._mgmt_network_id
+            if vdu_init.has_field('vdu_init') and vdu_init.vdu_init.has_field('userdata'):
+                vm.cloud_init.userdata = vdu_init.vdu_init.userdata
+
+            if vdu_init.has_field('node_id'):
+                vm.user_tags.node_id   = vdu_init.node_id
+
+            if vdu_init.has_field('availability_zone') and vdu_init.availability_zone.has_field('name'):
+                vm.availability_zone = vdu_init.availability_zone.name
+
+            if vdu_init.has_field('server_group'):
+                ### Get list of server group in openstack for name->id mapping
+                openstack_group_list = drv.nova_server_group_list()
+                group_id = [ i['id'] for i in openstack_group_list if i['name'] == vdu_init.server_group.name]
+                if len(group_id) != 1:
+                    raise OpenstackServerGroupError("VM placement failed. Server Group %s not found in openstack. Available groups: %s" % (vdu_init.server_group.name, [i['name'] for i in openstack_group_list]))
+                vm.server_group = group_id[0]
+
+            for port_id in port_list:
+                port = vm.port_list.add()
+                port.port_id = port_id
+
+            pci_assignement = self.prepare_vpci_metadata(drv, vdu_init)
+            if pci_assignement != '':
+                vm.user_tags.pci_assignement = pci_assignement
+
+            vm_id = self.do_create_vm(account, vm, no_rwstatus=True)
+            self.prepare_vdu_on_boot(account, vm_id, floating_ip)
+            return vm_id
+
+    def prepare_vpci_metadata(self, drv, vdu_init):
+        pci_assignement = ''
+        ### TEF-specific metadata creation for vPCI assignment
+        virtio_vpci = []
+        sriov_vpci = []
+        virtio_meta = ''
+        sriov_meta = ''
+        ### For MGMT interface
+        if vdu_init.has_field('mgmt_vpci'):
+            entry = 'u\'' + drv._mgmt_network_id + '\' :[[u\'' + vdu_init.mgmt_vpci + '\', ' + '\'\']]'
+            virtio_vpci.append(entry)
+
+        for c_point in vdu_init.connection_points:
+            if c_point.has_field('vpci'):
+                if c_point.type_yang == 'VIRTIO':
+                    entry = 'u\'' + c_point.virtual_link_id + '\' :[[u\'' + c_point.vpci + '\', ' + '\'\']]'
+                    virtio_vpci.append(entry)
+                elif c_point.type_yang == 'SR_IOV':
+                    entry = '[u\'' + c_point.vpci + '\', ' + '\'\']'
+                    sriov_vpci.append(entry)
+
+        if virtio_vpci:
+            virtio_meta += ','.join(virtio_vpci)
+
+        if sriov_vpci:
+            sriov_meta = 'u\'VF\': ['
+            sriov_meta += ','.join(sriov_vpci)
+            sriov_meta += ']'
+
+        if virtio_meta != '':
+            pci_assignement +=  virtio_meta
+            pci_assignement += ','
+
+        if sriov_meta != '':
+            pci_assignement +=  sriov_meta
+
+        if pci_assignement != '':
+            pci_assignement = '{' + pci_assignement + '}'
+
+        return pci_assignement
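+
+    # For reference, with mgmt_vpci='0000:00:0a.0' and a single SR_IOV
+    # connection point at vpci '0000:00:0b.0', the method above yields
+    # (management network id abbreviated):
+    #   {u'<mgmt-net-id>' :[[u'0000:00:0a.0', '']],u'VF': [[u'0000:00:0b.0', '']]}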
+
+
+    def prepare_vdu_on_boot(self, account, server_id, floating_ip):
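+        """Run the PREPARE_VM_CMD helper (resolved relative to the
+        openstack_drv module directory) for post-boot setup of the server;
+        '--floating_ip' is appended when a floating IP was allocated."""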
+        cmd = PREPARE_VM_CMD.format(auth_url     = account.openstack.auth_url,
+                                    username     = account.openstack.key,
+                                    password     = account.openstack.secret,
+                                    tenant_name  = account.openstack.tenant,
+                                    mgmt_network = account.openstack.mgmt_network,
+                                    server_id    = server_id)
+
+        if floating_ip is not None:
+            cmd += (" --floating_ip "+ floating_ip.ip)
+
+        exec_path = 'python3 ' + os.path.dirname(openstack_drv.__file__)
+        exec_cmd = exec_path + '/' + cmd
+        self.log.info("Running command: %s", exec_cmd)
+        subprocess.call(exec_cmd, shell=True)
+
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        """Modify Properties of existing virtual deployment unit
+
+        Arguments:
+            account     -  a cloud account
+            vdu_modify  -  Information about VDU Modification (RwcalYang.VDUModifyParams)
+        """
+        ### First create required number of ports aka connection points
+        port_list = []
+        network_list = []
+        for c_point in vdu_modify.connection_points_add:
+            assert c_point.virtual_link_id not in network_list, \
+                "Only one port per network supported. Refer: http://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/nfv-multiple-if-1-net.html"
+            network_list.append(c_point.virtual_link_id)
+            port_id = self._create_connection_point(account, c_point)
+            port_list.append(port_id)
+
+        ### Now add the ports to VM
+        for port_id in port_list:
+            with self._use_driver(account) as drv:
+                drv.nova_server_add_port(vdu_modify.vdu_id, port_id)
+
+        ### Delete the requested connection_points
+        for c_point in vdu_modify.connection_points_remove:
+            self.do_delete_port(account, c_point.connection_point_id, no_rwstatus=True)
+
+        if vdu_modify.has_field('image_id'):
+            with self._use_driver(account) as drv:
+                drv.nova_server_rebuild(vdu_modify.vdu_id, vdu_modify.image_id)
+
+
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        """Delete a virtual deployment unit
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu to be deleted
+
+        Returns:
+            None
+        """
+        if not vdu_id:
+            self.log.error("empty vdu_id during the vdu deletion")
+            return
+
+        with self._use_driver(account) as drv:
+            ### Get list of floating_ips associated with this instance and delete them
+            floating_ips = [ f for f in drv.nova_floating_ip_list() if f.instance_id == vdu_id ]
+            for f in floating_ips:
+                drv.nova_drv.floating_ip_delete(f)
+
+            ### Get list of port on VM and delete them.
+            port_list = drv.neutron_port_list(**{'device_id': vdu_id})
+
+        for port in port_list:
+            if port['device_owner'] in ('compute:None', ''):
+                self.do_delete_port(account, port['id'], no_rwstatus=True)
+
+        self.do_delete_vm(account, vdu_id, no_rwstatus=True)
+
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        """Get information about a virtual deployment unit.
+
+        Arguments:
+            account - a cloud account
+            vdu_id  - id for the vdu
+
+        Returns:
+            Object of type RwcalYang.VDUInfoParams
+        """
+        with self._use_driver(account) as drv:
+
+            ### Get list of ports excluding the one for management network
+            port_list = [p for p in drv.neutron_port_list(**{'device_id': vdu_id}) if p['network_id'] != drv.get_mgmt_network_id()]
+
+            vm = drv.nova_server_get(vdu_id)
+
+            flavor_info = None
+            if ('flavor' in vm) and ('id' in vm['flavor']):
+                try:
+                    flavor_info = drv.nova_flavor_get(vm['flavor']['id'])
+                except Exception as e:
+                    self.log.critical("Exception encountered while attempting to get flavor info for flavor_id: %s. Exception: %s", vm['flavor']['id'], str(e))
+
+            openstack_group_list = drv.nova_server_group_list()
+            server_group = [ i['name'] for i in openstack_group_list if vm['id'] in i['members']]
+            vdu_info = RwcalOpenstackPlugin._fill_vdu_info(vm,
+                                                           flavor_info,
+                                                           account.openstack.mgmt_network,
+                                                           port_list,
+                                                           server_group)
+            if vdu_info.state == 'active':
+                try:
+                    console_info = drv.nova_server_console(vdu_info.vdu_id)
+                except Exception:
+                    pass
+                else:
+                    vdu_info.console_url = console_info['console']['url']
+
+            return vdu_info
+
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu_list(self, account):
+        """Get information about all the virtual deployment units
+
+        Arguments:
+            account     - a cloud account
+
+        Returns:
+            A list of objects of type RwcalYang.VDUInfoParams
+        """
+        vnf_resources = RwcalYang.VNFResources()
+        with self._use_driver(account) as drv:
+            vms = drv.nova_server_list()
+            for vm in vms:
+                ### Get list of ports excluding one for management network
+                port_list = [p for p in drv.neutron_port_list(**{'device_id': vm['id']}) if p['network_id'] != drv.get_mgmt_network_id()]
+
+                flavor_info = None
+
+                if ('flavor' in vm) and ('id' in vm['flavor']):
+                    try:
+                        flavor_info = drv.nova_flavor_get(vm['flavor']['id'])
+                    except Exception as e:
+                        self.log.critical("Exception encountered while attempting to get flavor info for flavor_id: %s. Exception: %s", vm['flavor']['id'], str(e))
+
+                openstack_group_list = drv.nova_server_group_list()
+                server_group = [ i['name'] for i in openstack_group_list if vm['id'] in i['members']]
+
+                vdu = RwcalOpenstackPlugin._fill_vdu_info(vm,
+                                                          flavor_info,
+                                                          account.openstack.mgmt_network,
+                                                          port_list,
+                                                          server_group)
+                if vdu.state == 'active':
+                    try:
+                        console_info = drv.nova_server_console(vdu.vdu_id)
+                    except Exception:
+                        pass
+                    else:
+                        vdu.console_url = console_info['console']['url']
+                vnf_resources.vdu_info_list.append(vdu)
+            return vnf_resources
+
+
diff --git a/rwcal/plugins/vala/rwcal_vsphere/CMakeLists.txt b/rwcal/plugins/vala/rwcal_vsphere/CMakeLists.txt
new file mode 100644 (file)
index 0000000..092d941
--- /dev/null
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwcal_vsphere rwcal_vsphere.py)
diff --git a/rwcal/plugins/vala/rwcal_vsphere/Makefile b/rwcal/plugins/vala/rwcal_vsphere/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/vala/rwcal_vsphere/rift/vsphere/vsphere.py b/rwcal/plugins/vala/rwcal_vsphere/rift/vsphere/vsphere.py
new file mode 100644 (file)
index 0000000..e726ea3
--- /dev/null
@@ -0,0 +1,86 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import libcloud.compute.providers
+import libcloud.compute.types
+# Node is used to build stub node objects for destroy_node()/reboot_node().
+from libcloud.compute.base import Node
+
+from gi.repository import RwcalYang
+
+
+from . import core
+
+
+class Vsphere(core.Cloud):
+    """This class implements the abstract methods in the Cloud class.
+    This is the Vsphere CAL driver."""
+
+    def __init__(self):
+        super(Vsphere, self).__init__()
+        self._driver_class = libcloud.compute.providers.get_driver(
+                libcloud.compute.providers.Provider.VSPHERE)
+
+    def driver(self, account):
+        return self._driver_class(
+                username=account.username,
+                password=account.password,
+                url=account.url,
+                )
+
+    def get_image_list(self, account):
+        """
+        Return a list of the names of all available images.
+        """
+        images = self.driver(account).list_images()
+        return [image.name for image in images]
+
+    def create_vm(self, account, vminfo):
+        """
+        Create a new virtual machine.
+
+        @param account  - account information used to authenticate creation
+                          of the virtual machine
+        @param vminfo   - information about the virtual machine to create
+
+        """
+        node = self.driver(account).ex_create_node_from_template(
+                name=vminfo.vm_name,
+                template=vminfo.vsphere.template,
+                )
+
+        vminfo.vm_id = node.id
+
+        return node.id
+
+    def delete_vm(self, account, vm_id):
+        """
+        Delete a virtual machine.
+
+        @param vm_id     - Instance id of VM to be deleted.
+        """
+        driver = self.driver(account)
+        # destroy_node() only needs the node id; build a stub Node for it.
+        node = Node(id=vm_id, name=vm_id, state=None,
+                    public_ips=[], private_ips=[], driver=driver)
+        driver.destroy_node(node)
+
+    def reboot_vm(self, account, vm_id):
+        """
+        Reboot a virtual machine.
+
+        @param vm_id     - Instance id of VM to be rebooted.
+        """
+        driver = self.driver(account)
+        node = Node(id=vm_id, name=vm_id, state=None,
+                    public_ips=[], private_ips=[], driver=driver)
+        driver.reboot_node(node)
diff --git a/rwcal/plugins/vala/rwcal_vsphere/rwcal_vsphere.py b/rwcal/plugins/vala/rwcal_vsphere/rwcal_vsphere.py
new file mode 100644 (file)
index 0000000..2dcbd8c
--- /dev/null
@@ -0,0 +1,238 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import logging
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import (
+    GObject,
+    RwCal,
+    RwTypes,
+    RwcalYang)
+
+import rw_status
+import rwlogger
+
+logger = logging.getLogger('rwcal.vsphere')
+
+class UnknownAccountError(Exception):
+    pass
+
+
+class MissingFileError(Exception):
+    pass
+
+
+class ImageLocationError(Exception):
+    pass
+
+
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+    MissingFileError: RwTypes.RwStatus.NOTFOUND,
+    })
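+# An exception listed in this map, raised inside a method decorated with
+# @rwstatus below, is converted to the corresponding RwStatus code (for
+# example KeyError -> NOTFOUND) rather than propagating to the caller.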
+
+
+class RwcalVspherePlugin(GObject.Object, RwCal.Cloud):
+    """This class implements the CAL VALA methods for Vsphere.
+    """
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    category="rw-cal-log",
+                    subcategory="vsphere",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+            
+    @rwstatus(ret_on_failure=[None])
+    def do_get_management_network(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_tenant(self, account, name):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_tenant(self, account, tenant_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_tenant_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_role(self, account, name):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_role(self, account, role_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_role_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_image(self, account, image):
+        raise NotImplementedError()
+
+    
+    @rwstatus
+    def do_delete_image(self, account, image_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_image(self, account, image_id):
+        raise NotImplementedError()
+    
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_image_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_vm(self, account, vm):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_start_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_stop_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_reboot_vm(self, account, vm_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_vm_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_flavor(self, account, flavor):
+        raise NotImplementedError()
+    
+    @rwstatus
+    def do_delete_flavor(self, account, flavor_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_flavor(self, account, flavor_id):
+        raise NotImplementedError()
+    
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_flavor_list(self, account):
+        raise NotImplementedError()        
+            
+    @rwstatus
+    def do_add_host(self, account, host):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_remove_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_host(self, account, host_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_host_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_port(self, account, port):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_port(self, account, port_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_port_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_create_network(self, account, network):
+        raise NotImplementedError()
+
+    @rwstatus
+    def do_delete_network(self, account, network_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network(self, account, network_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[[]])
+    def do_get_network_list(self, account):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_virtual_link(self, account, link_params):
+        raise NotImplementedError()
+    
+    @rwstatus
+    def do_delete_virtual_link(self, account, link_id):
+        raise NotImplementedError()        
+    
+    @rwstatus(ret_on_failure=[None])
+    def do_get_virtual_link(self, account, link_id):
+        raise NotImplementedError()
+    
+    @rwstatus(ret_on_failure=[""])
+    def do_get_virtual_link_list(self, account):
+        raise NotImplementedError()
+    
+    @rwstatus(ret_on_failure=[""])
+    def do_create_vdu(self, account, vdu_init):
+        raise NotImplementedError()            
+    
+    @rwstatus
+    def do_modify_vdu(self, account, vdu_modify):
+        raise NotImplementedError()
+    
+    @rwstatus
+    def do_delete_vdu(self, account, vdu_id):
+        raise NotImplementedError()
+    
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vdu(self, account, vdu_id):
+        raise NotImplementedError()
+
+    @rwstatus(ret_on_failure=[""])
+    def do_get_vdu_list(self, account):
+        raise NotImplementedError()        
diff --git a/rwcal/plugins/yang/CMakeLists.txt b/rwcal/plugins/yang/CMakeLists.txt
new file mode 100644 (file)
index 0000000..a1b24fe
--- /dev/null
@@ -0,0 +1,46 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# 
+
+##
+# Parse the yang files
+##
+
+include(rift_yang)
+
+set(source_yang_files rwcal.yang)
+
+rift_generate_python_log_yang(
+    LOG_CATEGORY_NAME rw-cal-log
+    START_EVENT_ID 63000
+    OUT_YANG_FILE_VAR rw_cal_log_file
+    )
+
+rift_add_yang_target(
+  TARGET rwcal_yang
+  YANG_FILES
+    ${source_yang_files}
+    ${rw_cal_log_file}
+  COMPONENT ${PKG_LONG_NAME}
+  DEPENDS
+    mano-types_yang
+  LIBRARIES
+    rwschema_yang_gen
+    rwyang
+    rwlog
+    rwlog-mgmt_yang_gen
+    mano-types_yang_gen
+)
diff --git a/rwcal/plugins/yang/Makefile b/rwcal/plugins/yang/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/plugins/yang/rwcal.yang b/rwcal/plugins/yang/rwcal.yang
new file mode 100644 (file)
index 0000000..53caade
--- /dev/null
@@ -0,0 +1,1226 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rwcal
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rwcal";
+  prefix "rwcal";
+
+  import rw-base {
+    prefix rwbase;
+  }
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rw-log {
+    prefix "rwlog";
+  }
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  revision 2014-12-30 {
+    description
+        "Initial revision.";
+    reference
+        "RIFT RWCAL cloud data";
+  }
+
+
+  typedef connection-status {
+    description "Connection status for the cloud account";
+    type enumeration {
+      enum unknown;
+      enum validating;
+      enum success;
+      enum failure;
+    }
+  }
+
+  typedef disk-format {
+    type enumeration {
+      enum ami;
+      enum ari;
+      enum aki;
+      enum vhd;
+      enum vmdk;
+      enum raw;
+      enum qcow2;
+      enum vdi;
+      enum iso;
+    }
+  }
+
+  typedef container-format {
+    type enumeration{
+      enum ami;
+      enum ari;
+      enum aki;
+      enum bare;
+      enum ovf;
+    }
+  }
+
+  grouping connection-status {
+    container connection-status {
+      config false;
+      rwpb:msg-new CloudConnectionStatus;
+      leaf status {
+        type connection-status;
+      }
+      leaf details {
+        type string;
+      }
+    }
+  }
+
+  uses connection-status;
+
+  typedef sdn-account-type {
+    description "SDN account type";
+    type enumeration {
+      enum odl;
+      enum mock;
+      enum sdnsim;
+    }
+  }
+
+  grouping sdn-provider-auth {
+    leaf account-type {
+      type sdn-account-type;
+    }
+
+    choice provider-specific-info {
+      container odl {
+        leaf username {
+          type string {
+            length "1..255";
+          }
+        }
+
+        leaf password {
+          type string {
+            length "1..32";
+          }
+        }
+
+        leaf url {
+          type string {
+            length "1..255";
+          }
+        }
+      }
+      container mock {
+        leaf username {
+          type string;
+        }
+        leaf plugin-name {
+          type string;
+          default "rwsdn_mock";
+        }
+      }
+
+      container sdnsim {
+        leaf username {
+          type string;
+        }
+        leaf plugin-name {
+          type string;
+          default "rwsdn_sim";
+        }
+      }
+    }
+  }
+
+  grouping provider-auth {
+    leaf account-type {
+      type manotypes:cloud-account-type;
+    }
+
+    choice provider-specific-info {
+      container mock {
+        leaf username {
+          type string;
+        }
+        leaf plugin-name {
+          type string;
+          default "rwcal_mock";
+        }
+        leaf dynamic-flavor-support {
+          type boolean;
+          default true;
+        }
+      }
+      container aws {
+        leaf key {
+          type string;
+        }
+
+        leaf secret {
+          type string;
+        }
+
+        leaf region {
+          type string;
+        }
+        leaf vpcid {
+          description "VPC ID to use to instantiate EC2 instances";
+          type string;
+        }
+        leaf ssh-key {
+          description "Key pair name to connect to EC2 instance";
+          type string;
+        }
+        leaf availability-zone {
+          description "Availability zone where EC2 instance should
+              be started";
+          type string;
+        }
+        leaf default-subnet-id {
+          description "Default subnet ID to create network
+              interface at instance creation time";
+          type string;
+        }
+        leaf plugin-name {
+          type string;
+          default "rwcal_aws";
+        }
+        leaf dynamic-flavor-support {
+          type boolean;
+          default true;
+        }
+      }
+
+      container openstack {
+        leaf key {
+          type string;
+          mandatory true;
+        }
+
+        leaf secret {
+          type string;
+          mandatory true;
+        }
+
+        leaf auth_url {
+          type string;
+          mandatory true;
+        }
+
+        leaf tenant {
+          type string;
+          mandatory true;
+        }
+
+        leaf admin {
+          type boolean;
+          default false;
+        }
+
+        leaf mgmt-network {
+          type string;
+          mandatory true;
+        }
+
+        leaf plugin-name {
+          type string;
+          default "rwcal_openstack";
+        }
+
+        leaf-list security-groups {
+          type string;
+          description "Names of the security groups for the VM";
+        }
+        
+        leaf dynamic-flavor-support {
+          type boolean;
+          default true;
+        }
+
+        leaf floating-ip-pool {
+          type string;
+          description "Name of floating IP pool to use for floating IP address assignement";
+        }
+
+        leaf cert-validate {
+          type boolean;
+          default false;
+          description "Certificate validatation policy in case of SSL/TLS connection";
+        }
+        
+      }
+
+      container openmano {
+        leaf host {
+          type string;
+          default "localhost";
+        }
+
+        leaf port {
+          type uint16;
+          default 9090;
+        }
+
+        leaf tenant-id {
+          type string {
+            length "36";
+          }
+          mandatory true;
+        }
+
+        leaf plugin-name {
+          type string;
+          default "rwcal_openmano";
+        }
+      }
+
+      container vsphere {
+        leaf username {
+          type string;
+        }
+
+        leaf password {
+          type string;
+        }
+
+        leaf url {
+          type string;
+        }
+
+        leaf plugin-name {
+          type string;
+          default "rwcal-python";
+        }
+
+        leaf dynamic-flavor-support {
+          type boolean;
+          default false;
+        }
+      }
+
+      container cloudsim {
+        leaf plugin-name {
+          type string;
+          default "rwcal_cloudsim";
+        }
+        leaf dynamic-flavor-support {
+          type boolean;
+          default true;
+        }
+      }
+
+      container cloudsim_proxy {
+        leaf host {
+          type string;
+          default "localhost";
+        }
+        leaf plugin-name {
+          type string;
+          default "rwcal_cloudsimproxy";
+        }
+        leaf dynamic-flavor-support {
+          type boolean;
+          default true;
+        }
+      }
+
+      container openvim {
+        leaf host {
+          type string;
+          mandatory true;
+        }
+        leaf port {
+          type uint16;
+          default 9080;
+        }
+        leaf tenant-name {
+          type string;
+          description "Mandatory parameter to indicate openvim tenant name";
+          mandatory true;
+        }
+        leaf mgmt-network {
+          type string;
+          mandatory true;
+        }
+        leaf plugin-name {
+          type string;
+          default "rwcal_openmano_vimconnector";
+        }
+        leaf dynamic-flavor-support {
+          type boolean;
+          default true;
+        }
+        container image-management {
+          description " Information required for OpenVim image upload operation";
+
+          leaf username {
+            description "Username for host access";
+            type string;
+          }
+          leaf password {
+            description "Password for host access";
+            type string;
+          }
+          leaf image-directory-path {
+            description "Name of the directory on the host where image needs to be copied";
+            type string;
+            default "/opt/VNF/images";
+          }
+        }
+      }
+    }
+  }
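+
+  /* Illustrative account fragment (values are examples only): an openstack
+   * cloud account populates at least the mandatory leafs above, e.g.
+   *   openstack {
+   *     key "admin"; secret "***"; auth_url "http://10.0.0.1:5000/v3/";
+   *     tenant "demo"; mgmt-network "private";
+   *   }
+   */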
+  
+  grouping vm-info-item {
+    leaf vm-name {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 255;
+      type string;
+    }
+
+    leaf vm-size {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf vm-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf flavor-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf image-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf state {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf availability-zone {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf tenant-name {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf host-name {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf management-ip {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf public-ip {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf allocate-public-address {
+      rwpb:field-inline "true";
+      description "If this VM should allocate a floating public IP address";
+      type boolean;
+      default false;
+    }
+
+    list private-ip-list {
+      key "ip-address";
+
+      leaf ip-address {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list public-ip-list {
+      key "ip-address";
+
+      leaf ip-address {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list port-list {
+      key "port-id";
+      leaf port-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list network-list {
+      key "network-id";
+      leaf network-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    container cloud-init {
+      leaf userdata {
+        description
+            "The userdata field for cloud-init should contain
+             the contents of the script that cloud-init should
+             invoke when configuring the system. Note that this
+             script is expected to be in the cloud-config format";
+        type string;
+      }
+    }
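+
+    /* The userdata above is plain cloud-config text; an illustrative value
+     * (example only) would be:
+     *   #cloud-config
+     *   runcmd:
+     *     - [ sh, -c, "echo booted > /tmp/booted" ]
+     */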
+
+    container user_tags {
+
+      leaf node-id {
+        type string;
+      }
+
+      leaf pci_assignement {
+        type string;
+      }
+
+      leaf tag1 {
+        type string;
+      }
+    }
+
+    leaf server-group {
+      type string;
+    }
+  }
+
+  grouping image-info-item {
+    leaf id {
+      type string;
+    }
+
+    leaf name {
+      type string;
+    }
+
+    choice image_file {
+      leaf location {
+        description "Image URL location";
+        type string;
+      }
+
+      leaf fileno {
+        description "Image file descriptor";
+        type uint32;
+      }
+    }
+
+    leaf checksum {
+      type string;
+    }
+
+    leaf virtual_size_mbytes {
+      description "Virtual size of the image";
+      type uint64;
+    }
+
+    leaf disk_format {
+      description "Format of the Disk";
+      type disk-format;
+      default "qcow2";
+    }
+
+    leaf container_format {
+      description "Format of the container";
+      type container-format;
+      default "bare";
+    }
+
+    leaf state {
+      description "State of the Image object in CAL";
+      type enumeration {
+        enum active;
+        enum inactive;
+        enum failed;
+        enum unknown;
+      }
+      default "unknown";
+    }
+
+    container user-tags {
+      description "User tags associated with Image";
+      leaf checksum {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+  }
+
+  grouping network-info-item {
+    leaf network-name {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf network-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf subnet {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    uses manotypes:provider-network;
+  }
+
+  grouping port-info-item {
+    leaf port-name {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 255;
+      type string;
+    }
+
+    leaf port-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf port-state {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf network-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf ip-address {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf vm-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf port-type {
+      description "Type of the port";
+      type enumeration {
+        enum normal;
+        enum macvtap;
+        enum direct;
+      }
+      default "normal";
+    }
+
+    choice provider-specific-info {
+      container lxc {
+        leaf veth-name {
+          type string;
+        }
+      }
+    }
+  }
+
+  container cloud-accounts {
+    list cloud-account-list {
+      rwpb:msg-new CloudAccount;
+      key "name";
+
+      leaf name {
+        type string;
+      }
+      uses provider-auth;
+    }
+  }
+
+  container vim-resources {
+    rwpb:msg-new VimResources;
+    config false;
+
+    list vminfo-list {
+      rwpb:msg-new VMInfoItem;
+      config false;
+      key "vm-id";
+
+      uses vm-info-item;
+    }
+
+    list imageinfo-list {
+      rwpb:msg-new ImageInfoItem;
+      config false;
+      key "id";
+
+      uses image-info-item;
+    }
+
+    list tenantinfo-list {
+      rwpb:msg-new TenantInfoItem;
+      config false;
+      key "tenant-id";
+
+      leaf tenant-name {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+
+      leaf tenant-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list userinfo-list {
+      rwpb:msg-new UserInfoItem;
+      config false;
+      key "user-id";
+
+      leaf user-name{
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+
+      leaf user-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list roleinfo-list {
+      rwpb:msg-new RoleInfoItem;
+      config false;
+      key "role-id";
+
+      leaf role-name {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+
+      leaf role-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list hostinfo-list {
+      rwpb:msg-new HostInfoItem;
+      config false;
+      key "host-id";
+
+      leaf host-name {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+
+      leaf host-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+
+    list networkinfo-list {
+      rwpb:msg-new NetworkInfoItem;
+      config false;
+      key "network-id";
+
+      uses network-info-item;
+    }
+
+    list portinfo-list {
+      rwpb:msg-new PortInfoItem;
+      config false;
+      key "port-id";
+
+      uses port-info-item;
+    }
+
+    list flavorinfo-list {
+      rwpb:msg-new FlavorInfoItem;
+      config false;
+      key "id";
+
+      leaf id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+
+      leaf name {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 255;
+        type string;
+      }
+
+      uses manotypes:vm-flavor;
+      uses manotypes:guest-epa;
+      uses manotypes:vswitch-epa;
+      uses manotypes:hypervisor-epa;
+      uses manotypes:host-epa;
+      uses manotypes:placement-group-input;
+    }
+  }
+
+  grouping virtual-link-create-params {
+    leaf name {
+      description "Name of the Virtual-Link";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 255;
+      type string;
+    }
+
+    leaf subnet {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+    leaf associate-public-ip {
+      type boolean;
+      default false;
+    }
+    leaf vim-network-name {
+      description
+          "Name of network in VIM account. This is used to indicate
+          pre-provisioned network name in cloud account.";
+      type string;
+    }
+
+    uses manotypes:provider-network;
+    uses manotypes:ip-profile-info;
+  }
+
+
+  container virtual-link-req-params {
+    description "This object defines the parameters required to create a virtual-link";
+    rwpb:msg-new VirtualLinkReqParams;
+    uses virtual-link-create-params;
+  }
+
+
+  grouping connection-point-type {
+    leaf type {
+      description
+          "Specifies the type of connection point
+             VIRTIO          : Use the traditional VIRTIO interface.
+             PCI-PASSTHROUGH : Use PCI-PASSTHROUGH interface.
+             SR-IOV          : Use SR-IOV interface.";
+      type enumeration {
+        enum VIRTIO;
+        enum PCI-PASSTHROUGH;
+        enum SR-IOV;
+      }
+      default "VIRTIO";
+    }
+  }
+
+
+  grouping vdu-create-params {
+    leaf name {
+      description "Name of the VDU";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 255;
+      type string;
+    }
+
+    leaf mgmt-vpci {
+      description
+          "Specifies the virtual PCI address, expressed in
+           the format dddd:dd:dd.d, for example 0000:00:12.0.
+           This information can be passed as metadata during
+           VM creation.";
+      type string;
+    }
+
+    uses manotypes:vm-flavor;
+    uses manotypes:guest-epa;
+    uses manotypes:vswitch-epa;
+    uses manotypes:hypervisor-epa;
+    uses manotypes:host-epa;
+
+    leaf node-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf flavor-id {
+      description "CAL assigned flavor-id for the VDU image";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf image-id {
+      description "CAL assigned image-id for the VDU image";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf image-name {
+      description "Image name which can be used to lookup the image-id";
+      type string;
+      rwpb:field-inline "true";
+      rwpb:field-string-max 256;
+    }
+
+    leaf image-checksum {
+      description "Image md5sum checksum used in combination with image name to lookup image-id ";
+      type string;
+      rwpb:field-inline "true";
+      rwpb:field-string-max 32;
+    }
+
+    uses manotypes:placement-group-input;
+    
+    list connection-points {
+      key "name";
+      leaf name {
+        description "Name of the connection point";
+        type string;
+      }
+      leaf virtual-link-id {
+        description "CAL assigned resource Id for the Virtual Link";
+        type string;
+      }
+      leaf associate-public-ip {
+        type boolean;
+        default false;
+      }
+      
+      leaf vpci {
+        description
+            "Specifies the virtual PCI address, expressed in
+             the format dddd:dd:dd.d, for example 0000:00:12.0.
+             This information can be passed as metadata during
+             VM creation.";
+        type string;
+      }
+
+      leaf security-group {
+        description "Name of the security group";
+        type string;
+      }
+
+      uses connection-point-type;
+    }
+
+    leaf allocate-public-address {
+      description "If this VDU needs public IP address";
+      type boolean;
+      default false;
+    }
+
+    container vdu-init {
+      leaf userdata {
+        description
+            "The userdata field for vdu-init should contain
+             the contents of the script that cloud-init should
+             invoke when configuring the system. Note that this
+             script is expected to be in the cloud-config format.";
+        type string;
+      }
+    }
+  }
+
+  container vdu-init-params {
+    description "This object defines the parameters required to create a VDU";
+    rwpb:msg-new VDUInitParams;
+    uses vdu-create-params;
+  }
+
+  container vdu-modify-params {
+    description "This object defines the parameters required to modify VDU";
+    rwpb:msg-new VDUModifyParams;
+
+    leaf vdu-id {
+      description "CAL assigned id for VDU to which this connection point belongs";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf image-id {
+      description "CAL assigned image-id for the VDU image";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    list connection-points-add {
+      key "name";
+      leaf name {
+        description "Name of the connection point";
+        type string;
+      }
+      leaf virtual-link-id {
+        description "CAL assigned resource Id for the Virtual Link";
+        type string;
+      }
+      leaf associate-public-ip {
+        type boolean;
+        default false;
+      }
+
+      uses connection-point-type;
+    }
+
+    list connection-points-remove {
+      key "connection-point-id";
+      leaf connection-point-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+    }
+  }
+
+  grouping connection-point-info-params {
+    leaf connection-point-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf name {
+      description "Name of the connection point";
+      type string;
+    }
+
+    leaf virtual-link-id {
+      description "CAL assigned resource ID of the Virtual-Link";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf vdu-id {
+      description "CAL assigned id for VDU to which this connection point belongs";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf state {
+      description "CMP agnostic generic state of the connection point";
+      type enumeration {
+        enum active;
+        enum inactive;
+        enum failed;
+        enum unknown;
+      }
+    }
+
+    leaf ip-address {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf public-ip {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+  }
+
+  grouping virtual-link-info-params {
+    leaf name {
+      description "Name of the Virtual-Link";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 255;
+      type string;
+    }
+
+    leaf state {
+      description "State of the Virtual Link";
+      type enumeration {
+        enum active;
+        enum inactive;
+        enum failed;
+        enum unknown;
+      }
+      default "unknown";
+    }
+
+    leaf virtual-link-id {
+      description "CAL assigned resource ID of the Virtual-Link";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    list connection-points {
+      key connection-point-id;
+      uses connection-point-info-params;
+    }
+
+    leaf subnet {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    uses manotypes:provider-network;
+
+  }
+
+  grouping vdu-info-params {
+    leaf vdu-id {
+      description "CAL assigned id for VDU";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+    leaf name {
+      description "Name of the VDU";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 255;
+      type string;
+    }
+
+    leaf flavor-id {
+      description "CAL assigned flavor-id for the VDU image";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf image-id {
+      description "CAL assigned image-id for the VDU image";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf node-id {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf state {
+      description "State of the VDU";
+      type enumeration {
+        enum active;
+        enum inactive;
+        enum failed;
+        enum unknown;
+      }
+      default "unknown";
+    }
+
+    leaf management-ip {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    leaf public-ip {
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+    }
+
+    uses manotypes:vm-flavor;
+    uses manotypes:guest-epa;
+    uses manotypes:vswitch-epa;
+    uses manotypes:hypervisor-epa;
+    uses manotypes:host-epa;
+    uses manotypes:placement-group-input;
+    
+    list connection-points {
+      key connection-point-id;
+      uses connection-point-info-params;
+    }
+    leaf console-url {
+      type string;
+      description "Console URL from the VIM, if available";
+    }
+  }
+
+  container vnf-resources {
+    rwpb:msg-new VNFResources;
+    config false;
+
+    list virtual-link-info-list {
+      rwpb:msg-new VirtualLinkInfoParams;
+      config false;
+      key virtual-link-id;
+      uses virtual-link-info-params;
+    }
+
+    list vdu-info-list {
+      rwpb:msg-new VDUInfoParams;
+      config false;
+      key vdu-id;
+      uses vdu-info-params;
+    }
+  }
+}
+
+/* vim: set ts=2:sw=2: */
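
For illustration, a minimal sketch of populating the VDUInitParams message
generated from the vdu-create-params grouping above, via the RwcalYang
bindings. All field values are made up; the type_yang accessor follows the
rwpb naming convention for the reserved leaf name "type".

    import gi
    gi.require_version('RwcalYang', '1.0')
    from gi.repository import RwcalYang

    vdu = RwcalYang.VDUInitParams()
    vdu.name = "example-vdu"
    vdu.image_id = "0123abcd"          # CAL-assigned image id (hypothetical)
    vdu.allocate_public_address = True

    cp = vdu.connection_points.add()   # list entry keyed by "name"
    cp.name = "eth0"
    cp.virtual_link_id = "vlink-0"
    cp.type_yang = "VIRTIO"            # connection-point-type enum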
diff --git a/rwcal/rift/cal/client.py b/rwcal/rift/cal/client.py
new file mode 100644 (file)
index 0000000..4717b0b
--- /dev/null
@@ -0,0 +1,68 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file client.py
+@author Varun Prasad(varun.prasad@riftio.com)
+@date 2016-06-14
+"""
+
+import os
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwCal', '1.0')
+gi.require_version('RwLog', '1.0')
+
+from gi.repository import RwcalYang
+
+import rift.cal.utils as cal_utils
+
+
+class CloudsimClient(cal_utils.CloudSimCalMixin):
+    """Cloudsim client that handles interactions with the server.
+    """
+    def __init__(self, log):
+        super().__init__()
+        self.log = log
+
+    @property
+    def images(self):
+        _, images = self.cal.get_image_list(self.account)
+        return images.imageinfo_list or []
+
+    @property
+    def vlinks(self):
+        _, vlinks = self.cal.get_virtual_link_list(self.account)
+        return vlinks.virtual_link_info_list or []
+
+    @property
+    def vdus(self):
+        _, vdus = self.cal.get_vdu_list(self.account)
+        return vdus.vdu_info_list or []
+
+    def upload_image(self, location, name=None):
+        """Onboard image to cloudsim server."""
+
+        image = RwcalYang.ImageInfoItem()
+        image.name = name or os.path.basename(location)
+        image.location = location
+        image.disk_format = "qcow2"
+        rc, image.id = self.cal.create_image(self.account, image)
+
+        self.log.info("Image created: {}".format(image.as_dict()))
+
+        return image
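
A minimal usage sketch for this client, assuming a running cloudsim server
and an existing qcow2 image at a made-up path:

    import logging

    from rift.cal.client import CloudsimClient

    client = CloudsimClient(logging.getLogger("cloudsim"))
    image = client.upload_image("/tmp/example.qcow2", name="example")

    # The list properties issue fresh CAL queries on each access.
    print(image.id, [img.name for img in client.images])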
diff --git a/rwcal/rift/cal/cloudsim b/rwcal/rift/cal/cloudsim
new file mode 100644 (file)
index 0000000..fc2e4dd
--- /dev/null
@@ -0,0 +1,248 @@
+#!/usr/bin/env python3
+
+import argparse
+import logging
+import os
+import sys
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwCal', '1.0')
+gi.require_version('RwLog', '1.0')
+
+import rift.cal.server as cal_server
+import rift.cal.client as cal_client
+import rift.cal.utils as cal_utils
+import rift.rwcal.cloudsim.lxc as lxc
+import rift.rwcal.cloudsim.lvm as lvm
+import rift.rwcal.cloudsim.shell as shell
+
+from prettytable import PrettyTable
+
+
+START_PARSER = "start"
+STOP_PARSER = "stop"
+CLEAN_PARSER = "clean"
+FCLEAN_PARSER = "force-clean"
+IMAGE_PARSER = "image-create"
+STATUS_PARSER = "status"
+
+
+class CloudsimOperations(cal_utils.CloudSimCalMixin):
+    def __init__(self, args):
+        super().__init__()
+        self.log = cal_utils.Logger(
+                    daemon_mode=False,
+                    log_name="Parser",
+                    log_level=logging.getLevelName(args.log_level)).logger
+
+        self.args = args
+        self.operations = cal_server.CloudsimServerOperations(self.log)
+        self.client = cal_client.CloudsimClient(self.log)
+        self._cal, self._account = None, None
+
+    @property
+    def log_file(self):
+        return cal_utils.Logger.LOG_FILE
+
+    @cal_utils.check_and_create_bridge
+    def start_server(self):
+        self.operations.start_server(foreground=self.args.foreground)
+
+    @cal_utils.check_and_create_bridge
+    def stop_server(self):
+        self.operations.stop_server()
+
+    @cal_utils.check_and_create_bridge
+    def clean_resources(self):
+        """Clean all resource using rest APIs. """
+        self.operations.clean_server(images=self.args.all)
+
+    @cal_utils.check_and_create_bridge
+    def upload_image(self):
+        """Onboard image to cloudsim server."""
+        self.client.upload_image(self.args.location, name=self.args.name)
+
+    def force_clean_resources(self):
+        """Force clean up all resource. """
+        self.log.info("Cleaning up logs")
+        shell.command("rm -f {}".format(self.log_file))
+
+        self.log.info("Cleaning up PID file")
+        shell.command("rm -f {}".format(self.operations.PID_FILE))
+
+        try:
+            self.log.info("Purging LXC resources")
+            for container in lxc.containers():
+                lxc.stop(container)
+
+            for container in lxc.containers():
+                lxc.destroy(container)
+
+            lvm.destroy('rift')
+
+        except shell.ProcessError:
+            self.log.exception("Unable to purge resources. Trying a force clean now.")
+            lxc.force_clean()
+
+    @cal_utils.check_and_create_bridge
+    def show_status(self):
+
+        cld_tbl = PrettyTable(['PID', 'Status', 'Log file'])
+
+        pid = self.operations.pid
+        if pid:
+            cld_tbl.add_row([pid, "RUNNING", self.log_file])
+        else:
+            cld_tbl.add_row(["-", "STOPPED", self.log_file])
+
+        print ("Cloudsim server:")
+        print (cld_tbl)
+
+        if not pid:
+            return
+
+        # Images
+        img_tbl = PrettyTable(['ID', 'Name', 'Format'])
+        vlink_tbl = PrettyTable([
+                'ID', 'Name', 'Bridge Name', 'State', 'Subnet', 'Ports', "IPs"])
+        vdu_tbl = PrettyTable([
+            'ID', 'Name', 'LXC Name', 'IP', 'State', 'Ports', "VLink ID"])
+
+
+        images = self.client.images
+        if images:
+            for image in images:
+                img_tbl.add_row([image.id, image.name, image.disk_format])
+
+            print ("Images:")
+            print (img_tbl)
+
+        vlinks = self.client.vlinks
+        if vlinks:
+            for vlink in vlinks:
+
+                ports, ips = [], []
+                for cp in vlink.connection_points:
+                    ports.append("{} ({})".format(cp.name, cp.connection_point_id))
+                    ips.append(cp.ip_address)
+
+                vlink_tbl.add_row([
+                    vlink.virtual_link_id,
+                    vlink.name,
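+                    # Linux bridge names are limited to 15 characters.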
+                    vlink.name[:15],
+                    vlink.state,
+                    vlink.subnet,
+                    "\n".join(ports),
+                    "\n".join(ips)])
+
+            print ("Vlink:")
+            print (vlink_tbl)
+
+
+        lxc_to_ip = lxc.ls_info()
+        def get_lxc_name(ip):
+            for lxc_name, ips in lxc_to_ip.items():
+                if str(ip) in ips:
+                    return lxc_name
+
+            return ""
+
+        vdus = self.client.vdus
+        if vdus:
+            for vdu in vdus:
+                ports, links = [], []
+                for cp in vdu.connection_points:
+                    ports.append("{} ({})".format(cp.name, cp.ip_address))
+                    links.append(cp.virtual_link_id)
+
+                vdu_tbl.add_row([
+                    vdu.vdu_id, vdu.name, get_lxc_name(vdu.public_ip), vdu.public_ip,
+                    vdu.state, "\n".join(ports), "\n".join(links)])
+
+            print ("VDU:")
+            print (vdu_tbl)
+
+
+def parse(arguments):
+    parser = argparse.ArgumentParser(
+            description=__doc__,
+            formatter_class=argparse.RawDescriptionHelpFormatter)
+    parser.add_argument(
+            '--log-level', '-l',
+            default="WARNING",
+            type=str,
+            choices=["INFO", "DEBUG", "WARNING", "ERROR"],
+            help="Set log level, defaults to warning and above.")
+
+    subparsers = parser.add_subparsers()
+
+    start_parser = subparsers.add_parser(START_PARSER, help="Start the server")
+    start_parser.add_argument(
+            '--foreground', "-f",
+            help="Run the server in the foreground. The logs are sent to console.",
+            default=False,
+            action="store_true")
+    start_parser.set_defaults(which=START_PARSER)
+
+    stop_parser = subparsers.add_parser(STOP_PARSER, help="Stop the server")
+    stop_parser.set_defaults(which=STOP_PARSER)
+
+    clean_parser = subparsers.add_parser(
+            CLEAN_PARSER,
+            help="Clean LXC resources. By default all resources except " + \
+                 "images are cleared.")
+    clean_parser.add_argument(
+            '--all', '-a', 
+            help="Cleans up all resources including images",
+            default=False,
+            action="store_true")
+    clean_parser.set_defaults(which=CLEAN_PARSER)
+
+    fclean_parser = subparsers.add_parser(
+            FCLEAN_PARSER,
+            help="Force clean all lxc resources")
+    fclean_parser.set_defaults(which=FCLEAN_PARSER)
+
+    image_parser = subparsers.add_parser(IMAGE_PARSER, help="Upload images")
+    image_parser.add_argument(
+            '--name', '-n',
+            help="(Optional) Name of the image")
+    image_parser.add_argument(
+            '--location', '-l',
+            help="Image location. If name is not specified the basename of " + \
+                 "the image path is used.",
+            required=True)
+    image_parser.set_defaults(which=IMAGE_PARSER)
+
+    show_parser = subparsers.add_parser(
+            STATUS_PARSER,
+            help="Shows the current status of LXC")
+    show_parser.set_defaults(which=STATUS_PARSER)
+
+    args = parser.parse_args(arguments)
+
+    return args
+
+
+def main(args):
+
+    args = parse(args)
+
+    operations = CloudsimOperations(args)
+
+    if args.which == START_PARSER:
+        operations.start_server()
+    elif args.which == STOP_PARSER:
+        operations.stop_server()
+    elif args.which == FCLEAN_PARSER:
+        operations.force_clean_resources()
+    elif args.which == CLEAN_PARSER:
+        operations.clean_resources()
+    elif args.which == IMAGE_PARSER:
+        operations.upload_image()
+    elif args.which == STATUS_PARSER:
+        operations.show_status()
+
+
+if __name__ == "__main__":
+    main(sys.argv[1:])
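
A hedged sketch of exercising the sub-commands above from Python, assuming
the script is installed on PATH as "cloudsim" and is run with root
privileges:

    import subprocess

    for cmd in (["cloudsim", "start"],    # daemonize the server
                ["cloudsim", "image-create", "--location", "/tmp/example.qcow2"],
                ["cloudsim", "status"],   # print the server/image/vlink/VDU tables
                ["cloudsim", "clean", "--all"],
                ["cloudsim", "stop"]):
        subprocess.check_call(cmd)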
diff --git a/rwcal/rift/cal/rwcal_status.py b/rwcal/rift/cal/rwcal_status.py
new file mode 100644 (file)
index 0000000..6867140
--- /dev/null
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+#
+# @file rwcal_status.py
+# @brief This module defines Python utilities for dealing with rwcalstatus codes.
+
+import traceback
+import functools
+import gi
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwCal', '1.0')
+
+from gi.repository import RwTypes, RwCal
+
+def rwcalstatus_from_exc_map(exc_map):
+    """ Creates an rwcalstatus decorator from a dictionary mapping exception
+    types to rwstatus codes, and return a error object containing Exception details
+    """
+
+    # A decorator that maps a Python exception to a particular return code.
+    # Also returns an object containing the error msg, traceback and rwstatus
+    # Automatically returns RW_SUCCESS when no Python exception was thrown.
+    # Prevents us from having to use try: except: handlers around every function call.
+
+    def rwstatus(arg=None, ret_on_failure=None):
+        def decorator(func):
+            @functools.wraps(func)
+            def wrapper(*args, **kwds):
+                rwcal_status = RwCal.RwcalStatus()
+                try:
+                    ret = func(*args, **kwds)
+
+                except Exception as e:
+                    rwcal_status.traceback = traceback.format_exc()
+                    rwcal_status.error_msg = str(e)
+
+                    ret_code = [status for exc, status in exc_map.items() if isinstance(e, exc)]
+                    ret_list = [None] if ret_on_failure is None else list(ret_on_failure)
+                    if len(ret_code):
+                        rwcal_status.status = ret_code[0]
+                    else:
+                        # If it was not explicitly mapped, print the full traceback as this
+                        # is not an anticipated error.
+                        traceback.print_exc()
+                        rwcal_status.status = RwTypes.RwStatus.FAILURE
+
+                    ret_list.insert(0, rwcal_status)
+                    return tuple(ret_list)
+
+
+                rwcal_status.status = RwTypes.RwStatus.SUCCESS
+                rwcal_status.traceback = ""
+                rwcal_status.error_msg = ""
+                ret_list = [rwcal_status]
+                if ret is not None:
+                    if type(ret) == tuple:
+                        ret_list.extend(ret)
+                    else:
+                        ret_list.append(ret)
+
+                return tuple(ret_list)
+
+            return wrapper
+
+        if isinstance(arg, dict):
+            exc_map.update(arg)
+            return decorator
+        elif ret_on_failure is not None:
+            return decorator
+        else:
+            return decorator(arg)
+
+    return rwstatus
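
For illustration, a sketch of how a CAL driver might wrap its methods with
this helper; the exception-to-status mapping and the driver class are
hypothetical:

    import gi
    gi.require_version('RwTypes', '1.0')
    from gi.repository import RwTypes

    from rift.cal.rwcal_status import rwcalstatus_from_exc_map

    rwcalstatus = rwcalstatus_from_exc_map({
        KeyError: RwTypes.RwStatus.NOTFOUND,
    })

    class ExampleDriver(object):
        def __init__(self):
            self._images = {}

        @rwcalstatus
        def do_get_image(self, account, image_id):
            # A KeyError here maps to NOTFOUND; any unmapped exception
            # yields FAILURE plus the captured traceback.
            return self._images[image_id]

    # Callers always receive a tuple led by the RwcalStatus object:
    status, image = ExampleDriver().do_get_image(None, "missing")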
diff --git a/rwcal/rift/cal/server/__init__.py b/rwcal/rift/cal/server/__init__.py
new file mode 100644 (file)
index 0000000..b81f6c5
--- /dev/null
@@ -0,0 +1,26 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file __init__.py
+@author Austin Cormier(austin.cormier@riftio.com)
+@author Varun Prasad(varun.prasad@riftio.com)
+@date 2016-06-14
+"""
+
+
+from .server import CalServer
+from .operations import CloudsimServerOperations
\ No newline at end of file
diff --git a/rwcal/rift/cal/server/app.py b/rwcal/rift/cal/server/app.py
new file mode 100644 (file)
index 0000000..355d653
--- /dev/null
@@ -0,0 +1,543 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file app.py
+@author Austin Cormier(austin.cormier@riftio.com)
+@author Varun Prasad(varun.prasad@riftio.com)
+@date 2016-06-14
+"""
+
+import asyncio
+import collections
+import concurrent.futures
+import logging
+import sys
+
+import tornado
+import tornado.httpserver
+import tornado.web
+import tornado.platform.asyncio
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwCal', '1.0')
+gi.require_version('RwLog', '1.0')
+gi.require_version('RwTypes', '1.0')
+from gi.repository import (
+    RwCal,
+    RwcalYang,
+    RwTypes,
+)
+
+logger = logging.getLogger(__name__)
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+class CalCallFailure(Exception):
+    pass
+
+
+class RPCParam(object):
+    def __init__(self, key, proto_type=None):
+        self.key = key
+        self.proto_type = proto_type
+
+
+class CalRequestHandler(tornado.web.RequestHandler):
+    def initialize(self, log, loop, cal, account, executor, cal_method,
+                   input_params=None, output_params=None):
+        self.log = log
+        self.loop = loop
+        self.cal = cal
+        self.account = account
+        self.executor = executor
+        self.cal_method = cal_method
+        self.input_params = input_params
+        self.output_params = output_params
+
+    def wrap_status_fn(self, fn, *args, **kwargs):
+
+        ret = fn(*args, **kwargs)
+        if not isinstance(ret, collections.abc.Iterable):
+            ret = [ret]
+
+        rw_status = ret[0]
+
+        if type(rw_status) is RwCal.RwcalStatus:
+            rw_status = rw_status.status
+
+        if type(rw_status) != RwTypes.RwStatus:
+            raise ValueError("First return value of %s function was not a RwStatus" %
+                             fn.__name__)
+
+        if rw_status != RwTypes.RwStatus.SUCCESS:
+            msg = "%s returned %s" % (fn.__name__, str(rw_status))
+            self.log.error(msg)
+            raise CalCallFailure(msg)
+
+        return ret[1:]
+
+    @tornado.gen.coroutine
+    def post(self):
+        def body_to_cal_args():
+            cal_args = []
+            if self.input_params is None:
+                return cal_args
+
+            input_dict = tornado.escape.json_decode(self.request.body)
+            if len(input_dict) != len(self.input_params):
+                raise ValueError("Got %s parameters, expected %s" %
+                                 (len(input_dict), len(self.input_params)))
+
+            for input_param in self.input_params:
+                key = input_param.key
+                value = input_dict[key]
+                proto_type = input_param.proto_type
+
+                if proto_type is not None:
+                    proto_cls = getattr(RwcalYang, proto_type)
+                    self.log.debug("Deserializing into %s type", proto_cls)
+                    value = proto_cls.from_dict(value)
+
+                cal_args.append(value)
+
+            return cal_args
+
+        def cal_return_vals(return_vals):
+            output_params = self.output_params
+            if output_params is None:
+                output_params = []
+
+            if len(return_vals) != len(output_params):
+                raise ValueError("Got %s return values.  Expected %s",
+                                 len(return_vals), len(output_params))
+
+            write_dict = {"return_vals": []}
+            for i, output_param in enumerate(output_params):
+                key = output_param.key
+                proto_type = output_param.proto_type
+                output_value = return_vals[i]
+
+                if proto_type is not None:
+                    output_value = output_value.as_dict()
+
+                return_val = {
+                        "key": key,
+                        "value": output_value,
+                        "proto_type": proto_type,
+                        }
+
+                write_dict["return_vals"].append(return_val)
+
+            return write_dict
+
+        @asyncio.coroutine
+        def handle_request():
+            self.log.debug("Got cloudsimproxy POST request: %s", self.request.body)
+            cal_args = body_to_cal_args()
+
+            # Execute the CAL request in a separate thread to prevent
+            # blocking the main loop.
+            return_vals = yield from self.loop.run_in_executor(
+                    self.executor,
+                    self.wrap_status_fn,
+                    getattr(self.cal, self.cal_method),
+                    self.account,
+                    *cal_args
+                    )
+
+            return cal_return_vals(return_vals)
+
+        f = asyncio.ensure_future(handle_request(), loop=self.loop)
+        return_dict = yield tornado.platform.asyncio.to_tornado_future(f)
+
+        self.log.debug("Responding to %s RPC with %s", self.cal_method, return_dict)
+
+        self.clear()
+        self.set_status(200)
+        self.write(return_dict)
+
+
+class CalProxyApp(tornado.web.Application):
+    def __init__(self, log, loop, cal_interface, cal_account):
+        self.log = log
+        self.loop = loop
+        self.cal = cal_interface
+        self.account = cal_account
+
+        attrs = dict(
+            log=self.log,
+            loop=self.loop,
+            cal=cal_interface,
+            account=cal_account,
+            # Create an executor with a single worker to prevent
+            # having multiple simultaneous calls into CAL (which is not thread-safe)
+            executor=concurrent.futures.ThreadPoolExecutor(1)
+            )
+
+        def mk_attrs(cal_method, input_params=None, output_params=None):
+            new_attrs = {
+                    "cal_method": cal_method,
+                    "input_params": input_params,
+                    "output_params": output_params
+                    }
+            new_attrs.update(attrs)
+
+            return new_attrs
+
+        super(CalProxyApp, self).__init__([
+            (r"/api/get_image_list", CalRequestHandler,
+                mk_attrs(
+                    cal_method="get_image_list",
+                    output_params=[
+                        RPCParam("images", "VimResources"),
+                        ]
+                    ),
+                ),
+
+            (r"/api/create_image", CalRequestHandler,
+                mk_attrs(
+                    cal_method="create_image",
+                    input_params=[
+                        RPCParam("image", "ImageInfoItem"),
+                        ],
+                    output_params=[
+                        RPCParam("image_id"),
+                        ]
+                    ),
+                ),
+
+            (r"/api/delete_image", CalRequestHandler,
+                mk_attrs(
+                    cal_method="delete_image",
+                    input_params=[
+                        RPCParam("image_id"),
+                        ],
+                    ),
+                ),
+
+            (r"/api/get_image", CalRequestHandler,
+                mk_attrs(
+                    cal_method="get_image",
+                    input_params=[
+                        RPCParam("image_id"),
+                        ],
+                    output_params=[
+                        RPCParam("image", "ImageInfoItem"),
+                        ],
+                    ),
+                ),
+
+            (r"/api/create_vm", CalRequestHandler,
+                mk_attrs(
+                    cal_method="create_vm",
+                    input_params=[
+                        RPCParam("vm", "VMInfoItem"),
+                        ],
+                    output_params=[
+                        RPCParam("vm_id"),
+                        ],
+                    ),
+                ),
+
+            (r"/api/start_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="start_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/stop_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="stop_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/reboot_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="reboot_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_vm_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_vm_list",
+                        output_params=[
+                            RPCParam("vms", "VimResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_vm", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_vm",
+                        input_params=[
+                            RPCParam("vm_id"),
+                            ],
+                        output_params=[
+                            RPCParam("vms", "VMInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_flavor", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_flavor",
+                        input_params=[
+                            RPCParam("flavor", "FlavorInfoItem"),
+                            ],
+                        output_params=[
+                            RPCParam("flavor_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_flavor", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_flavor",
+                        input_params=[
+                            RPCParam("flavor_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_flavor_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_flavor_list",
+                        output_params=[
+                            RPCParam("flavors", "VimResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_flavor", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_flavor",
+                        input_params=[
+                            RPCParam("flavor_id"),
+                            ],
+                        output_params=[
+                            RPCParam("flavor", "FlavorInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_network", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_network",
+                        input_params=[
+                            RPCParam("network", "NetworkInfoItem"),
+                            ],
+                        output_params=[
+                            RPCParam("network_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_network", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_network",
+                        input_params=[
+                            RPCParam("network_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_network", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_network",
+                        input_params=[
+                            RPCParam("network_id"),
+                            ],
+                        output_params=[
+                            RPCParam("network", "NetworkInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_network_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_network_list",
+                        output_params=[
+                            RPCParam("networks", "VimResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_management_network", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_management_network",
+                        output_params=[
+                            RPCParam("network", "NetworkInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_port", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_port",
+                        input_params=[
+                            RPCParam("port", "PortInfoItem"),
+                            ],
+                        output_params=[
+                            RPCParam("port_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_port", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_port",
+                        input_params=[
+                            RPCParam("port_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_port", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_port",
+                        input_params=[
+                            RPCParam("port_id"),
+                            ],
+                        output_params=[
+                            RPCParam("port", "PortInfoItem"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_port_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_port_list",
+                        output_params=[
+                            RPCParam("ports", "VimResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_virtual_link", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_virtual_link",
+                        input_params=[
+                            RPCParam("link_params", "VirtualLinkReqParams"),
+                            ],
+                        output_params=[
+                            RPCParam("link_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_virtual_link", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_virtual_link",
+                        input_params=[
+                            RPCParam("link_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_virtual_link", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_virtual_link",
+                        input_params=[
+                            RPCParam("link_id"),
+                            ],
+                        output_params=[
+                            RPCParam("response", "VirtualLinkInfoParams"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_virtual_link_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_virtual_link_list",
+                        output_params=[
+                            RPCParam("resources", "VNFResources"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/create_vdu", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="create_vdu",
+                        input_params=[
+                            RPCParam("vdu_params", "VDUInitParams"),
+                            ],
+                        output_params=[
+                            RPCParam("vdu_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/modify_vdu", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="modify_vdu",
+                        input_params=[
+                            RPCParam("vdu_params", "VDUModifyParams"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/delete_vdu", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="delete_vdu",
+                        input_params=[
+                            RPCParam("vdu_id"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_vdu", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_vdu",
+                        input_params=[
+                            RPCParam("vdu_id"),
+                            ],
+                        output_params=[
+                            RPCParam("response", "VDUInfoParams"),
+                            ],
+                        ),
+                    ),
+
+            (r"/api/get_vdu_list", CalRequestHandler,
+                    mk_attrs(
+                        cal_method="get_vdu_list",
+                        output_params=[
+                            RPCParam("resources", "VNFResources"),
+                            ],
+                        ),
+                    )
+            ])
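
Each route above accepts a JSON body keyed by its input_params and answers
with a "return_vals" list, so a client-side call can be sketched with the
standard library alone (the host and VDU id are assumptions; the server
listens on port 9002 per CalServer.HTTP_PORT):

    import json
    import urllib.request

    body = json.dumps({"vdu_id": "example-vdu-id"}).encode()
    req = urllib.request.Request(
            "http://127.0.0.1:9002/api/get_vdu",
            data=body,
            headers={"Content-Type": "application/json"})

    with urllib.request.urlopen(req) as resp:
        return_vals = json.loads(resp.read().decode())["return_vals"]

    vdu_dict = return_vals[0]["value"]    # dict form of VDUInfoParams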
diff --git a/rwcal/rift/cal/server/operations.py b/rwcal/rift/cal/server/operations.py
new file mode 100644 (file)
index 0000000..316525e
--- /dev/null
@@ -0,0 +1,200 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file operations.py
+@author Varun Prasad(varun.prasad@riftio.com)
+@date 2016-06-14
+"""
+
+import daemon
+import daemon.pidfile
+import os
+import signal
+import subprocess
+import sys
+import time
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwCal', '1.0')
+gi.require_version('RwLog', '1.0')
+
+from . import server as cal_server
+import rift.cal.utils as cal_util
+import rift.rwcal.cloudsim.shell as shell
+
+
+
+class CloudsimServerOperations(cal_util.CloudSimCalMixin):
+    """Convenience class to provide start, stop and cleanup operations
+    
+    Attributes:
+        log (logging): Log instance
+        PID_FILE (str): Location to generate the PID file.
+    """
+    PID_FILE = "/var/log/rift/cloudsim_server.pid"
+
+    def __init__(self, log):
+        super().__init__()
+        self.log = log
+
+    @property
+    def pid(self):
+        pid = None
+        try:
+            with open(self.PID_FILE) as fh:
+                pid = fh.readlines()[0]
+                pid = int(pid.strip())
+        except IndexError:
+            self.log.error("Looks like the pid file does not contain a valid ID")
+        except OSError:
+            self.log.debug("No PID file found.")
+
+        return pid
+
+    def is_pid_exists(self, pid):
+        try:
+            os.kill(pid, 0)
+        except OSError:
+            return False
+
+        return True
+
+    def start_server(self, foreground=False):
+        """Start the tornado app """
+
+        # Before starting verify if all requirements are satisfied
+        cal_server.CalServer.verify_requirements(self.log)
+
+        # If the /var/log/rift directory is not present, create it first.
+        if not os.path.exists(os.path.dirname(self.PID_FILE)):
+            self.log.warning("Creating /var/log/rift directory for log storage")
+            os.makedirs(os.path.dirname(self.PID_FILE))
+
+        # Check if an existing PID file is present; if so, check whether it
+        # still has an associated process. If not, it is a stale file and
+        # must be removed, otherwise the daemon fails silently.
+        if self.pid is not None and not self.is_pid_exists(self.pid):
+            self.log.warning("Removing stale PID file")
+            os.remove(self.PID_FILE)
+
+
+
+        def start(daemon_mode=False):
+
+            log = cal_util.Logger(daemon_mode=daemon_mode, log_name='')
+            log.logger.info("Starting the cloud server.")
+            server = cal_server.CalServer()
+            server.start()
+
+        if foreground:
+            # Write the PID file for consistency
+            with open(self.PID_FILE, mode='w') as fh:
+                fh.write(str(os.getpid()) + "\n")
+            start()
+        else:
+            context = daemon.DaemonContext(
+                pidfile=daemon.pidfile.PIDLockFile(self.PID_FILE))
+            with context:
+                start(daemon_mode=True)
+
+    def stop_server(self):
+        """Stop the daemon"""
+
+        def kill_pid(pid, sig):
+            self.log.info("Sending {} to PID: {}".format(str(sig), pid))
+            os.kill(pid, sig)
+
+
+        def search_and_kill():
+            """In case the PID file is not found, and the server is still
+            running, as a last resort we search thro' the process table
+            and stop the server."""
+            cmd = ["pgrep", "-u", "daemon,root", "python3"]
+
+            try:
+                pids = subprocess.check_output(cmd)
+            except subprocess.CalledProcessError:
+                self.log.error("No Cloudsim server process found. "
+                        "Please ensure Cloudsim server is running")
+                return
+
+            pids = map(int, pids.split())
+
+            for pid in pids:
+                if pid != os.getpid():
+                    kill_sequence(pid)
+
+        def wait_till_exit(pid, timeout=30, retry_interval=1):
+            start_time = time.time()
+
+            while True:
+                if not self.is_pid_exists(pid):
+                    msg = "Killed {}".format(pid)
+                    print(msg)
+                    return True
+
+                time_elapsed = time.time() - start_time
+                time_remaining = timeout - time_elapsed
+
+                self.log.info("Process still exists, trying again in {} sec(s)"
+                    .format(retry_interval))
+
+                if time_remaining <= 0:
+                    msg = 'Process {} has not yet terminated within {} secs. Trying SIGKILL'
+                    self.log.error(msg.format(pid, timeout))
+                    return False
+
+                time.sleep(min(time_remaining, retry_interval))
+
+        def kill_sequence(pid):
+            kill_pid(pid, signal.SIGHUP)
+            wait_till_exit(pid, timeout=10, retry_interval=2)
+            kill_pid(pid, signal.SIGKILL)
+            status = wait_till_exit(pid)
+
+            if status:
+                # Remove the lock file.
+                shell.command("rm -f {}".format(self.PID_FILE))
+
+        pid = self.pid
+        if pid is not None:
+            self.log.warning("Server running with PID: {} found, "
+                             "trying to stop it".format(pid))
+            kill_sequence(pid)
+        else:
+            self.log.warning("No PID file found. Searching the process "
+                            "table to find PID")
+            search_and_kill()
+
+    def clean_server(self, images=False):
+        """Clean all resource using rest APIs. """
+
+        # Delete VDUs
+        _, vdus = self.cal.get_vdu_list(self.account)
+        for vdu in vdus.vdu_info_list:
+            self.cal.delete_vdu(self.account, vdu.vdu_id)
+
+        # Delete Vlinks
+        _, vlinks = self.cal.get_virtual_link_list(self.account)
+        for vlink in vlinks.virtual_link_info_list:
+            self.cal.delete_virtual_link(self.account, vlink.virtual_link_id)
+
+        if images:
+            _, images = self.cal.get_image_list(self.account)
+            for image in images.image_info_list:
+                self.cal.delete_image(self.account, image.id)
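
A minimal sketch of driving these operations programmatically (the logger
name and the choice to run detached are arbitrary; root privileges are
assumed):

    import logging

    from rift.cal.server import CloudsimServerOperations

    ops = CloudsimServerOperations(logging.getLogger("cloudsim"))
    ops.start_server(foreground=False)    # daemonize; PID recorded in PID_FILE
    ops.clean_server(images=True)         # delete VDUs, vlinks and images over REST
    ops.stop_server()                     # SIGHUP first, SIGKILL if still alive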
diff --git a/rwcal/rift/cal/server/server.py b/rwcal/rift/cal/server/server.py
new file mode 100644 (file)
index 0000000..ef8b0d4
--- /dev/null
@@ -0,0 +1,151 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file server.py
+@author Austin Cormier(austin.cormier@riftio.com)
+@author Varun Prasad(varun.prasad@riftio.com)
+@date 2016-06-14
+"""
+
+import asyncio
+import logging
+import os
+import signal
+import sys
+
+import tornado
+import tornado.httpserver
+import tornado.web
+import tornado.platform.asyncio
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwCal', '1.0')
+gi.require_version('RwLog', '1.0')
+gi.require_version('RwTypes', '1.0')
+from gi.repository import (
+    RwcalYang,
+    RwLog
+)
+
+import rw_peas
+import rift.tasklets
+import rift.rwcal.cloudsim.net
+import rift.rwcal.cloudsim.lvm as lvm
+import rift.rwcal.cloudsim.lxc as lxc
+import rift.rwcal.cloudsim.shell as shell
+
+from . import app
+
+logger = logging.getLogger(__name__)
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+class CalServer():
+    HTTP_PORT = 9002
+    cal_interface = None
+
+    @staticmethod
+    def verify_requirements(log):
+        """
+        Check that all the requirements are met:
+        1. bridge-utils should be installed
+        2. The user should be root
+        """
+        try:
+            shell.command('/usr/sbin/brctl show')
+        except shell.ProcessError:
+            log.exception('/usr/sbin/brctl command not found, please install '
+                'bridge-utils (yum install bridge-utils)')
+            sys.exit(1)
+
+        if os.geteuid() != 0:
+            log.error("User should be root to start the server.")
+            sys.exit(1)
+
+    def __init__(self, logging_level=logging.DEBUG):
+        self.app = None
+        self.server = None
+        self.log_hdl = RwLog.Ctx.new("a")
+        self.log = logger
+        self.log.setLevel(logging_level)
+
+    def get_cal_interface(self):
+        self.log.debug("Creating CAL interface.")
+        if CalServer.cal_interface is None:
+            plugin = rw_peas.PeasPlugin('rwcal_cloudsim', 'RwCal-1.0')
+            engine, info, extension = plugin()
+
+            CalServer.cal_interface = plugin.get_interface("Cloud")
+            CalServer.cal_interface.init(self.log_hdl)
+
+        return CalServer.cal_interface
+
+    def cleanup(self):
+        self.log.info("Cleaning up resources and backing store.")
+        for container in lxc.containers():
+            self.log.debug("Stopping {}".format(container))
+            lxc.stop(container)
+
+        for container in lxc.containers():
+            lxc.destroy(container)
+
+        lvm.destroy('rift')
+
+
+    def start(self):
+        """Start the server."""
+
+        cal = self.get_cal_interface()
+        account = RwcalYang.CloudAccount(account_type="cloudsim")
+
+        tornado.platform.asyncio.AsyncIOMainLoop().install()
+        loop = asyncio.get_event_loop()
+
+        self.app = app.CalProxyApp(self.log, loop, cal, account)
+        self.server = tornado.httpserver.HTTPServer(self.app)
+
+        self.log.info("Starting Cal Proxy Http Server on port %s",
+                      CalServer.HTTP_PORT)
+        self.server.listen(CalServer.HTTP_PORT)
+
+        def startup():
+            self.log.info("Creating a default network")
+            rift.rwcal.cloudsim.net.virsh_initialize_default()
+            self.log.info("Creating backing store")
+            lvm.create('rift')
+
+        loop.add_signal_handler(signal.SIGHUP, self.cleanup)
+        loop.add_signal_handler(signal.SIGTERM, self.cleanup)
+
+        try:
+            loop.run_in_executor(None, startup)
+            loop.run_forever()
+        except KeyboardInterrupt:
+            self.cleanup()
+        except Exception as exc:
+            self.log.exception(exc)
+
+
+    def stop(self):
+        try:
+            self.server.stop()
+        except Exception:
+            self.log.exception("Caught exception while stopping the CAL server")
+            raise
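
For completeness, a sketch of using CalServer directly in the foreground
(root is assumed; the operations wrapper above is the usual entry point):

    import logging

    from rift.cal.server.server import CalServer

    CalServer.verify_requirements(logging.getLogger("cal"))
    server = CalServer(logging_level=logging.INFO)
    server.start()    # blocks in the asyncio/tornado loop until interrupted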
diff --git a/rwcal/rift/cal/utils.py b/rwcal/rift/cal/utils.py
new file mode 100644 (file)
index 0000000..c99bf9d
--- /dev/null
@@ -0,0 +1,123 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file utils.py
+@author Varun Prasad(varun.prasad@riftio.com)
+@date 2016-06-14
+"""
+
+import logging
+import os
+import sys
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwLog', '1.0')
+
+from gi.repository import RwcalYang
+import rift.rwcal.cloudsim.net as net
+import rwlogger
+import rw_peas
+
+
+class Logger():
+    """A wrapper to hold all logging related configuration. """
+    LOG_FILE = "/var/log/rift/cloudsim_server.log"
+    FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+
+    def __init__(self, daemon_mode=True, log_name=__name__, log_level=logging.DEBUG):
+        """
+        Args:
+            daemon_mode (bool, optional): If set, logs are also written to
+                    LOG_FILE.
+            log_name (str, optional): Logger name
+            log_level (<Log level>, optional): INFO, DEBUG ..
+        """
+        self.logger = logging.getLogger(log_name)
+        logging.basicConfig(level=log_level, format=self.FORMAT)
+
+        if daemon_mode:
+            handler = logging.FileHandler(self.LOG_FILE)
+            handler.setFormatter(logging.Formatter(self.FORMAT))
+            self.logger.addHandler(handler)
+
+
+
+class CloudSimCalMixin(object):
+    """Mixin class to provide cal plugin and account access to classes.
+    """
+
+    def __init__(self):
+        self._cal, self._account = None, None
+
+    @property
+    def cal(self):
+        if not self._cal:
+            self.load_plugin()
+        return self._cal
+
+    @property
+    def account(self):
+        if not self._account:
+            self.load_plugin()
+        return self._account
+
+    def load_plugin(self):
+        """Load the cal plugin and account
+
+        Returns:
+            Tuple (Cal, Account)
+        """
+        plugin = rw_peas.PeasPlugin('rwcal_cloudsimproxy', 'RwCal-1.0')
+        engine, info, extension = plugin()
+
+        rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+        cal = plugin.get_interface("Cloud")
+        rc = cal.init(rwloggerctx)
+
+        account = RwcalYang.CloudAccount()
+        account.account_type = "cloudsim_proxy"
+        account.cloudsim_proxy.host = "192.168.122.1"
+
+        self._cal, self._account = cal, account
+
+
+def check_and_create_bridge(func):
+    """Decorator that checks if a bridge is available in the VM, if not checks
+    for permission and tries to create one.
+    """
+
+    def func_wrapper(*args, **kwargs):
+        logging.debug("Checking if bridge exists")
+
+        if net.bridge_exists('virbr0'):
+            logging.debug("Bridge exists, can proceed with further operations.")
+        else:
+            logging.warning("No Bridge exists, trying to create one.")
+
+            if os.geteuid() != 0:
+                logging.error("No bridge exists and cannot create one due to "
+                    "insufficient privileges. Please create it manually using "
+                    "'virsh net-start default' or re-run the same command as root.")
+                sys.exit(1)
+
+            net.virsh_initialize_default()
+
+        return func(*args, **kwargs)
+
+    return func_wrapper
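+
+# Illustrative usage (the wrapped function below is hypothetical, not part
+# of this module):
+#
+#     @check_and_create_bridge
+#     def start_cloudsim_server():
+#         ...  # by this point virbr0 exists, or the process has exited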
+
diff --git a/rwcal/src/CMakeLists.txt b/rwcal/src/CMakeLists.txt
new file mode 100644 (file)
index 0000000..9bbe77f
--- /dev/null
@@ -0,0 +1,38 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 05/22/2014
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+add_definitions(-std=gnu99)
+
+add_library(rwcal_api SHARED
+  rwcal_py.c)
+
+target_link_libraries(rwcal_api PRIVATE
+  rwcal-1.0
+  rwcal_yang_gen
+  rwlib
+  rw_vx_plugin
+  peas-1.0)
+
+add_dependencies(rwcal_api rwmanifest_yang.headers)
+
+install(TARGETS rwcal_api LIBRARY DESTINATION usr/lib COMPONENT ${PKG_LONG_NAME})
+
+install(PROGRAMS rwvim.py DESTINATION usr/bin COMPONENT ${PKG_LONG_NAME})
diff --git a/rwcal/src/Makefile b/rwcal/src/Makefile
new file mode 100644 (file)
index 0000000..14f3400
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 05/22/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwcal/src/rwcal_py.c b/rwcal/src/rwcal_py.c
new file mode 100644 (file)
index 0000000..1b9dbda
--- /dev/null
@@ -0,0 +1,60 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+#include <libpeas/peas.h>
+
+#include "rwcal-api.h"
+
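+/* Allocate and zero a rwcal module instance. The embedded VX plugin
+ * framework is created here and the RwCal-1.0 typelib is registered so
+ * that extensions can be resolved later. Returns NULL if the allocation
+ * or framework setup fails (the partial module is released through
+ * rwcal_module_free()). */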
+rwcal_module_ptr_t rwcal_module_alloc()
+{
+  rwcal_module_ptr_t rwcal;
+
+  rwcal = (rwcal_module_ptr_t)malloc(sizeof(struct rwcal_module_s));
+  if (!rwcal)
+    return NULL;
+
+  bzero(rwcal, sizeof(struct rwcal_module_s));
+
+  rwcal->framework = rw_vx_framework_alloc();
+  if (!rwcal->framework)
+    goto err;
+
+  rw_vx_require_repository("RwCal", "1.0");
+
+  goto done;
+
+err:
+  rwcal_module_free(&rwcal);
+
+done:
+
+  return rwcal;
+}
+
+void rwcal_module_free(rwcal_module_ptr_t * rwcal)
+{
+  if ((*rwcal)->cloud)
+    g_object_unref((*rwcal)->cloud);
+
+  free(*rwcal);
+  *rwcal = NULL;
+
+  return;
+}
diff --git a/rwcal/src/rwvim.py b/rwcal/src/rwvim.py
new file mode 100755 (executable)
index 0000000..18cf087
--- /dev/null
@@ -0,0 +1,420 @@
+#!/usr/bin/python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 07/24/2014
+# 
+
+"""
+This is a skeletal python tool that invokes the rwcal plugin
+to perform cloud operations.
+"""
+
+import argparse
+import os
+import socket
+import sys
+import logging
+
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import GObject, Peas, GLib, GIRepository
+from gi.repository import RwCal, RwTypes
+
+def resource_list_subcommand(rwcloud, cmdargs):
+    status, flavorinfo = rwcloud.get_flavor_list()
+    status, vminfo = rwcloud.get_vm_list()
+    if vminfo is None:
+        return
+
+    hosts = {}
+
+    # no yaml module installed for Python3, hack for now
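+    # The host info file is expected to look roughly like this (keys drawn
+    # from the lookups below; values are illustrative):
+    #   hosts:
+    #   -
+    #     name: host-1
+    #     tor: tor-a
+    #     ddio: enabled
+    #     cat: disabled
+    #     ovs_acceleration: disabled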
+    if cmdargs.hostinfo_file_name:
+        with open(cmdargs.hostinfo_file_name, 'r') as f:
+            lines = f.readlines()
+
+        host = None
+        for l in lines:
+            l = l.strip()
+
+            if l == 'hosts:':
+                continue
+
+            if l == '-':
+                if host:
+                    hosts[host['name']] = host
+                    #hosts.append(host)
+                host = {}
+                continue
+
+            k, v = l.split(':', 1)
+            host[k.strip()] = v.strip()
+
+        # make sure the trailing host entry is recorded as well, since
+        # entries are only flushed when the next '-' marker is seen
+        if host:
+            hosts[host['name']] = host
+
+    # Collect the unique Top of Rack (TOR) switches
+    tors = set(hosts[vm.host_name]['tor'].lower() for vm in vminfo.vminfo_list)
+
+    print("resources:")
+    for vm in vminfo.vminfo_list:
+        _, __, host_ip_list = socket.gethostbyaddr(vm.host_name)
+
+        print(" -")
+        print("    name: {}".format(vm.vm_name))
+        print("    osid: {}".format(vm.vm_id))
+        print("    host_name: {}".format(vm.host_name))
+        print("    host_ip: {}".format(host_ip_list[0]))
+        controller, scratch = cmdargs.auth_url[7:].split(':')
+        print("    controller: {}".format(controller))
+        print("    tor: {}".format(hosts[vm.host_name]['tor']))
+        print("    image_name: {}".format(vm.image_name))
+        print("    flavor_name: {}".format(vm.flavor_name))
+        print("    availability_zone: {}".format(vm.availability_zone))
+        print("    private_ip_list: {}".format(
+                sorted(v.ip_address for v in vm.private_ip_list)
+        ))
+        # select the 10.0 network for management ip
+        for p in vm.private_ip_list:
+            if p.ip_address.startswith('10.0.'):
+                print("    ip_address: {}".format(p.ip_address))
+                break
+
+        print("    public_ip_list: {}".format(
+                [v.ip_address for v in vm.public_ip_list]
+        ))
+        for flavor in flavorinfo.flavorinfo_list:
+            if flavor.name == vm.flavor_name:
+                print("    vcpu: {}".format(flavor.vcpus))
+                print("    ram: {}".format(flavor.memory))
+                print("    disk: {}".format(flavor.disk))
+                print("    host_aggregate_list: {}".format(
+                        [a.name for a in flavor.host_aggregate_list]
+                ))
+                print("    pci_passthrough_device_list: {}".format(
+                        [(d.alias,d.count) for d in flavor.pci_passthrough_device_list]
+                ))
+                # Number of openflow switches this resource connects to are the
+                # number of TOR switches for the pool for demos
+                print("    num_openflow_switches: {}".format(len(tors)))
+                # The number of legacy switches are 0 for demos
+                print("    num_legacy_switches: 0")
+                print("    epa_attributes:")
+
+                # HACK: rw_wag* VMs trusted_execution is always TRUE
+                if vm.vm_name.startswith('rw_wag'):
+                    trusted_execution = 'TRUE'
+                else:
+                    trusted_execution = str(flavor.trusted_host_only).upper()
+                print("        trusted_execution: {}".format(trusted_execution))
+                print("        ddio: {}".format(hosts[vm.host_name]['ddio']))
+                print("        cat: {}".format(hosts[vm.host_name]['cat']))
+                print("        ovs_acceleration: {}".format(hosts[vm.host_name]['ovs_acceleration']))
+                print("        mem_page_size: {}".format(flavor.mem_page_size))
+                if flavor.cpu_threads:
+                    print("        cpu_threads: {}".format(flavor.cpu_threads))
+                print("        cpu_pinning_policy: {}".format(flavor.cpu_policy))
+                # print("            numa_policy: {{ node_cnt: {} }}".format(flavor.numa_node_cnt))
+                print("        numa_node_cnt: {}".format(flavor.numa_node_cnt))
+
+                # if any of the PCI passthrough devices are Coleto Creek
+                # set qat to accel
+                qat=False
+                passthrough=False
+                rrc=False
+                for d in flavor.pci_passthrough_device_list:
+                    if 'COLETO' in d.alias:
+                        qat=True
+                        break
+                    elif '10G' in d.alias:
+                        passthrough=True
+                    elif '100G' in d.alias:
+                        passthrough=True
+                        rrc=True
+                # NOTE: The following can break if SRIOV is used
+                # But for the demos 1,2,3 SRIOV is not used
+                # This is updated logic that defaults the nic to Niantic
+                # if no 100G device is in the device list.
+                if rrc:
+                    print("        nic: RRC")
+                else:
+                    print("        nic: NIANTIC")
+
+                if passthrough or hosts[vm.host_name]['ovs_acceleration'].upper() != 'DISABLED':
+                    print("        dpdk_accelerated: TRUE")
+                else:
+                    print("        dpdk_accelerated: FALSE")
+
+                if passthrough:
+                    print("        pci_passthrough: TRUE")
+                else:
+                    print("        pci_passthrough: FALSE")
+
+                if qat:
+                    print("        quick_assist_policy: MANDATORY")
+                else:
+                    print("        quick_assist_policy: NOACCEL")
+
+                break
+    
+def resource_subcommand(rwcloud, cmdargs):
+    """Process the resources subcommand"""
+
+    if cmdargs.which == 'list':
+        resource_list_subcommand(rwcloud, cmdargs)
+
+def vm_list_subcommand(rwcloud, cmdargs):
+    status, vminfo = rwcloud.get_vm_list()
+    for vm in vminfo.vminfo_list:
+        print(vm)
+
+def vm_show_subcommand(rwcloud, cmdargs):
+    status, vm = rwcloud.get_vm(cmdargs.id)
+    print(vm)
+
+def vm_create_subcommand(cmdargs):
+    pass
+
+def vm_destroy_subcommand(cmdargs):
+    pass
+
+def vm_reboot_subcommand(cmdargs):
+    pass
+
+def vm_start_subcommand(cmdargs):
+    pass
+
+def vm_subcommand(rwcloud, cmdargs):
+    """Process the vm subcommand"""
+
+    if cmdargs.which == 'list':
+        vm_list_subcommand(rwcloud, cmdargs)
+    elif cmdargs.which == 'show':
+        vm_show_subcommand(rwcloud, cmdargs)
+    elif cmdargs.which == 'create':
+        vm_create_subcommand(cmdargs)
+    elif cmdargs.which == 'reboot':
+        vm_reboot_subcommand(cmdargs)
+    elif cmdargs.which == 'start':
+        vm_start_subcommand(cmdargs)
+    elif cmdargs.which == 'destroy':
+        vm_destroy_subcommand(cmdargs)
+
+def flavor_list_subcommand(rwcloud, cmdargs):
+    status, flavorinfo = rwcloud.get_flavor_list()
+    for flavor in flavorinfo.flavorinfo_list:
+        print(flavor)
+
+def flavor_show_subcommand(rwcloud, cmdargs):
+    status, flavor = rwcloud.get_flavor(cmdargs.id)
+    print(flavor)
+
+def flavor_subcommand(rwcloud, cmdargs):
+    """Process the flavor subcommand"""
+
+    if cmdargs.which == 'list':
+        flavor_list_subcommand(rwcloud, cmdargs)
+    elif cmdargs.which == 'show':
+        flavor_show_subcommand(rwcloud, cmdargs)
+
+
+def main(args=sys.argv[1:]):
+    logging.basicConfig(format='RWCAL %(message)s')
+
+    ##
+    # Command line argument specification
+    ##
+    desc="""This tool is used to manage the VMs"""
+    parser = argparse.ArgumentParser(description=desc)
+    subparsers = parser.add_subparsers()
+
+    # ipaddr = socket.gethostbyname(socket.getfqdn())
+    # default_auth_url = 'http://%s:35357/v2.0/tokens' % ipaddr
+    default_auth_url = "http://10.64.1.31:35357/v2.0/tokens"
+
+    parser.add_argument('-t', '--provider-type', dest='provider_type',
+                        type=str, default='OPENSTACK',
+                        help='Cloud provider type (default: %(default)s)')
+    parser.add_argument('-u', '--user-name', dest='user',
+                        type=str, default='demo',
+                        help='User name (default: %(default)s)')
+    parser.add_argument('-p', '--password', dest='passwd',
+                        type=str, default='mypasswd',
+                        help='Password (default: %(default)s)')
+    parser.add_argument('-n', '--tenant-name', dest='tenant',
+                        type=str, default='demo',
+                        help='Tenant name (default: %(default)s)')
+    parser.add_argument('-a', '--auth-url', dest='auth_url',
+                        type=str, default=default_auth_url,
+                        help='Authentication URL (default: %(default)s)')
+
+    ##
+    # Subparser for Resources
+    ##
+    resource_parser = subparsers.add_parser('resource')
+    resource_subparsers = resource_parser.add_subparsers()
+
+    # List resource subparser
+    resource_list_parser = resource_subparsers.add_parser('list')
+    resource_list_parser.set_defaults(which='list')
+    resource_list_parser.add_argument('-f', '--hostinfo-file-name', 
+                                  dest='hostinfo_file_name', 
+                                  required=True,
+                                  type=str,
+                                  help='name of the static yaml file containing host information')
+
+    resource_parser.set_defaults(func=resource_subcommand)
+
+    ##
+    # Subparser for Flavor
+    ##
+    flavor_parser = subparsers.add_parser('flavor')
+    flavor_subparsers = flavor_parser.add_subparsers()
+
+    # List flavor subparser
+    flavor_list_parser = flavor_subparsers.add_parser('list')
+    flavor_list_parser.set_defaults(which='list')
+
+    # Show flavor subparser
+    flavor_show_parser = flavor_subparsers.add_parser('show')
+    flavor_show_parser.add_argument('id', type=str)
+    flavor_show_parser.set_defaults(which='show')
+
+    flavor_parser.set_defaults(func=flavor_subcommand)
+
+    ##
+    # Subparser for VM
+    ##
+    vm_parser = subparsers.add_parser('vm')
+    vm_subparsers = vm_parser.add_subparsers()
+
+    # Create VM subparser
+    vm_create_parser = vm_subparsers.add_parser('create')
+    vm_create_parser.add_argument('-c', '--count',
+                                  type=int, default=1,
+                                  help='The number of VMs to launch '
+                                       '(default: %(default)d)')
+    vm_create_parser.add_argument('-i', '--image',
+                                  default='rwopenstack_vm',
+                                  help='Specify the image for the VM')
+    vm_create_parser.add_argument('-n', '--name',
+                                  help='Specify the name of the VM')
+    vm_create_parser.add_argument('-f', '--flavor',
+                                  help='Specify the flavor for the VM')
+    vm_create_parser.add_argument('-R', '--reserve', dest='reserve_new_vms',
+                                  action='store_true', help='reserve any newly created VMs')
+    vm_create_parser.add_argument('-s', '--single', dest='wait_after_create',
+                                  action='store_true', 
+                                  help='wait for each VM to start before creating the next')
+
+    vm_create_parser.set_defaults(which='create')
+
+    # Reboot VM subparser
+    vm_reboot_parser = vm_subparsers.add_parser('reboot')
+    group = vm_reboot_parser.add_mutually_exclusive_group()
+    group.add_argument('-n', '--vm-name', dest='vm_name',
+                       type=str,
+                       help='Specify the name of the VM')
+    group.add_argument('-a', '--reboot-all',
+                       dest='reboot_all', action='store_true',
+                       help='Reboot all VMs')
+    vm_reboot_parser.add_argument('-s', '--sleep', 
+                                  dest='sleep_time', 
+                                  type=int, default=4, 
+                                  help='time in seconds to sleep between reboots')
+    vm_reboot_parser.set_defaults(which='reboot')
+
+    # Destroy VM subparser
+    vm_destroy_parser = vm_subparsers.add_parser('destroy')
+    group = vm_destroy_parser.add_mutually_exclusive_group()
+    group.add_argument('-n', '--vm-name', dest='vm_name',
+                       type=str,
+                       help='Specify the name of the VM (accepts regular expressions)')
+    group.add_argument('-a', '--destroy-all',
+                       dest='destroy_all', action='store_true',
+                       help='Delete all VMs')
+    group.add_argument('-w', '--wait',
+                       dest='wait', action='store_true',
+                       help='destroy all and wait until all VMs have exited')
+    vm_destroy_parser.set_defaults(which='destroy')
+
+    # List VM subparser
+    vm_list_parser = vm_subparsers.add_parser('list')
+    vm_list_parser.set_defaults(which='list')
+    vm_list_parser.add_argument('-i', '--ips_only', dest='ipsonly',
+                                action='store_true',
+                                help='only list IP addresses')
+
+    # Show vm subparser
+    vm_show_parser = vm_subparsers.add_parser('show')
+    vm_show_parser.add_argument('id', type=str)
+    vm_show_parser.set_defaults(which='show')
+    vm_parser.set_defaults(func=vm_subcommand)
+
+    cmdargs = parser.parse_args(args)
+
+    # Open the peas engine
+    engine = Peas.Engine.get_default()
+
+    # Load our plugin proxy into the g_irepository namespace
+    default = GIRepository.Repository.get_default()
+    GIRepository.Repository.require(default, "RwCal", "1.0", 0)
+
+    # Enable python language loader
+    engine.enable_loader("python3")
+
+    # Set the search path for peas engine,
+    # rift-shell sets the PLUGINDIR and GI_TYPELIB_PATH
+    paths = set(os.environ['PLUGINDIR'].split(":"))
+    for path in paths:
+        engine.add_search_path(path, path)
+
+    # Load the rwcal python plugin and create the extension.
+    info = engine.get_plugin_info("rwcal-plugin")
+    if info is None:
+        print("Error loading rwcal-python plugin")
+        sys.exit(1)
+    engine.load_plugin(info)
+    rwcloud = engine.create_extension(info, RwCal.Cloud, None)
+
+    # For now cloud credentials are hard coded
+    if cmdargs.provider_type == 'OPENSTACK':
+        provider_type = RwCal.CloudType.OPENSTACK_AUTH_URL
+    elif cmdargs.provider_type == 'EC2_US_EAST':
+        provider_type = RwCal.CloudType.EC2_US_EAST
+    elif cmdargs.provider_type == 'VSPHERE':
+        provider_type = RwCal.CloudType.VSPHERE
+    else:
+        sys.exit("Cloud provider %s is NOT supported yet" % cmdargs.provider_type)
+
+
+    if 'RIFT_SHELL' not in os.environ:
+        sys.stderr.write("This tool should be run from inside a rift-shell\n")
+
+    status = rwcloud.init(provider_type,
+                          cmdargs.user,
+                          cmdargs.passwd,
+                          cmdargs.auth_url,
+                          cmdargs.tenant)
+
+    assert status == RwTypes.RwStatus.SUCCESS
+
+    cmdargs.func(rwcloud, cmdargs)
+
+if __name__ == "__main__":
+    main()
+
diff --git a/rwcal/test/CMakeLists.txt b/rwcal/test/CMakeLists.txt
new file mode 100644 (file)
index 0000000..79e66c5
--- /dev/null
@@ -0,0 +1,67 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 06/27/2014
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(subdirs cal_module_test)
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
+
+# rift_gtest(unittest_rwcal_cloud
+#   TEST_SRCS rwcal_cloud_gtest.cpp
+#   TEST_LIBS
+#     rwcal_api
+#     rwcal_yang_gen
+# )
+
+rift_gtest(unittest_rwcal_callback
+  TEST_SRCS rwcal_callback_gtest.cpp
+  TEST_LIBS
+    rwcal-1.0
+    rwcal_api
+)
+
+##
+# Add the basic plugin python test
+##
+#rift_py3test(openstack_cal_tests
+#  LONG_UNITTEST_TARGET
+#  TEST_ARGS -m pytest --junit-xml=${RIFT_UNITTEST_DIR}/openstack_cal/unittest.xml #${CMAKE_CURRENT_SOURCE_DIR}/test_rwcal_openstack_pytest.py
+#)
+
+
+add_executable(rwcal_dump rwcal_dump.cpp)
+target_link_libraries(rwcal_dump
+  rwcal_api
+  rwlib
+  rwyang
+  rwcal_yang_gen
+  CoreFoundation
+  glib-2.0
+  protobuf-c
+)
+
+# added for 4.0
+install(
+  FILES 
+    RIFT.ware-ready.py 
+    openstack_resources.py
+  DESTINATION usr/bin
+  COMPONENT ${PKG_LONG_NAME}
+)
+
diff --git a/rwcal/test/RIFT.ware-ready.py b/rwcal/test/RIFT.ware-ready.py
new file mode 100755 (executable)
index 0000000..1cd69f1
--- /dev/null
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import re
+import sys
+from rift.rwcal.openstack.openstack_drv import OpenstackDriver
+
+
+
+def test_openstack(drv):
+    print("checking endpoints")
+    for ep in ['compute', 'image', 'network', 'metering']:
+        url = drv.ks_drv.get_service_endpoint(ep, 'publicURL')
+        print("%s: %s" % (ep, url))
+        # re.search(pattern, string): the loopback prefix is the pattern
+        if re.search('127.0.0', url):
+            raise Exception("endpoint %s is using a loopback URL: %s" % (ep, url))
+
+    def verify(name, minimum, count):
+        if count < minimum:
+            raise Exception("only %d instances of %s found. Minimum is %d" % (count, name, minimum))
+        print("found %d %s" % (count, name))
+
+    verify("images",       1, len(drv.glance_image_list()))
+    verify("flavors",      1, len(drv.nova_flavor_list()))
+    verify("floating ips", 1, len(drv.nova_floating_ip_list()))
+    verify("servers",      0, len(drv.nova_server_list()))
+    verify("networks",     1, len(drv.neutron_network_list()))
+    verify("subnets",      1, len(drv.neutron_subnet_list()))
+    verify("ports",        1, len(drv.neutron_port_list()))
+    #verify("ceilometers", 1, len(drv.ceilo_meter_list()))
+
+
+
+if len(sys.argv) != 6:
+    print("ARGS are admin_user admin_password auth_url tenant_name mgmt_network_name")
+    print("e.g. %s pluto mypasswd http://10.95.4.2:5000/v3 demo private" % __file__ )
+    sys.exit(1)
+
+args=tuple(sys.argv[1:6])
+print("Using args \"%s\"" % ",".join(args))
+
+try:
+    v3 = OpenstackDriver(*args)
+except Exception as e:
+    print("\n\nunable to instantiate a endpoint: %s" % e)
+else:
+    print("\n\n endpoint instantiated")
+    try:
+        test_openstack(v3)
+    except Exception as e:
+        print("\n\nendpoint verification failed: %s" % e)
+    else:
+        print("\n\nSUCCESS! openstack is working")
+        sys.exit(0)
+
+
+
+sys.exit(1)
+
+
+# need to check if any public urls are loopbacks
+# need to check DNS is set up right 
+#    neutron subnet-show private_subnet
+#    host repo.riftio.com  10.64.1.3
+
diff --git a/rwcal/test/aws_resources.py b/rwcal/test/aws_resources.py
new file mode 100644 (file)
index 0000000..875de56
--- /dev/null
@@ -0,0 +1,370 @@
+#!/usr/bin/python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
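+# Example invocation (credentials and IDs are illustrative):
+#   ./aws_resources.py --aws-key <key> --aws-secret <secret> \
+#       --aws-region us-east-1 --aws-az us-east-1a \
+#       --mission-control --launchpad
+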
+import os
+import sys
+import uuid
+import rw_peas
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import RwcalYang
+from gi.repository.RwTypes import RwStatus
+import argparse
+import logging
+import rwlogger
+import boto3
+import botocore
+
+persistent_resources = {
+    'vms'      : [],
+    'networks' : [],
+}
+
+MISSION_CONTROL_NAME = 'mission-control'
+LAUNCHPAD_NAME = 'launchpad'
+
+RIFT_IMAGE_AMI = 'ami-7070231a'
+
+logging.basicConfig(level=logging.ERROR)
+logger = logging.getLogger('rift.cal.awsresources')
+logger.setLevel(logging.INFO)
+
+def get_cal_plugin():
+    """
+    Load the AWS cal plugin
+    """
+    plugin = rw_peas.PeasPlugin('rwcal_aws', 'RwCal-1.0')
+    engine, info, extension = plugin()
+    cal = plugin.get_interface("Cloud")
+    rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+    try:
+        rc = cal.init(rwloggerctx)
+        assert rc == RwStatus.SUCCESS
+    except Exception as e:
+        logger.error("Cal plugin instantiation failed with exception %s", repr(e))
+        raise
+    else:
+        logger.info("AWS Cal plugin successfully instantiated")
+        return cal
+
+def get_cal_account(**kwargs):
+    """
+    Returns AWS cal account
+    """
+    account = RwcalYang.CloudAccount()
+    account.account_type = "aws"
+    account.aws.key = kwargs['key']
+    account.aws.secret = kwargs['secret']
+    account.aws.region = kwargs['region']
+    if 'ssh_key' in kwargs and kwargs['ssh_key'] is not None:
+        account.aws.ssh_key = kwargs['ssh_key']
+    account.aws.availability_zone = kwargs['availability_zone']
+    if 'vpcid' in kwargs and kwargs['vpcid'] is not None:
+        account.aws.vpcid = kwargs['vpcid']
+    if 'default_subnet_id' in kwargs and kwargs['default_subnet_id'] is not None:
+        account.aws.default_subnet_id = kwargs['default_subnet_id']
+    return account
+
+class AWSResources(object):
+    """
+    Class with methods to manage AWS resources
+    """
+    def __init__(self, **kwargs):
+        self._cal = get_cal_plugin()
+        self._acct = get_cal_account(**kwargs)
+
+    def _destroy_vms(self):
+        """
+        Destroy VMs
+        """
+        logger.info("Initiating VM cleanup")
+        rc, rsp = self._cal.get_vdu_list(self._acct)
+        vdu_list = [vm for vm in rsp.vdu_info_list if vm.name not in persistent_resources['vms']]
+        logger.info("Deleting VMs : %s" %([x.name for x in vdu_list]))
+
+        for vdu in vdu_list:
+            self._cal.delete_vdu(self._acct, vdu.vdu_id)
+
+        logger.info("VM cleanup complete")
+
+    def _destroy_networks(self):
+        """
+        Destroy Networks
+        """
+        logger.info("Initiating Network cleanup")
+        driver = self._cal._get_driver(self._acct)
+        subnets = driver.get_subnet_list()
+        subnet_list = [subnet for subnet in subnets if subnet.default_for_az is False]
+
+        logger.info("Deleting Networks : %s" %([x.id for x in subnet_list]))
+        for subnet in subnet_list:
+            self._cal.delete_virtual_link(self._acct, subnet.subnet_id)
+        logger.info("Network cleanup complete")
+
+    def destroy_resource(self):
+        """
+        Destroy resources
+        """
+        logger.info("Cleaning up AWS resources")
+        self._destroy_vms()
+        self._destroy_networks()
+        logger.info("Cleaning up AWS resources.......[Done]")
+
+    def _destroy_mission_control(self):
+        """
+        Destroy Mission Control VM
+        """
+        logger.info("Initiating MC VM cleanup")
+        rc, rsp = self._cal.get_vdu_list(self._acct)
+        vdu_list = [vm for vm in rsp.vdu_info_list if vm.name == MISSION_CONTROL_NAME]
+        logger.info("Deleting VMs : %s" %([x.name for x in vdu_list]))
+
+        for vdu in vdu_list:
+            self._cal.delete_vdu(self._acct, vdu.vdu_id)
+        logger.info("MC VM cleanup complete")
+
+    def _destroy_launchpad(self):
+        """
+        Destroy Launchpad VM
+        """
+        logger.info("Initiating LP VM cleanup")
+        rc, rsp = self._cal.get_vdu_list(self._acct)
+        vdu_list = [vm for vm in rsp.vdu_info_list if vm.name == LAUNCHPAD_NAME]
+        logger.info("Deleting VMs : %s" %([x.name for x in vdu_list]))
+
+        for vdu in vdu_list:
+            self._cal.delete_vdu(self._acct, vdu.vdu_id)
+        logger.info("LP VM cleanup complete")
+        
+
+    def create_mission_control(self):
+        """
+        Create Mission Control VM in AWS
+        """ 
+        logger.info("Creating mission control VM")
+        vdu = RwcalYang.VDUInitParams()
+        vdu.name = MISSION_CONTROL_NAME
+        vdu.image_id = RIFT_IMAGE_AMI
+        vdu.flavor_id = 'c3.large'
+        vdu.allocate_public_address = True
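+        # cloud-init user-data: restart salt-master shortly after boot so
+        # that minions (e.g. the launchpad VM) can attach to this host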
+        vdu.vdu_init.userdata = "#cloud-config\n\nruncmd:\n - echo Sleeping for 5 seconds and attempting to start salt-master\n - sleep 5\n - /bin/systemctl restart salt-master.service\n"
+
+        rc, rs = self._cal.create_vdu(self._acct, vdu)
+        assert rc == RwStatus.SUCCESS
+        self._mc_id = rs
+
+        driver = self._cal._get_driver(self._acct)
+        inst = driver.get_instance(self._mc_id)
+        inst.wait_until_running()
+
+        rc, rs = self._cal.get_vdu(self._acct, self._mc_id)
+        assert rc == RwStatus.SUCCESS
+        self._mc_public_ip = rs.public_ip
+        self._mc_private_ip = rs.management_ip
+
+        logger.info("Started Mission Control VM with id %s and IP Address %s\n", self._mc_id, self._mc_public_ip)
+
+    def create_launchpad_vm(self, salt_master=None):
+        """
+        Create Launchpad VM in AWS
+        Arguments
+            salt_master (String): String with Salt master IP typically MC VM private IP
+        """
+        logger.info("Creating launchpad VM")
+        USERDATA_FILENAME = os.path.join(os.environ['RIFT_INSTALL'],
+                                 'etc/userdata-template')
+
+        try:
+            with open(USERDATA_FILENAME, 'r') as fd:
+                LP_USERDATA_FILE = fd.read()
+        except Exception as e:
+            logger.error("Unable to read userdata template %s: %s", USERDATA_FILENAME, e)
+            sys.exit(-1)
+
+        # Run the enable lab script when the openstack vm comes up
+        LP_USERDATA_FILE += "runcmd:\n"
+        LP_USERDATA_FILE += " - echo Sleeping for 5 seconds and attempting to start elastic-network-interface\n"
+        LP_USERDATA_FILE += " - sleep 5\n"
+        LP_USERDATA_FILE += " - /bin/systemctl restart elastic-network-interfaces.service\n"
+
+        if salt_master is None:
+            salt_master = self._mc_private_ip
+        node_id = str(uuid.uuid4())
+
+        vdu = RwcalYang.VDUInitParams()
+        vdu.name = LAUNCHPAD_NAME
+        vdu.image_id = RIFT_IMAGE_AMI
+        vdu.flavor_id = 'c3.xlarge'
+        vdu.allocate_public_address = True
+        vdu.vdu_init.userdata = LP_USERDATA_FILE.format(master_ip=salt_master,
+                                                        lxcname=node_id)
+        vdu.node_id = node_id
+
+        rc, rs = self._cal.create_vdu(self._acct, vdu)
+        assert rc == RwStatus.SUCCESS
+        self._lp_id = rs
+
+        driver = self._cal._get_driver(self._acct)
+        inst = driver.get_instance(self._lp_id)
+        inst.wait_until_running()
+
+        rc, rs = self._cal.get_vdu(self._acct, self._lp_id)
+        assert rc == RwStatus.SUCCESS
+
+        self._lp_public_ip = rs.public_ip
+        self._lp_private_ip = rs.management_ip
+        logger.info("Started Launchpad VM with id %s and IP Address %s\n", self._lp_id, self._lp_public_ip)
+         
+    def upload_ssh_key_to_ec2(self):
+        """
+         Upload SSH key to EC2 region
+        """
+        driver = self._cal._get_driver(self._acct)
+        key_name = os.getlogin() + '-sshkey'
+        key_path = '%s/.ssh/id_rsa.pub' % (os.environ['HOME'])
+        if os.path.isfile(key_path):
+            logger.info("Uploading ssh public key file in path %s with keypair name %s", key_path,key_name)
+            with open(key_path) as fp:
+                driver.upload_ssh_key(key_name, fp.read())
+        else:
+            logger.error("Valid Public key file %s not found", key_path)
+
+
+def main():
+    """
+    Main routine
+    """
+    parser = argparse.ArgumentParser(description='Script to manage AWS resources')
+
+    parser.add_argument('--aws-key',
+                        action = 'store',
+                        dest = 'aws_key',
+                        type = str,
+                        help='AWS key')
+
+    parser.add_argument('--aws-secret',
+                        action = 'store',
+                        dest = 'aws_secret',
+                        type = str,
+                        help='AWS secret')
+
+    parser.add_argument('--aws-region',
+                        action = 'store',
+                        dest = 'aws_region',
+                        type = str,
+                        help='AWS region')
+
+    parser.add_argument('--aws-az',
+                        action = 'store',
+                        dest = 'aws_az',
+                        type = str,
+                        help='AWS Availability zone')
+
+    parser.add_argument('--aws-sshkey',
+                        action = 'store',
+                        dest = 'aws_sshkey',
+                        type = str,
+                        help='AWS SSH Key to login to instance')
+
+    parser.add_argument('--aws-vpcid',
+                        action = 'store',
+                        dest = 'aws_vpcid',
+                        type = str,
+                        help='AWS VPC ID to use to indicate non default VPC')
+
+    parser.add_argument('--aws-default-subnet',
+                        action = 'store',
+                        dest = 'aws_default_subnet',
+                        type = str,
+                        help='AWS Default subnet id in VPC to be used for mgmt network')
+
+    parser.add_argument('--mission-control',
+                        action = 'store_true',
+                        dest = 'mission_control',
+                        help='Create Mission Control VM')
+
+    parser.add_argument('--launchpad',
+                        action = 'store_true',
+                        dest = 'launchpad',
+                        help='Create LaunchPad VM')
+
+    parser.add_argument('--salt-master',
+                        action = 'store',
+                        dest = 'salt_master',
+                        type = str,
+                        help='IP Address of salt controller. Required, if only launchpad  VM is being created.')
+
+    parser.add_argument('--cleanup',
+                        action = 'store',
+                        dest = 'cleanup',
+                        nargs = '+',
+                        type = str,
+                        help = 'Perform resource cleanup for AWS installation. \n Possible options are {all, mc, lp,  vms, networks }')
+
+    parser.add_argument('--upload-ssh-key',
+                         action = 'store_true',
+                         dest = 'upload_ssh_key',
+                         help = 'Upload users SSH public key ~/.ssh/id_rsa.pub')  
+
+    argument = parser.parse_args()
+
+    if (argument.aws_key is None or argument.aws_secret is None or
+            argument.aws_region is None or argument.aws_az is None):
+        logger.error("Missing mandatory params. AWS key, secret, region and availability zone are mandatory")
+        sys.exit(-1)
+
+    # store_true flags default to False (not None), so test truthiness here
+    if (argument.cleanup is None and not argument.mission_control and
+            not argument.launchpad and not argument.upload_ssh_key):
+        logger.error('Insufficient parameters')
+        sys.exit(-1)
+
+    ### Start processing
+    logger.info("Instantiating cloud-abstraction-layer")
+    drv = AWSResources(key=argument.aws_key, secret=argument.aws_secret,
+                       region=argument.aws_region, availability_zone=argument.aws_az,
+                       ssh_key=argument.aws_sshkey, vpcid=argument.aws_vpcid,
+                       default_subnet_id=argument.aws_default_subnet)
+    logger.info("Instantiating cloud-abstraction-layer.......[Done]")
+
+    if argument.upload_ssh_key:
+        drv.upload_ssh_key_to_ec2()
+
+    if argument.cleanup is not None:
+        for r_type in argument.cleanup:
+            if r_type == 'all':
+                drv.destroy_resource()
+                break
+            if r_type == 'vms':
+                drv._destroy_vms()
+            if r_type == 'networks':
+                drv._destroy_networks()
+            if r_type == 'mc':
+                drv._destroy_mission_control()
+            if r_type == 'lp':
+                drv._destroy_launchpad()
+
+    if argument.mission_control:
+        drv.create_mission_control()
+
+    if argument.launchpad:
+        if argument.salt_master is None and not argument.mission_control:
+            logger.error('Salt Master IP address not provided to start Launchpad.')
+            sys.exit(-2)
+
+        drv.create_launchpad_vm(argument.salt_master)
+
+if __name__ == '__main__':
+    main()
diff --git a/rwcal/test/cal_module_test/CMakeLists.txt b/rwcal/test/cal_module_test/CMakeLists.txt
new file mode 100644 (file)
index 0000000..f637c28
--- /dev/null
@@ -0,0 +1,41 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Varun Prasad
+# Creation Date: 21/01/2016
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+install(
+  PROGRAMS
+    cal_module_test
+  DESTINATION usr/rift/systemtest/cal_module_test
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    pytest/conftest.py
+    pytest/cal_module_test.py
+  DESTINATION usr/rift/systemtest/cal_module_test/pytest
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    racfg/cal_module_test.racfg
+  DESTINATION usr/rift/systemtest/cal_module_test
+  COMPONENT ${PKG_LONG_NAME})
+
diff --git a/rwcal/test/cal_module_test/cal_module_test b/rwcal/test/cal_module_test/cal_module_test
new file mode 100755 (executable)
index 0000000..d7f21b6
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+SYS_TEST=$RIFT_INSTALL/usr/rift/systemtest/
+PYTEST_DIR=$SYS_TEST/cal_module_test/pytest
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider ${PYTEST_DIR}/cal_module_test.py"
+test_prefix="cal_module_test"
+TEST_NAME="TC_CAL_MODULE_TEST"
+RESULT_XML="cal_module_test.xml"
+
+parse_args "${@}"
+test_cmd="${SCRIPT_TEST}"
+append_args test_cmd os-host "\"${cloud_host}\""
+append_args test_cmd os-user "\"${user}\""
+append_args test_cmd os-tenant ${tenant[0]}
+append_args test_cmd junitprefix "\"${TEST_NAME}\""
+append_args test_cmd junitxml "\"${RIFT_MODULE_TEST}/${RESULT_XML}\""
+
+cd "${PYTEST_DIR}"
+eval ${test_cmd}
+
diff --git a/rwcal/test/cal_module_test/pytest/cal_module_test.py b/rwcal/test/cal_module_test/pytest/cal_module_test.py
new file mode 100644 (file)
index 0000000..ca3568f
--- /dev/null
@@ -0,0 +1,669 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file cal_module_test.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 22-Jan-2016
+
+"""
+
+import abc
+import logging
+import os
+import multiprocessing
+import signal
+import time
+import uuid
+import hashlib
+
+import pytest
+
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import RwcalYang
+from gi.repository.RwTypes import RwStatus
+import rift.cal.server as cal_server  # needed by Cloudsim.start_server below
+import rw_peas
+import rwlogger
+
+
+logger = logging.getLogger('rwcal')
+logging.basicConfig(level=logging.INFO)
+
+
+class CloudConfig(object):
+    def __init__(self, cal, account):
+        self.cal = cal
+        self.account = account
+
+    def check_state(self, object_id, object_api, expected_state, state_attr_name="state"):
+        """For a given object (Vm, port etc) checks if the object has
+        reached the expected state.
+        """
+        get_object = getattr(self.cal, object_api)
+        for i in range(100):  # 100 poll iterations...
+            rc, rs = get_object(self.account, object_id)
+
+            curr_state = getattr(rs, state_attr_name)
+            if curr_state == expected_state:
+                break
+            else:
+                time.sleep(2)
+
+        rc, rs = get_object(self.account, object_id)
+        assert rc == RwStatus.SUCCESS
+        assert getattr(rs, state_attr_name) == expected_state
+
+    def start_server(self):
+        pass
+
+    def stop_server(self):
+        pass
+
+    @abc.abstractmethod
+    def _cal(self):
+        pass
+
+    @abc.abstractmethod
+    def _account(self, option):
+        pass
+
+    @abc.abstractmethod
+    def flavor(self):
+        pass
+
+    @abc.abstractmethod
+    def vdu(self):
+        pass
+
+    @abc.abstractmethod
+    def image(self):
+        pass
+
+    @abc.abstractmethod
+    def virtual_link(self):
+        pass
+
+
+class Aws(CloudConfig):
+    def __init__(self, option):
+        """
+        Args:
+            option (OptionParser): OptionParser instance.
+        """
+        self.image_id = 'ami-7070231a'
+        self.virtual_link_id = None
+        self.flavor_id = None
+        self.vdu_id = None
+
+        super().__init__(self._cal(), self._account(option))
+
+    def _cal(self):
+        """
+        Loads rw.cal plugin via libpeas
+        """
+        plugin = rw_peas.PeasPlugin('rwcal_aws', 'RwCal-1.0')
+
+        engine, info, extension = plugin()
+
+        # Get the RwLogger context
+        rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+
+        cal = plugin.get_interface("Cloud")
+        try:
+            rc = cal.init(rwloggerctx)
+            assert rc == RwStatus.SUCCESS
+        except Exception:
+            logger.error("ERROR: Cal plugin instantiation failed. Aborting tests")
+            raise
+        else:
+            logger.info("AWS Cal plugin successfully instantiated")
+        return cal
+
+    def _account(self, option):
+        """
+        Args:
+            option (OptionParser): OptionParser instance.
+
+        Return:
+            CloudAccount details
+        """
+        account = RwcalYang.CloudAccount.from_dict({
+                "account_type": "aws",
+                "aws": {
+                    "key": option.aws_user,
+                    "secret": option.aws_password,
+                    "region": option.aws_region,
+                    "availability_zone": option.aws_zone,
+                    "ssh_key": option.aws_ssh_key
+                }
+            })
+
+        return account
+
+    def flavor(self):
+        """
+        Returns:
+            FlavorInfoItem
+        """
+        flavor = RwcalYang.FlavorInfoItem.from_dict({
+                    "name": str(uuid.uuid4()),
+                    "vm_flavor": {
+                        "memory_mb": 1024,
+                        "vcpu_count": 1,
+                        "storage_gb": 0
+                    }
+            })
+
+        return flavor
+
+    def vdu(self):
+        """Provide AWS specific VDU config.
+
+        Returns:
+            VDUInitParams
+        """
+        vdu = RwcalYang.VDUInitParams.from_dict({
+                "name": str(uuid.uuid4()),
+                "node_id": "123456789012345",
+                "image_id": self.image_id,
+                "flavor_id": "t2.micro"
+            })
+
+        c1 = vdu.connection_points.add()
+        c1.name = str(uuid.uuid4())
+        c1.virtual_link_id = self.virtual_link_id
+
+        return vdu
+
+    def image(self):
+        raise NotImplementedError("Image create APIs are not implemented for AWS")
+
+    def virtual_link(self):
+        """Provide Vlink config
+
+        Returns:
+            VirtualLinkReqParams
+        """
+        vlink = RwcalYang.VirtualLinkReqParams.from_dict({
+                    "name": str(uuid.uuid4()),
+                    "subnet": '172.31.64.0/20',
+            })
+
+        return vlink
+
+
+class Cloudsim(CloudConfig):
+    def __init__(self, option):
+        self.image_id = None
+        self.virtual_link_id = None
+        self.flavor_id = None
+        self.vdu_id = None
+
+        self.server_process = None
+
+        super().__init__(self._cal(), self._account(option))
+
+    def _md5(self, fname, blksize=1048576):
+        hash_md5 = hashlib.md5()
+        with open(fname, "rb") as f:
+            for chunk in iter(lambda: f.read(blksize), b""):
+                hash_md5.update(chunk)
+        return hash_md5.hexdigest()
+
+    def start_server(self):
+        logger = logging.getLogger(__name__)
+        server = cal_server.CloudsimServerOperations(logger)
+        self.server_process = multiprocessing.Process(
+                target=server.start_server,
+                args=(True,))
+        self.server_process.start()
+
+        # Sleep till the backup store is set up
+        time.sleep(30)
+
+    def stop_server(self):
+        self.server_process.terminate()
+
+        # If the process is not killed within the timeout, send a SIGKILL.
+        time.sleep(15)
+        if self.server_process.is_alive():
+            os.kill(self.server_process.pid, signal.SIGKILL)
+
+    def _cal(self):
+        """
+        Loads rw.cal plugin via libpeas
+        """
+        plugin = rw_peas.PeasPlugin('rwcal_cloudsimproxy', 'RwCal-1.0')
+        engine, info, extension = plugin()
+
+        # Get the RwLogger context
+        rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+
+        cal = plugin.get_interface("Cloud")
+        try:
+            rc = cal.init(rwloggerctx)
+            assert rc == RwStatus.SUCCESS
+        except Exception:
+            logger.error("ERROR: Cal plugin instantiation failed. Aborting tests")
+            raise
+        else:
+            logger.info("Cloudsim Cal plugin successfully instantiated")
+        return cal
+
+    def _account(self, option):
+        """
+        Args:
+            option (OptionParser): OptionParser instance.
+
+        Return:
+            CloudAccount details
+        """
+        account = RwcalYang.CloudAccount.from_dict({
+                'name': "cloudsim",
+                'account_type':'cloudsim_proxy'})
+
+        return account
+
+    def image(self):
+        """Provides Image config for openstack.
+
+        Returns:
+            ImageInfoItem
+        """
+        image = RwcalYang.ImageInfoItem.from_dict({
+                "name": str(uuid.uuid4()),
+                "location": os.path.join(os.getenv("RIFT_ROOT"), "images/rift-root-latest.qcow2"),
+                "disk_format": "qcow2",
+                "container_format": "bare",
+                "checksum": self._md5(os.path.join(os.getenv("RIFT_ROOT"), "images/rift-root-latest.qcow2")),
+            })
+        return image
+
+    def flavor(self):
+        """Flavor config for openstack
+
+        Returns:
+            FlavorInfoItem
+        """
+        flavor = RwcalYang.FlavorInfoItem.from_dict({
+                "name": str(uuid.uuid4()),
+                "vm_flavor": {
+                        "memory_mb": 16392,
+                        "vcpu_count": 4,
+                        "storage_gb": 40
+                }})
+
+        return flavor
+
+    def vdu(self):
+        """Returns VDU config
+
+        Returns:
+            VDUInitParams
+        """
+        vdu = RwcalYang.VDUInitParams.from_dict({
+                "name": str(uuid.uuid4()),
+                "node_id": "123456789012345",
+                "image_id": self.image_id,
+                "flavor_id": self.flavor_id,
+            })
+
+        c1 = vdu.connection_points.add()
+        c1.name = str(uuid.uuid4())
+        c1.virtual_link_id = self.virtual_link_id
+
+        return vdu
+
+    def virtual_link(self):
+        """vlink config for Openstack
+
+        Returns:
+            VirtualLinkReqParams
+        """
+        vlink = RwcalYang.VirtualLinkReqParams.from_dict({
+                    "name": str(uuid.uuid4()),
+                    "subnet": '192.168.1.0/24',
+            })
+
+        return vlink
+
+
+class Openstack(CloudConfig):
+    def __init__(self, option):
+        """
+        Args:
+            option (OptionParser)
+        """
+        self.image_id = None
+        self.virtual_link_id = None
+        self.flavor_id = None
+        self.vdu_id = None
+
+        super().__init__(self._cal(), self._account(option))
+
+    def _cal(self):
+        """
+        Loads rw.cal plugin via libpeas
+        """
+        plugin = rw_peas.PeasPlugin('rwcal_openstack', 'RwCal-1.0')
+        engine, info, extension = plugin()
+
+        # Get the RwLogger context
+        rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+
+        cal = plugin.get_interface("Cloud")
+        try:
+            rc = cal.init(rwloggerctx)
+            assert rc == RwStatus.SUCCESS
+        except Exception:
+            logger.error("ERROR: Cal plugin instantiation failed. Aborting tests")
+            raise
+        else:
+            logger.info("Openstack Cal plugin successfully instantiated")
+        return cal
+
+    def _account(self, option):
+        """Cloud account information for Account
+
+        Returns:
+            CloudAccount
+        """
+        acct = RwcalYang.CloudAccount.from_dict({
+            "account_type": "openstack",
+            "openstack": {
+                    "key": option.os_user,
+                    "secret": option.os_password,
+                    "auth_url": 'http://{}:5000/v3/'.format(option.os_host),
+                    "tenant": option.os_tenant,
+                    "mgmt_network": option.os_network
+                }
+            })
+
+        return acct
+    
+    def _md5(self, fname, blksize=1048576):
+        hash_md5 = hashlib.md5()
+        with open(fname, "rb") as f:
+            for chunk in iter(lambda: f.read(blksize), b""):
+                hash_md5.update(chunk)
+        return hash_md5.hexdigest()
+
+    def image(self):
+        """Provides Image config for openstack.
+
+        Returns:
+            ImageInfoItem
+        """
+        image = RwcalYang.ImageInfoItem.from_dict({
+                "name": str(uuid.uuid4()),
+                "location": os.path.join(os.getenv("RIFT_ROOT"), "images/rift-root-latest.qcow2"),
+                "disk_format": "qcow2",
+                "container_format": "bare",
+                "checksum": self._md5(os.path.join(os.getenv("RIFT_ROOT"), "images/rift-root-latest.qcow2")),
+            })
+        return image
+
+    def flavor(self):
+        """Flavor config for openstack
+
+        Returns:
+            FlavorInfoItem
+        """
+        flavor = RwcalYang.FlavorInfoItem.from_dict({
+                "name": str(uuid.uuid4()),
+                "vm_flavor": {
+                        "memory_mb": 16392,
+                        "vcpu_count": 4,
+                        "storage_gb": 40
+                },
+                "guest_epa": {
+                        "cpu_pinning_policy": "DEDICATED",
+                        "cpu_thread_pinning_policy": "SEPARATE",
+                }})
+
+        numa_node_count = 2
+        flavor.guest_epa.numa_node_policy.node_cnt = numa_node_count
+        for i in range(numa_node_count):
+            node = flavor.guest_epa.numa_node_policy.node.add()
+            node.id = i
+            if i == 0:
+                node.vcpu = [0, 1]
+            elif i == 1:
+                node.vcpu = [2, 3]
+            node.memory_mb = 8196
+
+        dev = flavor.guest_epa.pcie_device.add()
+        dev.device_id = "PCI_10G_ALIAS"
+        dev.count = 1
+
+        return flavor
+
+    def vdu(self):
+        """Returns VDU config
+
+        Returns:
+            VDUInitParams
+        """
+        vdu = RwcalYang.VDUInitParams.from_dict({
+                "name": str(uuid.uuid4()),
+                "node_id": "123456789012345",
+                "image_id": self.image_id,
+                "flavor_id": self.flavor_id,
+            })
+
+        c1 = vdu.connection_points.add()
+        c1.name = str(uuid.uuid4())
+        c1.virtual_link_id = self.virtual_link_id
+
+        return vdu
+
+    def virtual_link(self):
+        """vlink config for Openstack
+
+        Returns:
+            VirtualLinkReqParams
+        """
+        vlink = RwcalYang.VirtualLinkReqParams.from_dict({
+                    "name": str(uuid.uuid4()),
+                    "subnet": '192.168.1.0/24',
+            })
+
+        return vlink
+
+
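+# One CloudConfig subclass per provider. Only Openstack is parametrized by
+# default; Aws and Cloudsim (defined above) can be added to `params` to run
+# the same suite against those backends.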
+@pytest.fixture(scope="module", params=[Openstack], ids=lambda val: val.__name__)
+def cloud_config(request):
+    return request.param(request.config.option)
+
+
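+# The "incremental" marker is assumed to be implemented in the installed
+# conftest.py: once one step fails, the remaining steps in this class are
+# expected to be skipped/xfailed, since each builds on the previous one.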
+@pytest.mark.incremental
+class TestCalSetup:
+
+    def test_start_server(self, cloud_config):
+        cloud_config.start_server()
+
+    def test_flavor_apis(self, cloud_config):
+        """
+        Asserts:
+            1. If the new flavor is created and available via read APIs
+            2. Verifies the READ APIs
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        status, new_flavor_id = cal.create_flavor(account, cloud_config.flavor())
+        cloud_config.flavor_id = new_flavor_id
+        assert status == RwStatus.SUCCESS
+
+        status, flavors = cal.get_flavor_list(account)
+        assert status == RwStatus.SUCCESS
+
+        ids = []
+        for flavor in flavors.flavorinfo_list:
+            status, flavor_single = cal.get_flavor(account, flavor.id)
+            assert status == RwStatus.SUCCESS
+            assert flavor.id == flavor_single.id
+            ids.append(flavor.id)
+
+        assert new_flavor_id in ids
+
+    def test_image_apis(self, cloud_config):
+        """
+        Asserts:
+            1. If the new image is created and available via read APIs
+            2. Verifies the READ APIs
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        if type(cloud_config) is Aws:
+            # AWS image creation is not exercised here; fall back to a
+            # pre-built AMI instead.
+            new_image_id = "ami-7070231a"
+        else:
+            status, new_image_id = cal.create_image(account, cloud_config.image())
+            cloud_config.image_id = new_image_id
+            assert status == RwStatus.SUCCESS
+            cloud_config.check_state(new_image_id, "get_image", "active")
+
+        status, images = cal.get_image_list(account)
+        assert status == RwStatus.SUCCESS
+
+        ids = []
+        for image in images.imageinfo_list:
+            status, image_single = cal.get_image(account, image.id)
+            assert status == RwStatus.SUCCESS
+            assert image_single.id == image.id
+            ids.append(image.id)
+
+        assert new_image_id in ids
+
+    def test_virtual_link_create(self, cloud_config):
+        """
+        Asserts:
+            1. If the new Vlink is created and available via read APIs
+            2. Verifies the READ APIs
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        status, new_vlink_id = cal.create_virtual_link(account, cloud_config.virtual_link())
+        cloud_config.virtual_link_id = new_vlink_id
+        assert status.status == RwStatus.SUCCESS
+        cloud_config.check_state(new_vlink_id, "get_virtual_link", "active")
+
+        status, vlinks = cal.get_virtual_link_list(account)
+        assert status == RwStatus.SUCCESS
+
+        ids = []
+        for vlink in vlinks.virtual_link_info_list:
+            status, vlink_single = cal.get_virtual_link(account, vlink.virtual_link_id)
+            assert status == RwStatus.SUCCESS
+            assert vlink_single.virtual_link_id == vlink.virtual_link_id
+            ids.append(vlink.virtual_link_id)
+
+        assert new_vlink_id in ids
+
+    def test_vdu_apis(self, cloud_config):
+        """
+        Asserts:
+            1. If the new VDU is created and available via read APIs
+            2. Verifies the READ APIs
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        status, new_vdu_id = cal.create_vdu(account, cloud_config.vdu())
+        cloud_config.vdu_id = new_vdu_id
+        assert status.status == RwStatus.SUCCESS
+        cloud_config.check_state(new_vdu_id, "get_vdu", "active")
+
+        status, vdus = cal.get_vdu_list(account)
+        assert status == RwStatus.SUCCESS
+
+        ids = []
+        for vdu in vdus.vdu_info_list:
+            status, vdu_single = cal.get_vdu(account, vdu.vdu_id)
+            assert status == RwStatus.SUCCESS
+            assert vdu_single.vdu_id == vdu.vdu_id
+            ids.append(vdu.vdu_id)
+
+        assert new_vdu_id in ids
+
+    def test_modify_vdu_api(self, cloud_config):
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        vdu_modify = RwcalYang.VDUModifyParams()
+        vdu_modify.vdu_id = cloud_config.vdu_id
+        c1 = vdu_modify.connection_points_add.add()
+        c1.name = "c_modify1"
+        # Set the new vlink
+        c1.virtual_link_id = cloud_config.virtual_link_id
+
+        status = cal.modify_vdu(account, vdu_modify)
+        assert status == RwStatus.SUCCESS
+
+@pytest.mark.incremental
+class TestCalTeardown:
+    def test_flavor_delete(self, cloud_config):
+        """
+        Asserts:
+            1. If flavor is deleted
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        if type(cloud_config) != Aws:
+            status = cal.delete_flavor(account, cloud_config.flavor_id)
+            assert status == RwStatus.SUCCESS
+
+    def test_image_delete(self, cloud_config):
+        """
+        Asserts:
+            1. If image is deleted
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        if type(cloud_config) != Aws:
+            status = cal.delete_image(account, cloud_config.image_id)
+            assert status == RwStatus.SUCCESS
+
+    def test_virtual_link_delete(self, cloud_config):
+        """
+        Asserts:
+            1. If VLink is deleted
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        status = cal.delete_virtual_link(account, cloud_config.virtual_link_id)
+        assert status == RwStatus.SUCCESS
+
+    def test_delete_vdu(self, cloud_config):
+        """
+        Asserts:
+            1. If VDU is deleted
+        """
+        account = cloud_config.account
+        cal = cloud_config.cal
+
+        status = cal.delete_vdu(account, cloud_config.vdu_id)
+        assert status == RwStatus.SUCCESS
+
+    def test_stop_server(self, cloud_config):
+        cloud_config.stop_server()
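+
+
+# Example invocation (hypothetical host and credentials), using the
+# command line options registered in conftest.py:
+#
+#   py.test rwcal/test/cal_module_test/pytest \
+#       --os-host=10.66.4.102 --os-user=pluto --os-tenant=demo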
diff --git a/rwcal/test/cal_module_test/pytest/conftest.py b/rwcal/test/cal_module_test/pytest/conftest.py
new file mode 100644 (file)
index 0000000..c4b6705
--- /dev/null
@@ -0,0 +1,37 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file conftest.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 21/01/2016
+
+"""
+
+def pytest_addoption(parser):
+    # Openstack related options
+    parser.addoption("--os-host", action="store", default="10.66.4.102")
+    parser.addoption("--os-user", action="store", default="pluto")
+    parser.addoption("--os-password", action="store", default="mypasswd")
+    parser.addoption("--os-tenant", action="store", default="demo")
+    parser.addoption("--os-network", action="store", default="private")
+
+    # aws related options
+    parser.addoption("--aws-user", action="store", default="AKIAIKRDX7BDLFU37PDA")
+    parser.addoption("--aws-password", action="store", default="cjCRtJxVylVkbYvOUQeyvCuOWAHieU6gqcQw29Hw")
+    parser.addoption("--aws-region", action="store", default="us-east-1")
+    parser.addoption("--aws-zone", action="store", default="us-east-1c")
+    parser.addoption("--aws-ssh-key", action="store", default="vprasad-sshkey")
diff --git a/rwcal/test/cal_module_test/racfg/cal_module_test.racfg b/rwcal/test/cal_module_test/racfg/cal_module_test.racfg
new file mode 100644 (file)
index 0000000..cd6d57a
--- /dev/null
@@ -0,0 +1,19 @@
+{
+  "test_name":"TC_CAL_MODULE_TESTS",
+  "commandline":"./cal_module_test --cloud-type 'openstack' --cloud-host={cloud_host} --user={user} {tenants}",
+  "target_vm":"VM",
+  "test_description":"System test targeting module tests for CAL accounts",
+  "run_as_root": true,
+  "status":"working",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2400,
+  "networks":[],
+  "vms":[
+    {
+      "name": "VM",
+      "memory": 8192,
+      "cpus": 4
+    }
+  ]
+}
+
diff --git a/rwcal/test/cloudtool_cal.py b/rwcal/test/cloudtool_cal.py
new file mode 100755 (executable)
index 0000000..92f4891
--- /dev/null
@@ -0,0 +1,989 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# 
+
+import os
+import sys
+import platform
+import socket
+import time
+import re
+import logging
+
+from pprint import pprint
+import argparse
+
+from gi.repository import RwcalYang
+from gi.repository.RwTypes import RwStatus
+import rw_peas
+import rwlogger
+
+# The imports below back the quota/rules subcommands and the VSPHERE
+# provider path. They are assumptions based on the clients those code
+# paths reference (keystone, nova, libcloud); helpers such as
+# ra_nova_connect(), get_openstack_file() and load_params() are expected
+# to be provided by the surrounding environment.
+from keystoneclient.v2_0 import client as keystone_client
+from novaclient.exceptions import BadRequest
+from libcloud.compute.types import Provider
+from libcloud.compute.providers import get_driver
+
+# module-level handles; populated lazily and under __main__
+nova = None
+testbed = None
+
+def wait_till_active(driver, account, vm_id_list, timeout):
+    """
+    Wait until every VM in vm_id_list reaches the ACTIVE state, or until
+    timeout seconds have elapsed. Returns silently in both cases; callers
+    verify the resulting state themselves.
+    """
+    end = time.time() + timeout
+    done = False
+
+    while time.time() < end and not done:
+        done = True
+        for vm_id in vm_id_list:
+            rc, rs = driver.get_vm(account, vm_id)
+            assert rc == RwStatus.SUCCESS
+            if rs.state != 'ACTIVE':
+                done = False
+                time.sleep(2)
+
+
+def get_image_name(node):
+    images = driver.list_images()
+    for i in images:
+        if i.id == node.extra['imageId']:
+            return i.name
+    return None
+
+def get_flavor_name(flavorid):
+    global nova
+    if nova is None:
+        nova = ra_nova_connect(project='admin')
+    for f in nova.flavors.list(True):
+         if f.id == flavorid: 
+             return f.name
+    return None
+
+def hostname():
+    return socket.gethostname().split('.')[0]
+
+def vm_register(id, driver, account, cmdargs, header=True):
+    if testbed is None:
+        print("Cannot register VM without reservation system")
+        return False
+
+    if cmdargs.reserve_new_vms:
+        user=os.environ['USER']
+    else:
+        user=None
+    fmt="%-28s %-12s %-12s %-15s"
+    if header:
+        print('VM                           controller   compute      mgmt ip')
+        print('---------------------------- ------------ ------------ ---------------')
+    rc, nodes = driver.get_vm_list(account)
+    assert rc == RwStatus.SUCCESS
+    for node in nodes.vminfo_list:
+        if id == 'all' or node.vm_id == id:
+            rc, flavor = driver.get_flavor(account, node.flavor_id)
+            assert rc == RwStatus.SUCCESS
+            ip = node.management_ip
+
+            huge = 'DISABLED'
+            if flavor.guest_epa.mempage_size == 'LARGE':
+                huge = flavor.guest_epa.mempage_size
+            #compute = utils.find_resource(nova.servers, node.id)
+            #compute_name = compute._info['OS-EXT-SRV-ATTR:hypervisor_hostname'].split('.')[0]
+            compute_name = hostname()
+            try:
+                testbed.add_resource(node.vm_name, hostname(), ip, flavor.vm_flavor.memory_mb, flavor.vm_flavor.vcpu_count, user, flavor.name, compute=compute_name, huge_pages=huge)
+                print(fmt % (node.vm_name, hostname(), compute_name, ip))
+            except Exception as e:
+                print("WARNING: Error \"%s\" adding resource to reservation system" % e)
+
+class OFromDict(object):
+    def __init__(self, d):
+        self.__dict__ = d
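+
+# Usage sketch: OFromDict({'name': 'vm1'}).name == 'vm1'; it exposes the
+# keys of a plain dict as object attributes.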
+
+
+def vm_create_subcommand(driver, account, cmdargs):
+    """Process the VM create subcommand."""
+    if cmdargs.name and cmdargs.count != 1:
+        sys.exit("Error: when VM name is specified, the count must be 1")
+
+    rc, sizes = driver.get_flavor_list(account)
+    assert rc == RwStatus.SUCCESS
+
+    try:
+        size = [s for s in sizes.flavorinfo_list if s.name == cmdargs.flavor][0]
+    except IndexError:
+        sys.exit("Error: Failed to create VM, couldn't find flavor %s" % \
+                 cmdargs.flavor)
+    print(size)
+    rc, images = driver.get_image_list(account)
+    assert rc == RwStatus.SUCCESS
+    if images is None:
+       sys.exit("Error: No images found")
+    try:
+        image = [i for i in images.imageinfo_list if cmdargs.image in i.name][0]
+    except IndexError:
+        sys.exit("Error: Failed to create VM, couldn't find image %s" % \
+                 cmdargs.image)
+    print(image)
+
+    # VM name is not specified, so determine a unique VM name
+    # VM name should have the following format:
+    #     rwopenstack_<host>_vm<id>, e.g., rwopenstack_grunt16_vm1
+    # The following code gets the list of existing VMs and determines
+    # a unique id for the VM name construction.
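+    # For example, with existing VMs rwopenstack_grunt16_vm3 and
+    # rwopenstack_grunt16_vm7 the scan yields vmid == 7, so the first
+    # new VM is named rwopenstack_grunt16_vm8.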
+    rc, nodes = driver.get_vm_list(account)
+    assert rc == RwStatus.SUCCESS
+    prefix = 'rwopenstack_%s_vm' % hostname()
+    vmid = 0
+    for n in nodes.vminfo_list:
+        if n.vm_name.startswith(prefix):
+            temp_str = n.vm_name[len(prefix):]
+            if temp_str == '':
+                temp = 1
+            else:
+                temp = int(n.vm_name[len(prefix):])
+
+            if temp > vmid:
+                vmid = temp
+
+    nodelist = []
+    for i in range(0, cmdargs.count):
+        if cmdargs.name:
+            vm_name = cmdargs.name
+        else:
+            vm_name = '%s%d' % (prefix, vmid+i+1)
+        rc, netlist = driver.get_network_list(account)
+        assert rc == RwStatus.SUCCESS
+        for network in netlist.networkinfo_list:
+            print(network)
+
+        vm = RwcalYang.VMInfoItem()
+        vm.vm_name = vm_name
+        vm.flavor_id = size.id
+        vm.image_id = image.id
+        vm.cloud_init.userdata = ''
+
+        nets = dict()
+        for network in netlist.networkinfo_list:
+            if network.network_name != "public":
+                nwitem = RwcalYang.VMInfoItem_NetworkList()
+                nwitem.network_id = network.network_id
+                nets[network.network_name] = nwitem
+
+        logger.debug('creating VM using nets %s' % cmdargs.networks)
+        for net in cmdargs.networks.split(','):
+            if net not in nets:
+                print("Invalid network name '%s'" % net)
+                print('available nets are %s' % ','.join(list(nets.keys())))
+                sys.exit(1)
+            if net != cmdargs.mgmt_network:
+                vm.network_list.append(nets[net])
+
+        print(vm.network_list)
+        rc, node_id = driver.create_vm(account, vm)
+
+        # wait for 1 to be up before starting the rest
+        # this is an attempt to make sure the image is cached
+        nodelist.append(node_id)
+        if i == 0 or cmdargs.wait_after_create is True:
+            #wait_until_running([node], timeout=300)
+            wait_till_active(driver, account, nodelist, timeout=300)
+        print(node_id)
+
+    if cmdargs.reservation_server_url is not None:
+        if not cmdargs.wait_after_create:
+            print("Waiting for VMs to start")
+            wait_till_active(driver, account, nodelist, timeout=300)
+            print("VMs are up")
+        header = True
+        for node in nodelist:
+            vm_register(node, driver, account, cmdargs, header)
+            header = False
+
+
+def vm_destroy_subcommand(driver, account, cmdargs):
+    rc, nodes = driver.get_vm_list(account)
+    assert rc == RwStatus.SUCCESS      
+    ct = len(nodes.vminfo_list)
+    if cmdargs.destroy_all or cmdargs.wait:
+        rc=0
+        for n in nodes.vminfo_list:
+            if testbed is not None:
+                try:
+                    testbed.remove_resource(n.vm_name)
+                except:
+                    print("WARNING: error deleting resource from reservation system")
+            if RwStatus.SUCCESS != driver.delete_vm(account, n.vm_id):
+                print('Error: failed to destroy node %s' % n.vm_name)
+                rc=1
+        if rc:
+            sys.exit(1)
+        if cmdargs.wait:
+            while ct > 0:
+                sys.stderr.write("waiting for %d VMs to exit...\n" % ct)
+                time.sleep(1)
+                try:
+                    rc, nodesnw = driver.get_vm_list(account)
+                    assert rc == RwStatus.SUCCESS      
+                    ct = len(nodesnw.vminfo_list )
+                except:
+                    pass
+        
+    else:
+        vm_re = re.compile('^%s$' % cmdargs.vm_name)
+        ct = 0
+        for n in nodes.vminfo_list:
+            if vm_re.match(n.vm_name):
+                ct += 1
+                if testbed is not None:
+                    try:
+                        testbed.remove_resource(n.vm_name)
+                    except:
+                        print("WARNING: error deleting resource from reservation system")
+                if RwStatus.SUCCESS != driver.delete_vm(account, n.vm_id):
+                    print('Error: failed to destroy node %s' % n.vm_name)
+                    return
+                print('destroyed %s' % n.vm_name)
+        if ct == 0:
+            print("No VMs matching \"%s\" found" % ( cmdargs.vm_name ))
+        
+                    
+def vm_rebuild_subcommand(driver, account, cmdargs):
+    images = driver.list_images()
+    found=0
+    for i in images:
+        if i.name == cmdargs.image_name:
+            found=1
+            break
+    if found != 1:
+        print('Error: Rebuild failed - image %s not found' % cmdargs.image_name)
+        sys.exit(1)
+    image=i
+    nodes = driver.list_nodes()
+    if cmdargs.rebuild_all:
+        rc=0
+        for n in nodes:
+            if not driver.ex_rebuild(n,image):
+                print('Error: failed to rebuild node %s' % n.name)
+                rc=1
+            if rc:
+                sys.exit(1)
+            rebuilt=0
+            while rebuilt != 1:
+                time.sleep(10)
+                nw_nodes = driver.list_nodes()
+                for nw in nw_nodes:
+                    if nw.name == n.name:
+                        if nw.state == n.state:
+                            rebuilt=1
+                        break  
+    else:
+        vm_re = re.compile('^%s$' % cmdargs.vm_name)
+        ct = 0
+        for n in nodes:
+            if vm_re.match(n.name):
+                ct += 1
+                if not driver.ex_rebuild(n,image):
+                    print('Error: failed to rebuild node %s' % n.name)
+                    return
+                print('Rebuilt %s' % n.name)
+                rebuilt=0
+                while rebuilt != 1:
+                    time.sleep(10)
+                    nw_nodes = driver.list_nodes()
+                    for nw in nw_nodes:
+                        if nw.name == n.name:
+                            if nw.state == n.state:
+                                rebuilt=1
+                            break  
+        if ct == 0:
+            print("No VMs matching \"%s\" found" % ( cmdargs.vm_name ))
+        
+                    
+
+def vm_reboot_subcommand(driver, account, cmdargs):
+    rc, nodes = driver.get_vm_list(account)
+    assert rc == RwStatus.SUCCESS      
+    if cmdargs.reboot_all:
+        for n in nodes.vminfo_list:
+            '''
+            if not n.reboot():
+                print 'Error: failed to reboot node %s' % n.name
+            else:
+                print "rebooted %s" % n.name
+            '''
+            time.sleep(cmdargs.sleep_time)
+    else:
+        for n in nodes.vminfo_list:
+            if n.vm_name == cmdargs.vm_name:
+                if RwStatus.SUCCESS !=  driver.reboot_vm(account,n.vm_id):
+                    print('Error: failed to reboot node %s' % n.vm_name)
+                else:
+                    print("rebooted %s" % n.vm_name)
+                    
+
+def vm_start_subcommand(driver, account, cmdargs):
+    rc, nodes = driver.get_vm_list(account)
+    assert rc == RwStatus.SUCCESS      
+    if cmdargs.start_all:
+        for n in nodes.vminfo_list:
+            print(dir(n))
+            if RwStatus.SUCCESS != driver.start_vm(account, n.vm_id):
+                print('Error: failed to start node %s' % n.vm_name)
+            else:
+                print("started %s" % n.vm_name)
+    else:
+        for n in nodes.vminfo_list:
+            if n.vm_name == cmdargs.vm_name:
+                if RwStatus.SUCCESS != driver.start_vm(account, n.vm_id):
+                    print('Error: failed to start node %s' % n.vm_name)
+                else:
+                    print("started %s" % n.vm_name)
+                    
+def vm_subcommand(driver, account, cmdargs):
+    """Process the vm subcommand"""
+
+    if cmdargs.which == 'list':
+        rc, nodes = driver.get_vm_list(account)
+        assert rc == RwStatus.SUCCESS  
+        for n in nodes.vminfo_list:
+            print(n)           
+            if n.state == 4:
+                if not cmdargs.ipsonly:
+                    print("%s is shutoff" % n.vm_name)
+            elif cmdargs.ipsonly:
+                i = n.management_ip
+                if i is not None:
+                    print(i)
+            else: 
+                if n.management_ip is not None:
+                    if len(n.private_ip_list) > 0:
+                        print("%s %s,%s" % (n.vm_name, n.management_ip, ",".join([i.get_ip_address() for i in n.private_ip_list])))
+                    else:
+                        print("%s %s" % (n.vm_name, n.management_ip))
+                else:
+                    print("%s NO IP" % n.vm_name)
+
+    elif cmdargs.which == 'create':
+        vm_create_subcommand(driver, account, cmdargs)
+
+    elif cmdargs.which == 'reboot':
+        vm_reboot_subcommand(driver, account, cmdargs)
+    elif cmdargs.which == 'start':
+        vm_start_subcommand(driver, account, cmdargs)
+    elif cmdargs.which == 'destroy':
+        vm_destroy_subcommand(driver, account, cmdargs)
+    #elif cmdargs.which == 'rebuild':
+    #    vm_rebuild_subcommand(driver, account, cmdargs)
+
+def image_delete_subcommand(driver, account, cmdargs):
+    rc,images = driver.get_image_list(account)
+    assert rc == RwStatus.SUCCESS
+    account.openstack.key          = 'admin'
+    if cmdargs.delete_all:
+        for i in images.imageinfo_list:
+            if RwStatus.SUCCESS != driver.delete_image(account, i.id):
+                print('Error: failed to delete image %s' % i.name)
+    else:
+        for i in images.imageinfo_list:
+            if i.name == cmdargs.image_name:
+                if RwStatus.SUCCESS != driver.delete_image(account, i.id):
+                    print('Error: failed to delete image %s' % i.name)
+
+def image_subcommand(driver, account, cmdargs):
+    """Process the image subcommand"""
+    if cmdargs.which == 'list':
+        rc, images = driver.get_image_list(account)
+        assert rc == RwStatus.SUCCESS
+
+        for i in images.imageinfo_list:
+            print(i)
+
+    elif cmdargs.which == 'delete':
+        image_delete_subcommand(driver, account, cmdargs)
+
+    elif cmdargs.which == 'create':
+        account.openstack.key          = 'admin'
+        rc, images = driver.get_image_list(account)
+        assert rc == RwStatus.SUCCESS
+        for i in images.imageinfo_list:
+            if i.name == cmdargs.image_name:
+                print("FATAL: image \"%s\" already exists" % cmdargs.image_name)
+                return 1
+        
+        print("creating image \"%s\" using %s ..." % \
+              (cmdargs.image_name, cmdargs.file_name))
+        img = RwcalYang.ImageInfoItem()
+        img.name = cmdargs.image_name
+        img.location = cmdargs.file_name
+        img.disk_format = "qcow2"
+        img.container_format = "bare"
+        rc, img_id = driver.create_image(account, img) 
+        print("... done. image_id is %s" % img_id)
+        return img_id
+
+    elif cmdargs.which == 'getid':
+        rc, images = driver.get_image_list(account)
+        assert rc == RwStatus.SUCCESS
+        found=0
+        for i in images.imageinfo_list:
+            if i.name == cmdargs.image_name:
+                print(i.id)
+                found += 1
+        if found != 1:
+            sys.exit(1)
+        
+def flavor_subcommand(driver, account, cmdargs):
+    """Process the flavor subcommand"""
+    if cmdargs.which == 'list':
+        rc, sizes = driver.get_flavor_list(account)
+        assert rc == RwStatus.SUCCESS
+        for f in sizes.flavorinfo_list:
+            rc, flv = driver.get_flavor(account, f.id)
+            assert rc == RwStatus.SUCCESS
+            print(flv)
+    elif cmdargs.which == 'create':
+        account.openstack.key          = 'admin'    
+        flavor                                     = RwcalYang.FlavorInfoItem()
+        flavor.name                                = cmdargs.flavor_name
+        flavor.vm_flavor.memory_mb                 = cmdargs.memory_size
+        flavor.vm_flavor.vcpu_count                = cmdargs.vcpu_count
+        flavor.vm_flavor.storage_gb                = cmdargs.disc_size
+        if cmdargs.hugepages_kilo:
+            flavor.guest_epa.mempage_size              = cmdargs.hugepages_kilo
+        if cmdargs.numa_nodes:
+            flavor.guest_epa.numa_node_policy.node_cnt = cmdargs.numa_nodes
+        if cmdargs.dedicated_cpu:
+            flavor.guest_epa.cpu_pinning_policy        = 'DEDICATED'
+        if cmdargs.pci_count:
+            dev = flavor.guest_epa.pcie_device.add()
+            dev.device_id = 'PCI_%dG_ALIAS' % (cmdargs.pci_speed)
+            dev.count = cmdargs.pci_count 
+        if cmdargs.colleto:
+            dev = flavor.guest_epa.pcie_device.add()
+            dev.device_id = 'COLETO_VF_ALIAS'
+            dev.count = cmdargs.colleto 
+        if cmdargs.trusted_host:
+            flavor.guest_epa.trusted_execution = True 
+
+        rc, flavor_id = driver.create_flavor(account, flavor)
+        assert rc == RwStatus.SUCCESS
+
+        print("created flavor %s id %s" % (cmdargs.flavor_name, flavor_id)) 
+
+    elif cmdargs.which == 'delete':
+        account.openstack.key          = 'admin'    
+        rc, sizes = driver.get_flavor_list(account)
+        assert rc == RwStatus.SUCCESS
+        for f in sizes.flavorinfo_list:
+            if f.name == cmdargs.flavor_name:
+                rc = driver.delete_flavor(account, f.id)
+                assert rc == RwStatus.SUCCESS
+
+def hostagg_subcommand(driver, account, cmdargs):
+    """Process the hostagg subcommand"""
+    if cmdargs.which == 'list':
+        nova = ra_nova_connect(project='admin')
+        for f in nova.aggregates.list():
+            print("%-12s %-12s" % \
+                  (f.name, f.availability_zone))
+                
+    elif cmdargs.which == 'create':
+        nova = ra_nova_connect(project='admin')
+        hostagg = nova.aggregates.create(cmdargs.hostagg_name, 
+                                     cmdargs.avail_zone)
+        print("created hostagg %s in %s" % (hostagg.name, hostagg.availability_zone)) 
+
+    elif cmdargs.which == 'delete':
+        nova = ra_nova_connect(project='admin')
+        for f in nova.aggregates.list():
+            if f.name == cmdargs.hostagg_name:
+                if cmdargs.force_delete_hosts:
+                    for h in f.hosts:
+                        f.remove_host(h)
+
+                f.delete()
+
+    elif cmdargs.which == 'addhost':
+        nova = ra_nova_connect(project='admin')
+        for f in nova.aggregates.list():
+            if f.name == cmdargs.hostagg_name:
+                f.add_host(cmdargs.host_name)
+
+    elif cmdargs.which == 'delhost':
+        nova = ra_nova_connect(project='admin')
+        for f in nova.aggregates.list():
+            if f.name == cmdargs.hostagg_name:
+                f.remove_host(cmdargs.host_name)
+
+    elif cmdargs.which == 'setmetadata':
+        nova = ra_nova_connect(project='admin')
+        for f in nova.aggregates.list():
+            if f.name == cmdargs.hostagg_name:
+                d = dict([cmdargs.extra_specs.split("="),])                
+                f.set_metadata(d)
+
+def quota_subcommand(driver, account, cmdargs):
+    """Process the quota subcommand"""
+    nova = ra_nova_connect(project='admin')
+    cfgfile = get_openstack_file(None,  cmdargs.project)
+    kwargs = load_params(cfgfile)
+
+    keystone = keystone_client.Client(username=kwargs.get('OS_USERNAME'),
+                               password=kwargs.get('OS_PASSWORD'),
+                               tenant_name=kwargs.get('OS_TENANT_NAME'),
+                               auth_url=kwargs.get('OS_AUTH_URL'))
+    if cmdargs.which == 'set':
+        nova.quotas.update(keystone.tenant_id, 
+                           ram=cmdargs.memory, 
+                           floating_ips=cmdargs.ips, 
+                           instances=cmdargs.vms, 
+                           cores=cmdargs.vcpus)
+    elif cmdargs.which == 'get':
+        print("get quotas for tenant %s %s" % \
+              (cmdargs.project, keystone.tenant_id))
+        q = nova.quotas.get(keystone.tenant_id)
+        for att in [ 'ram', 'floating_ips', 'instances', 'cores' ]: 
+            print("%12s: %6d" % ( att, getattr(q, att) ))
+        
+def rules_subcommand(driver, account, cmdargs):
+    nova = ra_nova_connect(project='demo')
+    group=nova.security_groups.find(name='default')
+    if cmdargs.which == 'set':
+        try:
+            nova.security_group_rules.create(group.id,ip_protocol='tcp', from_port=1, to_port=65535 )
+        except BadRequest:
+            pass
+        try: 
+            nova.security_group_rules.create(group.id, ip_protocol='icmp',from_port=-1, to_port=-1 )
+        except BadRequest:
+            pass
+            
+    elif cmdargs.which == 'list':
+        for r in group.rules:
+            if r['from_port'] == -1:
+                print("rule %d proto %s from IP %s" % ( r['id'], r['ip_protocol'], r['ip_range']['cidr'] ))
+            else:
+                print("rule %d proto %s from port %d to %d from IP %s" % ( r['id'], r['ip_protocol'], r['from_port'], r['to_port'], r['ip_range']['cidr'] ))
+
+
+def register_subcommand(driver, account, cmdargs):
+    cmdargs.reserve_new_vms = False
+    vm_register('all', driver, account, cmdargs)       
+           
+##
+# Command line argument specification
+##
+desc="""This tool is used to manage the VMs"""
+kilo = platform.dist()[1] == '21'
+parser = argparse.ArgumentParser(description=desc)
+subparsers = parser.add_subparsers()
+ipaddr = socket.gethostbyname(socket.getfqdn())
+reservation_server_url = os.environ.get('RESERVATION_SERVER', 'http://reservation.eng.riftio.com:80')
+# ipaddr = netifaces.ifaddresses('br0')[netifaces.AF_INET][0]['addr']
+#default_auth_url = 'http://%s:5000/v3/' % ipaddr
+default_auth_url = 'http://10.66.4.27:5000/v3/'
+
+parser.add_argument('-t', '--provider-type', dest='provider_type',
+                    type=str, default='OPENSTACK', 
+                    help='Cloud provider type (default: %(default)s)')
+parser.add_argument('-u', '--user-name', dest='user', 
+                    type=str, default='demo', 
+                    help='User name (default: %(default)s)')
+parser.add_argument('-p', '--password', dest='passwd', 
+                    type=str, default='mypasswd', 
+                    help='Password (default: %(default)s)')
+parser.add_argument('-m', '--mgmt-nw', dest='mgmt_network', 
+                    type=str, default='private', 
+                    help='mgmt-network (default: %(default)s)')
+parser.add_argument('-a', '--auth-url', dest='auth_url',
+                    type=str, default=default_auth_url,
+                    help='Authentication URL (default: %(default)s)')
+parser.add_argument('-r', '--reservation_server_url', dest='reservation_server_url', 
+                    type=str, default=reservation_server_url, 
+                    help='reservation server url, use None to disable (default %(default)s)' )
+parser.add_argument('-d', '--debug', dest='debug', action='store_true', help='raise the logging level')
+
+##
+# Subparser for VM
+##
+vm_parser = subparsers.add_parser('vm')
+vm_subparsers = vm_parser.add_subparsers()
+
+# Create VM subparser
+vm_create_parser = vm_subparsers.add_parser('create')
+vm_create_parser.add_argument('-c', '--count',
+                              type=int, default=1,
+                              help='The number of VMs to launch '
+                                   '(default: %(default)d)')
+vm_create_parser.add_argument('-i', '--image',
+                              default='rwopenstack_vm',
+                              help='Specify the image for the VM (default: %(default)s)')
+vm_create_parser.add_argument('-n', '--name',
+                              help='Specify the name of the VM')
+vm_create_parser.add_argument('-f', '--flavor',
+                              help='Specify the flavor for the VM')
+vm_create_parser.add_argument('-R', '--reserve', dest='reserve_new_vms', 
+                    action='store_true', help='reserve any newly created VMs')
+vm_create_parser.add_argument('-s', '--single', dest='wait_after_create', 
+                    action='store_true', help='wait for each VM to start before creating the next')
+vm_create_parser.add_argument('-N', '--networks', dest='networks', type=str, 
+                                default='private,private2,private3,private4',
+                                help='comma separated list of networks to connect these VMs to (default: %(default)s)' )
+
+vm_create_parser.set_defaults(which='create')
+# Reboot VM subparser
+vm_reboot_parser = vm_subparsers.add_parser('reboot')
+group = vm_reboot_parser.add_mutually_exclusive_group()
+group.add_argument('-n', '--vm-name', dest='vm_name',
+                   type=str,
+                   help='Specify the name of the VM')
+group.add_argument('-a', '--reboot-all', 
+                   dest='reboot_all', action='store_true',
+                   help='Reboot all VMs')
+vm_reboot_parser.add_argument('-s', '--sleep', dest='sleep_time', type=int, default=4, help='time in seconds to sleep between reboots')
+vm_reboot_parser.set_defaults(which='reboot')
+
+
+"""
+# start VM subparser
+vm_start_parser = vm_subparsers.add_parser('start')
+group = vm_start_parser.add_mutually_exclusive_group()
+group.add_argument('-n', '--vm-name', dest='vm_name',
+                   type=str,
+                   help='Specify the name of the VM')
+group.add_argument('-a', '--start-all', 
+                   dest='start_all', action='store_true',
+                   help='Start all VMs')
+vm_start_parser.set_defaults(which='start')
+"""
+
+# Destroy VM subparser
+vm_destroy_parser = vm_subparsers.add_parser('destroy')
+group = vm_destroy_parser.add_mutually_exclusive_group()
+group.add_argument('-n', '--vm-name', dest='vm_name',
+                   type=str,
+                   help='Specify the name of the VM (accepts regular expressions)')
+group.add_argument('-a', '--destroy-all', 
+                   dest='destroy_all', action='store_true',
+                   help='Delete all VMs')
+group.add_argument('-w', '--wait', 
+                   dest='wait', action='store_true',
+                   help='destroy all and wait until all VMs have exited')
+vm_destroy_parser.set_defaults(which='destroy')
+
+# Rebuild VM subparser
+vm_rebuild_parser = vm_subparsers.add_parser('rebuild')
+group = vm_rebuild_parser.add_mutually_exclusive_group()
+group.add_argument('-n', '--vm-name', dest='vm_name',
+                   type=str,
+                   help='Specify the name of the VM (accepts regular expressions)')
+group.add_argument('-a', '--rebuild-all', 
+                   dest='rebuild_all', action='store_true',
+                   help='Rebuild all VMs')
+vm_rebuild_parser.add_argument('-i', '--image-name', dest='image_name',
+                              type=str,
+                              help='Specify the name of the image')
+vm_rebuild_parser.set_defaults(which='rebuild')
+
+# List VM subparser
+vm_list_parser = vm_subparsers.add_parser('list')
+vm_list_parser.set_defaults(which='list')
+vm_list_parser.add_argument('-i', '--ips_only', dest='ipsonly', 
+                            action='store_true', 
+                            help='only list IP addresses')
+
+vm_parser.set_defaults(func=vm_subcommand)
+
+##
+# Subparser for image
+##
+image_parser = subparsers.add_parser('image')
+image_subparsers = image_parser.add_subparsers()
+
+# List image subparser
+image_list_parser = image_subparsers.add_parser('list')
+image_list_parser.set_defaults(which='list')
+
+# Delete image subparser
+image_destroy_parser = image_subparsers.add_parser('delete')
+group = image_destroy_parser.add_mutually_exclusive_group()
+group.add_argument('-n', '--image-name', dest='image_name',
+                   type=str,
+                   help='Specify the name of the image')
+group.add_argument('-a', '--delete-all', 
+                   dest='delete_all', action='store_true',
+                   help='Delete all images')
+image_destroy_parser.set_defaults(which='delete')
+
+# create image
+image_create_parser = image_subparsers.add_parser('create')
+image_create_parser.set_defaults(which='create')
+image_create_parser.add_argument('-n', '--image-name', dest='image_name',
+                                  type=str,
+                                  default="rwopenstack_vm",
+                                  help='Specify the name of the image')
+image_create_parser.add_argument('-f', '--filename', dest='file_name',
+                                  type=str, 
+                                  default='/net/sharedfiles/home1/common/vm/rift-root-current.qcow2',
+                                  help='name of the existing qcow2 image file')
+
+
+# Get image id subparser
+image_getid_parser = image_subparsers.add_parser('getid')
+image_getid_parser.set_defaults(which='getid')
+image_getid_parser.add_argument('-n', '--image-name', dest='image_name',
+                                type=str,
+                                default="rwopenstack_vm",
+                                help='Specify the name of the image')
+image_parser.set_defaults(func=image_subcommand)
+
+##
+# Subparser for flavor
+##
+flavor_parser = subparsers.add_parser('flavor')
+flavor_subparsers = flavor_parser.add_subparsers()
+
+# List flavor subparser
+flavor_list_parser = flavor_subparsers.add_parser('list')
+flavor_list_parser.set_defaults(which='list')
+
+# Create flavor subparser
+flavor_create_parser = flavor_subparsers.add_parser('create')
+flavor_create_parser.set_defaults(which='create')
+flavor_create_parser.add_argument('-n', '--flavor-name', dest='flavor_name',
+                                  type=str,
+                                  help='Specify the name of the flavor')
+flavor_create_parser.add_argument('-m', '--memory-size', dest='memory_size',
+                                  type=int, default=1024,
+                                  help='Specify the size of the memory in MB '
+                                       '(default: %(default)d)')
+flavor_create_parser.add_argument('-d', '--disc-size', dest='disc_size',
+                                  type=int, default=16,
+                                  help='Specify the size of the disc in GB '
+                                       '(default: %(default)d)')
+flavor_create_parser.add_argument('-v', '--vcpu-count', dest='vcpu_count',
+                                  type=int, default=1,
+                                  help='Specify the number of VCPUs '
+                                       '(default: %(default)d)')
+flavor_create_parser.add_argument('-p', '--pci-count', dest='pci_count',
+                                  type=int, default=0,
+                                  help='Specify the number of PCI devices '
+                                       '(default: %(default)d)')
+flavor_create_parser.add_argument('-s', '--pci-speed', dest='pci_speed',
+                                  type=int, default=10,
+                                  help='Specify the speed of the PCI devices in Gbps (default: %(default)d)')
+flavor_create_parser.add_argument('-e', '--hostagg-extra-specs', dest='extra_specs',
+                                  type=str, 
+                                  help='Specify the extra spec ')
+flavor_create_parser.add_argument('-b', '--back-with-hugepages', dest='enable_hugepages',
+                                  action='store_true',
+                                  help='Enable memory backing with hugepages')
+flavor_create_parser.add_argument('-B', '--back-with-hugepages-kilo', dest='hugepages_kilo',
+                                  type=str,
+                                  help='Enable memory backing with hugepages for kilo')
+flavor_create_parser.add_argument('-D', '--dedicated_cpu', dest='dedicated_cpu',
+                                  action='store_true',
+                                  help='Dedicated CPU usage')
+flavor_create_parser.add_argument('-T', '--cpu_threads', dest='cpu_threads',
+                                  type=str, 
+                                  help='CPU threads usage')
+flavor_create_parser.add_argument('-N', '--numa_nodes', dest='numa_nodes',
+                                  type=int, 
+                                  help='Configure numa nodes')
+flavor_create_parser.add_argument('-t', '--trusted-host', dest='trusted_host',  action='store_true', help='restrict instances to trusted hosts')
+flavor_create_parser.add_argument('-c', '--crypto-cards', dest='colleto',  type=int, default=0,  \
+                                    help='how many colleto creek VFs should be passed thru to the VM')
+
+# Delete flavor subparser
+flavor_delete_parser = flavor_subparsers.add_parser('delete')
+flavor_delete_parser.set_defaults(which='delete')
+flavor_delete_parser.add_argument('-n', '--flavor-name', dest='flavor_name',
+                                  type=str,
+                                  help='Specify the name of the flavor')
+
+flavor_parser.set_defaults(func=flavor_subcommand)
+
+##
+# Subparser for host-aggregate 
+##
+hostagg_parser = subparsers.add_parser('hostagg')
+hostagg_subparsers = hostagg_parser.add_subparsers()
+
+# List host-aggregate subparser
+hostagg_list_parser = hostagg_subparsers.add_parser('list')
+hostagg_list_parser.set_defaults(which='list')
+
+# Create hostagg subparser
+hostagg_create_parser = hostagg_subparsers.add_parser('create')
+hostagg_create_parser.set_defaults(which='create')
+hostagg_create_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name',
+                                  type=str,
+                                  help='Specify the name of the hostagg')
+hostagg_create_parser.add_argument('-a', '--avail-zone', dest='avail_zone',
+                                  type=str,
+                                  help='Specify the name of the availability zone')
+# Delete hostagg subparser
+hostagg_delete_parser = hostagg_subparsers.add_parser('delete')
+hostagg_delete_parser.set_defaults(which='delete')
+hostagg_delete_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name',
+                                  type=str,
+                                  help='Specify the name of the hostagg')
+hostagg_delete_parser.add_argument('-f', '--force-delete-hosts', dest='force_delete_hosts',
+                                  action='store_true',
+                                  help='Delete the existing hosts')
+
+# Add host subparser
+hostagg_addhost_parser = hostagg_subparsers.add_parser('addhost')
+hostagg_addhost_parser.set_defaults(which='addhost')
+hostagg_addhost_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name',
+                                  type=str,
+                                  help='Specify the name of the hostagg')
+hostagg_addhost_parser.add_argument('-c', '--compute-host-name', dest='host_name',
+                                  type=str,
+                                  help='Specify the name of the host to be added')
+
+# Remove host subparser
+hostagg_delhost_parser = hostagg_subparsers.add_parser('delhost')
+hostagg_delhost_parser.set_defaults(which='delhost')
+hostagg_delhost_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name',
+                                  type=str,
+                                  help='Specify the name of the hostagg')
+hostagg_delhost_parser.add_argument('-c', '--compute-host-name', dest='host_name',
+                                  type=str,
+                                  help='Specify the name of the host to be removed')
+
+# Set meta-data subparser
+hostagg_setdata_parser = hostagg_subparsers.add_parser('setmetadata')
+hostagg_setdata_parser.set_defaults(which='setmetadata')
+hostagg_setdata_parser.add_argument('-n', '--hostagg-name', dest='hostagg_name',
+                                  type=str,
+                                  help='Specify the name of the hostagg')
+hostagg_setdata_parser.add_argument('-d', '--meta-data', dest='extra_specs',
+                                  type=str,
+                                  help='Specify the meta-data to be associated to this host aggregate')
+
+hostagg_parser.set_defaults(func=hostagg_subcommand)
+
+##
+# Subparser for quota
+##
+quota_parser = subparsers.add_parser('quota')
+quota_subparser = quota_parser.add_subparsers()
+quota_set_parser = quota_subparser.add_parser('set')
+
+# quota set subparser
+quota_set_parser.set_defaults(which='set')
+quota_set_parser.add_argument('-p', '--project', dest='project', 
+                              type=str, default='demo', 
+                              help='project name that you wish to set '
+                                   'the quotas for')
+quota_set_parser.add_argument('-c', '--vcpus', dest='vcpus', 
+                              type=int, default=48, 
+                              help='Maximum number of virtual CPUs that can '
+                                   'be assigned to all VMs in aggregate')
+quota_set_parser.add_argument('-v', '--vms', dest='vms', 
+                              type=int, default=24, 
+                              help='Maximum number of VMs that can be created ' 
+                                   'on this openstack instance '
+                                   '(which may be more than 1 machine)')
+quota_set_parser.add_argument('-i', '--ips', dest='ips', 
+                              type=int, default=250, 
+                              help='Maximum number of Floating IP Addresses '
+                                   'that can be assigned to all VMs '
+                                   'in aggregate')
+quota_set_parser.add_argument('-m', '--memory', dest='memory', 
+                              type=int, default=122880, 
+                              help='Maximum amount of RAM in MB that can be '
+                                   'assigned to all VMs in aggregate')
+
+# quota get subparser
+quota_get_parser = quota_subparser.add_parser('get')
+quota_get_parser.add_argument('-p', '--project', dest='project', 
+                              type=str, default='demo', 
+                              help='project name that you wish to get '
+                                   'the quotas for')
+quota_get_parser.set_defaults(which='get')
+quota_parser.set_defaults(func=quota_subcommand)
+
+##
+# rules subparser
+##
+rules_parser = subparsers.add_parser('rules')
+rules_parser.set_defaults(func=rules_subcommand)
+rules_subparser = rules_parser.add_subparsers()
+rules_set_parser = rules_subparser.add_parser('set')
+rules_set_parser.set_defaults(which='set')
+rules_list_parser = rules_subparser.add_parser('list')
+rules_list_parser.set_defaults(which='list')
+
+register_parser = subparsers.add_parser('register')
+register_parser.set_defaults(func=register_subcommand)
+cmdargs = parser.parse_args()
+
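+# Example invocations (hypothetical flavor/image names):
+#
+#   ./cloudtool_cal.py -u demo -p mypasswd vm list -i
+#   ./cloudtool_cal.py vm create -c 2 -f my.flavor -i rift-root -N private,private2
+#   ./cloudtool_cal.py flavor create -n my.flavor -m 4096 -v 2 -d 20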
+
+if __name__ == "__main__":
+    logger=logging.getLogger(__name__)
+    if cmdargs.debug:
+        logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s', level=logging.DEBUG) 
+    else:
+        logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s', level=logging.WARNING) 
+
+    if cmdargs.provider_type == 'OPENSTACK':
+        #cls = get_driver(Provider.OPENSTACK)
+        pass
+    elif cmdargs.provider_type == 'VSPHERE':
+        cls = get_driver(Provider.VSPHERE)
+    else:
+        sys.exit("Cloud provider %s is NOT supported yet" % cmdargs.provider_type)
+
+    if cmdargs.reservation_server_url == "None" or cmdargs.reservation_server_url == "":
+        cmdargs.reservation_server_url = None
+    if cmdargs.reservation_server_url is not None:
+        sys.path.append('/usr/rift/lib')
+        try:
+            import ndl
+        except Exception as e:
+            logger.warning("Error loading Reservation library")
+            testbed=None
+        else:
+            testbed=ndl.Testbed()
+            testbed.set_server(cmdargs.reservation_server_url)
+            
+
+
+    if cmdargs.provider_type == 'OPENSTACK':
+        account                        = RwcalYang.CloudAccount()
+        account.account_type           = "openstack"
+        account.openstack.key          = cmdargs.user
+        account.openstack.secret       = cmdargs.passwd
+        account.openstack.auth_url     = cmdargs.auth_url
+        account.openstack.tenant       = cmdargs.user
+        account.openstack.mgmt_network = cmdargs.mgmt_network
+
+        plugin = rw_peas.PeasPlugin('rwcal_openstack', 'RwCal-1.0')
+        engine, info, extension = plugin()
+        driver = plugin.get_interface("Cloud")
+        # Get the RwLogger context
+        rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+        try:
+            rc = driver.init(rwloggerctx)
+            assert rc == RwStatus.SUCCESS
+        except:
+            logger.error("ERROR:Cal plugin instantiation failed. Aborting tests")
+        else:
+            logger.info("Openstack Cal plugin successfully instantiated")
+
+        cmdargs.func(driver, account, cmdargs)
+
+    elif cmdargs.provider_type == 'VSPHERE':
+        driver = cls(cmdargs.user, cmdargs.passwd, host='vcenter' )
+        cmdargs.func(driver, cmdargs)
diff --git a/rwcal/test/ec2.py b/rwcal/test/ec2.py
new file mode 100644 (file)
index 0000000..59ad049
--- /dev/null
@@ -0,0 +1,275 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import glob
+import itertools
+import os
+
+import boto
+import boto.vpc
+
+# TODO: Pull the latest of the owned instances.
+__default_instance_ami__ = 'ami-e421bc8c'
+
+# TODO: Make VPCs per user?
+__default_subnet__ = 'subnet-4b484363'
+__default_security_group__ = 'sg-d9da90bc'
+
+__default_instance_type__ = 'm1.medium'
+__default_vpc__ = 'vpc-e7ed4482'
+
+class RWEC2(object):
+    def __init__(self,  subnet=None, ami=None):
+        self._subnet = subnet if subnet is not None else __default_subnet__
+        self._ami = ami if ami is not None else __default_instance_ami__
+
+        self._conn = boto.connect_ec2()
+
+    @staticmethod
+    def cloud_init_current_user():
+        """
+        Return user_data configuration suitable for cloud-init that will create a user
+        with sudo and ssh key access on the remote instance.
+
+        ssh keys are found with the glob ~/.ssh/*pub*
+        """
+        user_data = "users:\n"
+        user_data += " - name: %s\n" % (os.getlogin(),)
+        user_data += "   groups: [wheel, adm, systemd-journal]\n"
+        user_data += "   sudo: [\"ALL=(ALL) NOPASSWD:ALL\"]\n"
+        user_data += "   shell: /bin/bash\n"
+        user_data += "   ssh_authorized_keys:\n"
+        for pub_key in glob.glob('%s/.ssh/*pub*' % (os.environ['HOME'],)):
+            with open(pub_key) as fp:
+                user_data += "    -  %s" % (fp.read(),)
+
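+        # For a user "jdoe" with a single public key the returned block
+        # looks like (illustrative output only):
+        #
+        #   users:
+        #    - name: jdoe
+        #      groups: [wheel, adm, systemd-journal]
+        #      sudo: ["ALL=(ALL) NOPASSWD:ALL"]
+        #      shell: /bin/bash
+        #      ssh_authorized_keys:
+        #       -  ssh-rsa AAAA... jdoe@host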
+        return user_data
+
+
+    @staticmethod
+    def cloud_init_yum_repos():
+        """
+        Return a string of user_data commands that can be used to update the yum
+        repos to point to the correct location.  They should be added by the caller
+        within a 'runcmd:' block.
+        """
+        ret = " - sed -i -e 's,www\.,,' -e 's,riftio\.com/mirrors,riftio.com:8881,' /etc/yum.repos.d/*.repo\n"
+        return ret
+
+    def instances(self, cluster_component, cluster_instance):
+        """
+        List of instances owned by the given cluster instance
+
+        @param cluster_component  - parent cluster of each instance
+        @param cluster_instance   - instance id of the owning cluster
+
+        @return                   - list of boto.ec2.instance.Instances provisioned
+        """
+        ret = []
+        reservations = self._conn.get_all_instances()
+        for instance in [instance for reservation in reservations for instance in reservation.instances]:
+            tags = instance.tags
+            if (tags.get('parent_component') == cluster_component
+                    and tags.get('parent_instance') == cluster_instance):
+                ret.append(instance)
+
+        return ret
+
+    def provision_master(self, cluster_component, cluster_instance):
+        """
+        Provision a master instance in EC2.  The master instance is a special instance with the
+        following features:
+            - Public IP
+            - /home shared over NFS
+
+        @param cluster_component  - parent cluster of each instance
+        @param cluster_instance   - instance id of the owning cluster
+
+        @return                   - boto.ec2.instance.Instances provisioned
+        """
+        vpc = boto.vpc.VPCConnection()
+        subnet = vpc.get_all_subnets(subnet_ids=__default_subnet__)[0]
+        cidr_block = subnet.cidr_block
+        vpc.close()
+
+        user_data = "#cloud-config\n"
+        user_data += "runcmd:\n"
+        user_data += " - echo '/home %s(rw,root_squash,sync)' >  /etc/exports\n" % (cidr_block,)
+        user_data += " - systemctl start nfs-server\n"
+        user_data += " - systemctl enable nfs-server\n"
+        user_data += self.cloud_init_yum_repos()
+        user_data += self.cloud_init_current_user()
+
+
+        net_if = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+                subnet_id=__default_subnet__,
+                groups=[__default_security_group__,],
+                associate_public_ip_address=True)
+
+        net_ifs = boto.ec2.networkinterface.NetworkInterfaceCollection(net_if)
+
+        new_reservation = self._conn.run_instances(
+                image_id=self._ami,
+                min_count=1,
+                max_count=1,
+                instance_type=__default_instance_type__,
+                network_interfaces=net_ifs,
+                tenancy='default',
+                user_data=user_data)
+        instance = new_reservation.instances[0]
+
+        instance.add_tag('parent_component', cluster_component)
+        instance.add_tag('parent_instance', cluster_instance)
+        instance.add_tag('master', 'self')
+
+        return instance
+
+
+    def provision(self, cluster_component, cluster_instance, n_instances=1, master_instance=None, net_ifs=None):
+        """
+        Provision a number of EC2 instanced to be used in a cluster.
+
+        @param cluster_component  - parent cluster of each instance
+        @param cluster_instance   - instance id of the owning cluster
+        @param n_instances        - number of requested instances
+        @param master_instance    - if specified, the boto.ec2.instance.Instance that is providing master
+                                    services for this cluster
+
+        @return                   - list of boto.ec2.instance.Instances provisioned
+        """
+        instances = []
+        cluster_instance = int(cluster_instance)
+
+        def possess_instance(instance):
+            instances.append(instance)
+            instance.add_tag('parent_component', cluster_component)
+            instance.add_tag('parent_instance', cluster_instance)
+            if master_instance is not None:
+                instance.add_tag('master', master_instance.id)
+            else:
+                instance.add_tag('master', 'None')
+
+        user_data = "#cloud-config\n"
+        user_data += self.cloud_init_current_user()
+        user_data += "runcmd:\n"
+        user_data += self.cloud_init_yum_repos()
+
+        if master_instance is not None:
+            user_data += " - echo '%s:/home /home nfs rw,soft,sync 0 0' >> /etc/fstab\n" % (
+                    master_instance.private_ip_address,)
+            user_data += " - mount /home\n"
+
+        # Fall back to the default subnet when no explicit network interfaces
+        # were supplied; otherwise attach the requested interface collection.
+        if net_ifs is None:
+            kwds = {'subnet_id': __default_subnet__}
+        else:
+            kwds = {'network_interfaces': net_ifs}
+
+        new_reservation = self._conn.run_instances(
+            image_id=self._ami,
+            min_count=n_instances,
+            max_count=n_instances,
+            instance_type=__default_instance_type__,
+            tenancy='default',
+            user_data=user_data,
+            **kwds)
+
+        for new_instance in new_reservation.instances:
+            possess_instance(new_instance)
+
+        return instances
+
+    def stop(self, instance_id, free_resources=True):
+        """
+        Stop the specified instance, freeing all allocated resources (elastic ips, etc) if requested.
+
+        @param instance_id      - name of the instance to stop
+        @param free_resource    - If True that all resources that were only owned by this instance
+                                  will be deallocated as well.
+        """
+        self._conn.terminate_instances(instance_ids=[instance_id,])
+
+    def fastpath111(self):
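+        """
+        Provision the fastpath 1.1.1 test topology: a 'cli' master plus mgmt,
+        tg1/tg2 and ts1/ts2/ts3 instances, each attached to the per-role
+        subnets created below (fabric and external load-balancer networks).
+        """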
+        vpc_conn = boto.vpc.VPCConnection()
+        vpc = vpc_conn.get_all_vpcs(vpc_ids=[__default_vpc__,])[0]
+        subnet_addrs_split = vpc.cidr_block.split('.')
+
+        networks = {
+            'mgmt': [s for s in vpc_conn.get_all_subnets() if s.id == __default_subnet__][0],
+            'tg_fabric': None,
+            'ts_fabric': None,
+            'tg_lb_ext': None,
+            'lb_ts_ext': None,
+        }
+
+        for i, network in enumerate([n for n, s in networks.items() if s is None]):
+            addr = "%s.%s.10%d.0/25" % (subnet_addrs_split[0], subnet_addrs_split[1], i)
+            try:
+                subnet = vpc_conn.create_subnet(vpc.id, addr)
+            except boto.exception.EC2ResponseError as e:
+                if 'InvalidSubnet.Conflict' == e.error_code:
+                    subnet = vpc_conn.get_all_subnets(filters=[('vpcId', vpc.id), ('cidrBlock', addr)])[0]
+                else:
+                    raise
+
+            networks[network] = subnet
+
+        def create_interfaces(nets):
+            ret = boto.ec2.networkinterface.NetworkInterfaceCollection()
+
+            for i, network in enumerate(nets):
+                spec = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+                        subnet_id=networks[network].id,
+                        description='%s iface' % (network,),
+                        groups=[__default_security_group__],
+                        device_index=i)
+                ret.append(spec)
+
+            return ret
+
+        ret = {}
+
+        ret['cli'] = self.provision_master('fp111', 1)
+        ret['cli'].add_tag('Name', 'cli')
+
+        net_ifs = create_interfaces(['mgmt'])
+        ret['mgmt'] = self.provision('fp111', 1, master_instance=ret['cli'], net_ifs=net_ifs)[0]
+        ret['mgmt'].add_tag('Name', 'mgmt')
+
+        net_ifs = create_interfaces(['mgmt', 'tg_fabric'])
+        ret['tg1'] = self.provision('fp111', 1, master_instance=ret['cli'], net_ifs=net_ifs)[0]
+        ret['tg1'].add_tag('Name', 'tg1')
+
+        net_ifs = create_interfaces(['mgmt', 'tg_fabric', 'tg_lb_ext'])
+        ret['tg2'] = self.provision('fp111', 1, master_instance=ret['cli'], net_ifs=net_ifs)[0]
+        ret['tg2'].add_tag('Name', 'tg2')
+
+        net_ifs = create_interfaces(['mgmt', 'ts_fabric'])
+        ret['ts1'] = self.provision('fp111', 1, master_instance=ret['cli'], net_ifs=net_ifs)[0]
+        ret['ts1'].add_tag('Name', 'ts1')
+
+        net_ifs = create_interfaces(['mgmt', 'ts_fabric', 'lb_ts_ext'])
+        ret['ts3'] = self.provision('fp111', 1, master_instance=ret['cli'], net_ifs=net_ifs)[0]
+        ret['ts3'].add_tag('Name', 'ts3')
+
+        net_ifs = create_interfaces(['mgmt', 'ts_fabric', 'lb_ts_ext', 'tg_lb_ext'])
+        ret['ts2'] = self.provision('fp111', 1, master_instance=ret['cli'], net_ifs=net_ifs)[0]
+        ret['ts2'].add_tag('Name', 'ts2')
+
+        return ret
+
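+# Illustrative usage of the provisioning API above (hypothetical driver code;
+# assumes a configured boto environment and this module's __default_*
+# constants):
+#
+#   cloud = ...  # an instance of the class defined above
+#   master = cloud.provision_master('mycluster', 1)
+#   workers = cloud.provision('mycluster', 1, n_instances=2,
+#                             master_instance=master)
+#   for inst in cloud.instances('mycluster', 1):
+#       print inst.id, inst.tags.get('Name')
+#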
+# vim: sw=4
diff --git a/rwcal/test/openstack_resources.py b/rwcal/test/openstack_resources.py
new file mode 100755 (executable)
index 0000000..f7fb00d
--- /dev/null
@@ -0,0 +1,483 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import RwcalYang
+from gi.repository.RwTypes import RwStatus
+import logging
+import rw_peas
+import rwlogger
+import time
+import argparse
+import os
+import sys
+import uuid
+from os.path import basename
+
+FLAVOR_NAME = 'm1.medium'
+DEFAULT_IMAGE='/net/sharedfiles/home1/common/vm/rift-root-latest.qcow2'
+
+persistent_resources = {
+    'vms'      : ['mission_control','launchpad',],
+    'networks' : ['public', 'private', 'multisite'],
+    'flavors'  : ['m1.tiny', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge'],
+    'images'   : ['rwimage','rift-root-latest.qcow2','rift-root-latest-trafgen.qcow2', 'rift-root-latest-trafgen-f.qcow2']
+}
+
+#
+# Important information about openstack installation. This needs to be manually verified 
+#
+openstack_info = {
+    'username'           : 'pluto',
+    'password'           : 'mypasswd',
+    'project_name'       : 'demo',
+    'mgmt_network'       : 'private',
+    'physical_network'   : 'physnet1',
+    'network_type'       : 'VLAN',
+    'segmentation_id'    : 42, ### What else?
+    'subnets'            : ["11.0.0.0/24", "12.0.0.0/24", "13.0.0.0/24", "14.0.0.0/24"],
+    'subnet_index'       : 0,
+    }
+
+
+logging.basicConfig(level=logging.INFO)
+
+USERDATA_FILENAME = os.path.join(os.environ['RIFT_INSTALL'],
+                                 'etc/userdata-template')
+
+
+RIFT_BASE_USERDATA = '''
+#cloud-config
+runcmd:
+ - sleep 5
+ - /usr/rift/scripts/cloud/enable_lab
+ - /usr/rift/etc/fix_this_vm
+'''
+
+try:
+    fd = open(USERDATA_FILENAME, 'r')
+except Exception as e:
+    # The module-level logger is not configured yet at this point, so report
+    # the failure on stderr before exiting.
+    sys.stderr.write("Failed to open userdata file %s: %s\n"
+                     % (USERDATA_FILENAME, str(e)))
+    sys.exit(-1)
+else:
+    LP_USERDATA_FILE = fd.read()
+    fd.close()
+    # Run the enable lab script when the openstack vm comes up
+    LP_USERDATA_FILE += "runcmd:\n"
+    LP_USERDATA_FILE += " - /usr/rift/scripts/cloud/enable_lab\n"
+    LP_USERDATA_FILE += " - /usr/rift/etc/fix_this_vm\n"
+
+
+
+def get_cal_plugin():
+    """
+    Loads rw.cal plugin via libpeas
+    """
+    plugin = rw_peas.PeasPlugin('rwcal_openstack', 'RwCal-1.0')
+    engine, info, extension = plugin()
+    cal = plugin.get_interface("Cloud")
+    # Get the RwLogger context
+    rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+    try:
+        rc = cal.init(rwloggerctx)
+        assert rc == RwStatus.SUCCESS
+    except Exception:
+        logger.error("ERROR:Cal plugin instantiation failed. Aborting tests")
+    else:
+        logger.info("Openstack Cal plugin successfully instantiated")
+        return cal 
+    
+def get_cal_account(auth_url):
+    """
+    Returns cal account
+    """
+    account                        = RwcalYang.CloudAccount()
+    account.account_type           = "openstack"
+    account.openstack.key          = openstack_info['username']
+    account.openstack.secret       = openstack_info['password']
+    account.openstack.auth_url     = auth_url
+    account.openstack.tenant       = openstack_info['project_name']
+    account.openstack.mgmt_network = openstack_info['mgmt_network']
+    return account
+
+
+logger = logging.getLogger('rift.cal.openstackresources')
+
+class OpenstackResources(object):
+    """
+    A simple helper class to manage a bunch of OpenStack resources
+    """
+    def __init__(self, controller):    
+        self._cal      = get_cal_plugin()
+        self._acct     = get_cal_account('http://'+controller+':5000/v3/')
+        self._id       = 0
+        self._image_id = None
+        self._flavor_id = None
+        
+    def _destroy_vms(self):
+        """
+        Destroy VMs
+        """
+        logger.info("Initiating VM cleanup")
+        rc, rsp = self._cal.get_vdu_list(self._acct)
+        vdu_list = [vm for vm in rsp.vdu_info_list if vm.name not in persistent_resources['vms']]
+        logger.info("Deleting VMs : %s" %([x.name for x in vdu_list]))
+        
+        for vdu in vdu_list:
+            self._cal.delete_vdu(self._acct, vdu.vdu_id)
+
+        logger.info("VM cleanup complete")
+
+    def _destroy_networks(self):
+        """
+        Destroy Networks
+        """
+        logger.info("Initiating Network cleanup")
+        rc, rsp = self._cal.get_virtual_link_list(self._acct)
+        vlink_list = [vlink for vlink in rsp.virtual_link_info_list if vlink.name not in persistent_resources['networks']]
+
+        logger.info("Deleting Networks : %s" %([x.name for x in vlink_list]))
+        for vlink in vlink_list:
+            self._cal.delete_virtual_link(self._acct, vlink.virtual_link_id)
+        logger.info("Network cleanup complete")
+
+    def _destroy_flavors(self):
+        """
+        Destroy Flavors
+        """
+        logger.info("Initiating flavor cleanup")
+        rc, rsp = self._cal.get_flavor_list(self._acct)
+        flavor_list = [flavor for flavor in rsp.flavorinfo_list if flavor.name not in persistent_resources['flavors']]
+            
+        logger.info("Deleting flavors : %s" %([x.name for x in flavor_list]))
+
+        for flavor in flavor_list:
+            self._cal.delete_flavor(self._acct, flavor.id)
+            
+        logger.info("Flavor cleanup complete")
+
+    def _destroy_images(self):
+        logger.info("Initiating image cleanup")
+        rc, rsp = self._cal.get_image_list(self._acct)
+        image_list = [image for image in rsp.imageinfo_list if image.name not in persistent_resources['images']]
+
+        logger.info("Deleting images : %s" %([x.name for x in image_list]))
+            
+        for image in image_list:
+            self._cal.delete_image(self._acct, image.id)
+            
+        logger.info("Image cleanup complete")
+        
+    def destroy_resource(self):
+        """
+        Destroy resources
+        """
+        logger.info("Cleaning up openstack resources")
+        self._destroy_vms()
+        self._destroy_networks()
+        self._destroy_flavors()
+        self._destroy_images()
+        logger.info("Cleaning up openstack resources.......[Done]")
+
+    def create_mission_control(self):
+        vm_id = self.create_vm('mission_control',
+                               userdata = RIFT_BASE_USERDATA)
+        return vm_id
+    
+
+    def create_launchpad_vm(self, salt_master=None):
+        node_id = str(uuid.uuid4())
+        if salt_master is not None:
+            userdata = LP_USERDATA_FILE.format(master_ip=salt_master,
+                                               lxcname=node_id)
+        else:
+            userdata = RIFT_BASE_USERDATA
+
+        vm_id = self.create_vm('launchpad',
+                              userdata = userdata,
+                              node_id = node_id)
+        return vm_id
+    
+    def create_vm(self, name, userdata, node_id = None):
+        """
+        Creates a VM. The VM name is derived from username
+
+        """
+        vm = RwcalYang.VDUInitParams()
+        vm.name = name
+        vm.flavor_id = self._flavor_id
+        vm.image_id  = self._image_id
+        if node_id is not None:
+            vm.node_id = node_id
+        vm.vdu_init.userdata = userdata
+        vm.allocate_public_address = True
+        logger.info("Starting a VM with parameter: %s" %(vm))
+     
+        rc, vm_id = self._cal.create_vdu(self._acct, vm)
+        assert rc == RwStatus.SUCCESS
+        logger.info('Created vm: %s with id: %s', name, vm_id)
+        return vm_id
+        
+    def create_network(self, name):
+        logger.info("Creating network with name: %s" %name)
+        network                = RwcalYang.NetworkInfoItem()
+        network.network_name   = name
+        network.subnet         = openstack_info['subnets'][openstack_info['subnet_index']]
+
+        # Advance to the next subnet, wrapping around at the end of the list.
+        openstack_info['subnet_index'] = \
+            (openstack_info['subnet_index'] + 1) % len(openstack_info['subnets'])
+        
+        if openstack_info['physical_network']:
+            network.provider_network.physical_network = openstack_info['physical_network']
+        if openstack_info['network_type']:
+            network.provider_network.overlay_type     = openstack_info['network_type']
+        if openstack_info['segmentation_id']:
+            network.provider_network.segmentation_id  = openstack_info['segmentation_id']
+            openstack_info['segmentation_id'] += 1
+
+        rc, net_id = self._cal.create_network(self._acct, network)
+        assert rc == RwStatus.SUCCESS
+
+        logger.info("Successfully created network with id: %s" %net_id)
+        return net_id
+    
+        
+
+    def create_image(self, location):
+        img = RwcalYang.ImageInfoItem()
+        img.name = basename(location)
+        img.location = location
+        img.disk_format = "qcow2"
+        img.container_format = "bare"
+
+        logger.info("Uploading image : %s" %img.name)
+        rc, img_id = self._cal.create_image(self._acct, img)
+        assert rc == RwStatus.SUCCESS
+
+        rs = None
+        rc = None
+        image = None
+        for i in range(100):
+            rc, rs = self._cal.get_image(self._acct, img_id)
+            assert rc == RwStatus.SUCCESS
+            logger.info("Image (image_id: %s) reached status : %s" %(img_id, rs.state))
+            if rs.state == 'active':
+                image = rs
+                break
+            else:
+                time.sleep(2) # Poll again in two seconds
+
+        if image is None:
+            logger.error("Failed to upload openstack image: %s", img)
+            sys.exit(1)
+
+        self._image_id = img_id
+        logger.info("Uploading image.......[Done]")
+        
+    def create_flavor(self):
+        """
+        Create Flavor suitable for rift_ping_pong VNF
+        """
+        flavor = RwcalYang.FlavorInfoItem()
+        flavor.name = FLAVOR_NAME
+        flavor.vm_flavor.memory_mb   = 16384 # 16GB
+        flavor.vm_flavor.vcpu_count  = 4 
+        flavor.vm_flavor.storage_gb  = 20 # 20 GB
+
+        logger.info("Creating new flavor. Flavor Info: %s" %str(flavor.vm_flavor))
+
+        rc, flavor_id = self._cal.create_flavor(self._acct, flavor)
+        assert rc == RwStatus.SUCCESS
+        logger.info("Creating new flavor.......[Done]")
+        return flavor_id
+
+    def find_image(self, name):
+        logger.info("Searching for uploaded image: %s" %name)
+        rc, rsp = self._cal.get_image_list(self._acct)
+        image_list = [image for image in rsp.imageinfo_list if image.name ==  name]
+
+        if not image_list:
+            logger.error("Image %s not found" %name)
+            return None
+
+        self._image_id = image_list[0].id
+        logger.info("Searching for uploaded image.......[Done]")
+        return self._image_id
+
+    def find_flavor(self, name=FLAVOR_NAME):
+        logger.info("Searching for required flavor: %s" %name)
+        rc, rsp = self._cal.get_flavor_list(self._acct)
+        flavor_list = [flavor for flavor in rsp.flavorinfo_list if flavor.name == name]
+
+        if not flavor_list:
+            logger.error("Flavor %s not found" %name)
+            self._flavor_id = self.create_flavor()
+        else:
+            self._flavor_id = flavor_list[0].id
+
+        logger.info("Searching for required flavor.......[Done]")
+        return self._flavor_id
+
+        
+    
+
+def main():
+    """
+    Main routine
+    """
+    parser = argparse.ArgumentParser(description='Script to manage openstack resources')
+    
+    parser.add_argument('--controller',
+                        action = 'store',
+                        dest = 'controller',
+                        type = str,
+                        help='IP address of the openstack controller. This is a mandatory parameter')
+
+    parser.add_argument('--cleanup',
+                        action = 'store',
+                        dest = 'cleanup',
+                        nargs = '+',
+                        type = str,
+                        help = 'Perform resource cleanup for openstack installation. \n Possible options are {all, flavors, vms, networks, images}')
+
+    parser.add_argument('--persist-vms',
+                        action = 'store',
+                        dest = 'persist_vms',
+                        help = 'VM instance name to persist')
+
+    parser.add_argument('--salt-master',
+                        action = 'store',
+                        dest = 'salt_master',
+                        type = str,
+                        help='IP Address of salt controller. Required, if VMs are being created.')
+
+    parser.add_argument('--upload-image',
+                        action = 'store',
+                        dest = 'upload_image',
+                        help='Openstack image location to upload and use when creating VMs')
+
+    parser.add_argument('--use-image',
+                        action = 'store',
+                        dest = 'use_image',
+                        help='Image name to be used for VM creation')
+
+    parser.add_argument('--use-flavor',
+                        action = 'store',
+                        dest = 'use_flavor',
+                        help='Flavor name to be used for VM creation')
+    
+    parser.add_argument('--mission-control',
+                        action = 'store_true',
+                        dest = 'mission_control',
+                        help='Create Mission Control VM')
+
+
+    parser.add_argument('--launchpad',
+                        action = 'store_true',
+                        dest = 'launchpad',
+                        help='Create LaunchPad VM')
+
+    parser.add_argument('--use-project',
+                        action = 'store',
+                        dest = 'use_project',
+                        help='Project name to be used for VM creation')
+
+    parser.add_argument('--clean-mclp',
+                        action='store_true',
+                        dest='clean_mclp',
+                        help='Remove Mission Control and Launchpad VMs')
+
+    argument = parser.parse_args()
+
+    if argument.persist_vms is not None:
+        global persistent_resources
+        vm_name_list = argument.persist_vms.split(',')
+        for single_vm in vm_name_list:
+            persistent_resources['vms'].append(single_vm)
+        logger.info("persist-vms: %s" % persistent_resources['vms'])
+
+    if argument.clean_mclp:
+        persistent_resources['vms'] = []
+
+    if argument.controller is None:
+        logger.error('Need openstack controller IP address')
+        sys.exit(-1)
+
+    
+    if argument.use_project is not None:
+        openstack_info['project_name'] = argument.use_project
+
+    ### Start processing
+    logger.info("Instantiating cloud-abstraction-layer")
+    drv = OpenstackResources(argument.controller)
+    logger.info("Instantiating cloud-abstraction-layer.......[Done]")
+
+        
+    if argument.cleanup is not None:
+        for r_type in argument.cleanup:
+            if r_type == 'all':
+                drv.destroy_resource()
+                break
+            if r_type == 'images':
+                drv._destroy_images()
+            if r_type == 'flavors':
+                drv._destroy_flavors()
+            if r_type == 'vms':
+                drv._destroy_vms()
+            if r_type == 'networks':
+                drv._destroy_networks()
+
+    if argument.upload_image is not None:
+        image_name_list = argument.upload_image.split(',')
+        logger.info("Will upload %d image(s): %s" % (len(image_name_list), image_name_list))
+        for image_name in image_name_list:
+            drv.create_image(image_name)
+            #print("Uploaded :", image_name)
+
+    elif argument.use_image is not None:
+        img = drv.find_image(argument.use_image)
+        if img is None:
+            logger.error("Image: %s not found" %(argument.use_image))
+            sys.exit(-4)
+    else:
+        if argument.mission_control or argument.launchpad:
+            img = drv.find_image(basename(DEFAULT_IMAGE))
+            if img is None:
+                drv.create_image(DEFAULT_IMAGE)
+
+    if argument.use_flavor is not None:
+        drv.find_flavor(argument.use_flavor)
+    else:
+        drv.find_flavor()
+        
+    if argument.mission_control:
+        drv.create_mission_control()
+
+    if argument.launchpad:
+        drv.create_launchpad_vm(salt_master = argument.salt_master)
+        
+    
+if __name__ == '__main__':
+    main()
+        
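+# Illustrative invocations (hypothetical addresses):
+#
+#   ./openstack_resources.py --controller 10.66.4.14 --cleanup all
+#   ./openstack_resources.py --controller 10.66.4.14 --mission-control \
+#       --launchpad --salt-master 10.66.4.15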
diff --git a/rwcal/test/rwcal_callback_gtest.cpp b/rwcal/test/rwcal_callback_gtest.cpp
new file mode 100644 (file)
index 0000000..52dc6f6
--- /dev/null
@@ -0,0 +1,79 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ */
+
+
+
+#include <rwut.h>
+
+#include "rwcal-api.h"
+
+struct test_struct {
+  int accessed;
+};
+
+struct test_struct g_test_struct;
+
+class RWCalCallbackTest : public ::testing::Test {
+  /*
+   * This is a tough one to test as we're really relying on the
+   * gobject introspection to do all the data marshalling for us
+   * correctly.  At this point, all I can think of to do is to
+   * just create a closure and then call it the same way it would
+   * typically be called in C and make sure that everything
+   * executed as expected.
+   */
+ protected:
+  rwcal_module_ptr_t rwcal;
+
+  virtual void SetUp() {
+    rwcal = rwcal_module_alloc();
+    ASSERT_TRUE(rwcal);
+
+    g_test_struct.accessed = 0;
+  }
+
+  virtual void TearDown() {
+    rwcal_module_free(&rwcal);
+  }
+
+  virtual void TestSuccess() {
+    ASSERT_TRUE(rwcal);
+#if 0
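+    // NOTE: this compiled-out path assumes a callback along the lines of
+    //   static void update_accessed(void *ud)
+    //   { ((struct test_struct *)ud)->accessed = 1; }
+    // which is not defined in this file.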
+    rwcal_closure_ptr_t closure;
+
+    closure = rwcal_closure_alloc(
+        rwcal,
+        &update_accessed,
+        (void *)&g_test_struct);
+    ASSERT_TRUE(closure);
+
+    ASSERT_EQ(g_test_struct.accessed, 0);
+    rw_cal_closure_callback(closure);
+    ASSERT_EQ(g_test_struct.accessed, 1);
+
+    rwcal_closure_free(&closure);
+    ASSERT_FALSE(closure);
+#endif
+  }
+};
+
+
+TEST_F(RWCalCallbackTest, TestSuccess) {
+  TestSuccess();
+}
diff --git a/rwcal/test/rwcal_dump.cpp b/rwcal/test/rwcal_dump.cpp
new file mode 100644 (file)
index 0000000..ff6fd73
--- /dev/null
@@ -0,0 +1,77 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ */
+
+
+
+/**
+ * @file cal_dump
+ * @author Jeremy Mordkoff
+ * @date 05/14/2015 
+ * @brief test program to dump what we can glean from an installation
+ */
+
+
+#include <limits.h>
+#include <cstdlib>
+#include <iostream>
+
+#include "rwcal-api.h"
+
+
+int main(int argc, char ** argv, char ** envp)
+{
+
+#if 0
+    rw_status_t status;
+    rwcal_module_ptr_t m_mod;
+    Rwcal__YangData__Rwcal__Flavorinfo__FlavorinfoList  *flavor;
+    rwpb_gi_Rwcal_FlavorInfo *flavors;
+    Rwcal__YangData__Rwcal__Flavorinfo *flavorinfo;
+    unsigned int i;
+    char url[128];
+
+    if (argc != 4 ) {
+       fprintf(stderr, "args are IP user password\n");
+       return(1);
+    }
+    snprintf(url, 128, "http://%s:35357/v2.0/tokens", argv[1] );
+
+    m_mod = rwcal_module_alloc();
+    status = rwcal_cloud_init(m_mod, RW_MANIFEST_RWCAL_CLOUD_TYPE_OPENSTACK_AUTH_URL, argv[2], argv[3], url );
+    if (status != RW_STATUS_SUCCESS)
+      return status;
+
+    status = rwcal_cloud_flavor_infos(m_mod, &flavors);
+    if (status != RW_STATUS_SUCCESS)
+      return status;
+    flavorinfo = flavors->s.message;
+    printf("ID                                       NAME             MEM    DISK VCPU PCI  HP TC\n");
+    printf("---------------------------------------- ---------------- ------ ---- ---- ---- -- --\n");
+    for (i = 0; i<flavorinfo->n_flavorinfo_list; i++) {
+      flavor = flavorinfo->flavorinfo_list[i];
+      printf("%-40s %-16s %6d %4d %4d %4d %2d %2d\n", flavor->id, flavor->name, flavor->memory, flavor->disk, flavor->vcpus, flavor->pci_passthru_bw, 
+              flavor->has_huge_pages, flavor->trusted_host_only );
+    }
+
+    rwcal__yang_data__rwcal__flavorinfo__gi_unref(flavors);
+#endif
+    return 0;
+
+}
+
diff --git a/rwcal/test/test_container_cal.py b/rwcal/test/test_container_cal.py
new file mode 100644 (file)
index 0000000..3ec5ca1
--- /dev/null
@@ -0,0 +1,159 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import logging
+import os
+import sys
+import time
+
+import rw_peas
+import rwlogger
+
+from gi.repository import RwcalYang
+
+import rift.rwcal.cloudsim
+import rift.rwcal.cloudsim.lxc as lxc
+
+logger = logging.getLogger('rift.cal')
+
+
+def main(argv=sys.argv[1:]):
+    """
+    Assuming that an LVM backing-store has been created with a volume group
+    called 'rift', the following creates an lxc 'image' and a pair of 'vms'.
+    In the LXC-based container CAL, an 'image' is a container and a 'vm' is a
+    snapshot of the original container.
+
+    In addition to the LVM backing store, it is assumed that there is a network
+    bridge called 'virbr0'.
+
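+    Illustrative host preparation (an environment assumption, not performed
+    by this script):
+
+        vgcreate rift /dev/sdX    # LVM volume group named 'rift'
+        brctl addbr virbr0        # bridge used by the containers
+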
+    """
+    logging.basicConfig(level=logging.DEBUG)
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--rootfs', '-r')
+    parser.add_argument('--num-vms', '-n', type=int, default=2)
+    parser.add_argument('--terminate', '-t', action='store_true')
+
+    args = parser.parse_args(argv)
+
+    # Acquire the plugin from peas
+    plugin = rw_peas.PeasPlugin('rwcal-plugin', 'RwCal-1.0')
+    engine, info, extension = plugin()
+
+    # Get the RwLogger context
+    rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+
+    cal = plugin.get_interface("Cloud")
+    cal.init(rwloggerctx)
+
+    # The account object is not currently used, but it is required by the CAL
+    # interface, so we create an empty object here to represent it.
+    account = RwcalYang.CloudAccount()
+    account.account_type = "lxc"
+
+    # Make sure that any containers that were previously created have been
+    # stopped and destroyed.
+    containers = lxc.containers()
+
+    for container in containers:
+        lxc.stop(container)
+
+    for container in containers:
+        lxc.destroy(container)
+
+    template = os.path.join(
+            os.environ['RIFT_INSTALL'],
+            'etc/lxc-fedora-rift.lxctemplate',
+            )
+
+    logger.info(template)
+    logger.info(args.rootfs)
+
+    # Create an image that can be used to create VMs
+    image = RwcalYang.ImageInfoItem()
+    image.name = 'rift-master'
+    image.lxc.size = '2.5G'
+    image.lxc.template_path = template
+    image.lxc.tarfile = args.rootfs
+
+    cal.create_image(account, image)
+
+    # Create a VM
+    vms = []
+    for index in range(args.num_vms):
+        vm = RwcalYang.VMInfoItem()
+        vm.vm_name = 'rift-s{}'.format(index + 1)
+        vm.image_id = image.id
+
+        cal.create_vm(account, vm)
+
+        vms.append(vm)
+
+    # Create the default and data networks
+    network = RwcalYang.NetworkInfoItem(network_name='virbr0')
+    cal.create_network(account, network)
+
+    os.system('/usr/sbin/brctl show')
+
+    # Create pairs of ports to connect the networks
+    for index, vm in enumerate(vms):
+        port = RwcalYang.PortInfoItem()
+        port.port_name = "eth0"
+        port.network_id = network.network_id
+        port.vm_id = vm.vm_id
+        port.ip_address = "192.168.122.{}".format(index + 101)
+        port.lxc.veth_name = "rws{}".format(index)
+
+        cal.create_port(account, port)
+
+    # Swap out the current instance of the plugin to test that the data is
+    # shared among different instances
+    cal = plugin.get_interface("Cloud")
+    cal.init(rwloggerctx)
+
+    # Start the VMs
+    for vm in vms:
+        cal.start_vm(account, vm.vm_id)
+
+    lxc.ls()
+
+    # Exit if the containers are not supposed to be terminated
+    if not args.terminate:
+        return
+
+    time.sleep(3)
+
+    # Stop the VMs
+    for vm in vms:
+        cal.stop_vm(account, vm.vm_id)
+
+    lxc.ls()
+
+    # Delete the VMs
+    for vm in vms:
+        cal.delete_vm(account, vm.vm_id)
+
+    # Delete the image
+    cal.delete_image(account, image.id)
+
+
+if __name__ == "__main__":
+    main()
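+
+# Illustrative invocation (hypothetical rootfs path):
+#
+#   ./test_container_cal.py --rootfs /path/to/rootfs.tar --num-vms 2 --terminate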
diff --git a/rwcal/test/test_openstack_install.py b/rwcal/test/test_openstack_install.py
new file mode 100644 (file)
index 0000000..0e4a61f
--- /dev/null
@@ -0,0 +1,567 @@
+"""
+#
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+#
+# @file test_openstack_install.py
+# @author Varun Prasad (varun.prasad@riftio.com)
+# @date 10/10/2015
+# @brief Test Openstack/os install
+#
+"""
+
+import logging
+import os
+import re
+import socket
+import sys
+import time
+import tempfile
+
+from keystoneclient.v3 import client
+import paramiko
+import pytest
+import requests
+import xmlrpc.client
+
+from gi.repository import RwcalYang
+from gi.repository.RwTypes import RwStatus
+import rw_peas
+import rwlogger
+
+
+logger = logging.getLogger()
+logging.basicConfig(level=logging.INFO)
+
+
+class Host(object):
+    """A wrapper on top of a host, which provides a ssh connection instance.
+
+    Assumption:
+    The username/password for the VM is default.
+    """
+    _USERNAME = "root"
+    _PASSWORD = "riftIO"
+
+    def __init__(self, hostname):
+        """
+        Args:
+            hostname (str): Hostname (grunt3.qanet.riftio.com)
+        """
+        self.hostname = hostname
+        try:
+            self.ip = socket.gethostbyname(hostname)
+        except socket.gaierror:
+            logger.error("Unable to resolve the hostname {}".format(hostname))
+            sys.exit(1)
+
+        self.ssh = paramiko.SSHClient()
+        # Note: Do not load the system keys as the test will fail if the keys
+        # change.
+        self.ssh.set_missing_host_key_policy(paramiko.WarningPolicy())
+
+    def connect(self):
+        """Set up ssh connection.
+        """
+        logger.debug("Trying to connect to {}: {}".format(
+                self.hostname,
+                self.ip))
+
+        self.ssh.connect(
+                self.ip,
+                username=self._USERNAME,
+                password=self._PASSWORD)
+
+    def put(self, content, dest):
+        """Creates a tempfile and puts it in the destination path in the HOST.
+        Args:
+            content (str): Content to be written to a file.
+            dest (str): Path to store the content.
+        """
+        temp_file = tempfile.NamedTemporaryFile(delete=False)
+        temp_file.write(content.encode("UTF-8"))
+        temp_file.close()
+
+        logger.info("Writing {} file in {}".format(dest, self.hostname))
+        sftp = self.ssh.open_sftp()
+        sftp.put(temp_file.name, dest)
+        sftp.close()
+        # Remove the local temp file now that it has been copied to the host.
+        os.unlink(temp_file.name)
+
+    def clear(self):
+        """Clean up
+        """
+        self.ssh.close()
+
+
+class Grunt(Host):
+    """A wrapper on top of grunt machine, provides functionalities to check
+    if the grunt is up, IP resolution.
+    """
+    @property
+    def grunt_name(self):
+        """Extract the grunt name from the FQDN
+
+        Returns:
+            str: e.g. grunt3 from grunt3.qanet.riftio.com
+        """
+        return self.hostname.split(".")[0]
+
+    @property
+    def dns_server(self):
+        """Hard-coded for now.
+        """
+        return "10.95.0.3"
+
+    @property
+    def floating_ip(self):
+        return "10.95.1.0"
+
+    @property
+    def private_ip(self):
+        """Construct the private IP from the grunt name. 10.0.xx.0 where xx is
+        value of the grunt (3 in case of grunt3)
+        """
+        host_part = re.sub(r"[a-zA-Z]+", "", self.grunt_name)
+        return '10.0.{}.0'.format(host_part)
+
+    def is_system_up(self):
+        """Checks if system is up using ssh login.
+
+        Returns:
+            bool: Indicates if system is UP
+        """
+        try:
+            self.connect()
+        except OSError:
+            return False
+
+        return True
+
+    def wait_till_system_is_up(self, timeout=50, check_openstack=False):
+        """Blocking call to check if system is up.
+        Args:
+            timeout (int, optional): In mins(~).
+            check_openstack (bool, optional): If true will also check if
+                openstack is up and running on the system.
+
+        Raises:
+            OSError: If system start exceeds the timeout
+        """
+
+        TRY_DURATION = 20  # secs
+        total_tries = timeout * (60 / TRY_DURATION)  # 3 tries/mins i.e. 20 secs.
+        tries = 0
+
+        while tries < total_tries:
+            if self.is_system_up():
+                if check_openstack and self.is_openstack_up():
+                    return
+                elif not check_openstack:
+                    return
+
+            logger.info("{} down: Sleeping for {} secs. Try {} of {}".format(
+                    self.hostname,
+                    TRY_DURATION,
+                    tries,
+                    int(total_tries)))
+
+            time.sleep(TRY_DURATION)
+            tries += 1
+
+        raise OSError("Exception in system start {}({})".format(
+                self.hostname,
+                self.ip))
+
+    def is_openstack_up(self):
+        """Checks if openstack is UP, by verifying the URL.
+
+        Returns:
+            bool: Indicates if system is UP
+        """
+        url = "http://{}/dashboard/".format(self.ip)
+
+        logger.info("Checking if openstack({}) is UP".format(url))
+
+        try:
+            requests.get(url)
+        except requests.ConnectionError:
+            return False
+
+        return True
+
+
+class Cobbler(Host):
+    """A thin wrapper on cobbler and provides an interface using XML rpc client.
+
+    Assumption:
+    System instances are already added to cobbler(with ipmi). Adding instances
+    can also be automated, can be taken up sometime later.
+    """
+    def __init__(self, hostname, username="cobbler", password="cobbler"):
+        """
+        Args:
+            hostname (str): Cobbler host.
+            username (str, optional): username.
+            password (str, optional): password
+        """
+        super().__init__(hostname)
+
+        url = "https://{}/cobbler_api".format(hostname)
+
+        self.server = xmlrpc.client.ServerProxy(url)
+        logger.info("obtained a cobbler instance for the host {}".format(hostname))
+
+        self.token = self.server.login(username, password)
+        self.connect()
+
+    def create_profile(self, profile_name, ks_file):
+        """Create the profile for the system.
+
+        Args:
+            profile_name (str): Name of the profile.
+            ks_file (str): Path of the kick start file.
+        """
+        profile_attrs = {
+                "name": profile_name,
+                "kickstart": ks_file,
+                "repos": ['riftware', 'rift-misc', 'fc21-x86_64-updates',
+                          'fc21-x86_64', 'openstack-kilo'],
+                "owners": ["admin"],
+                "distro": "FC21.3-x86_64"
+                }
+
+        profile_id = self.server.new_profile(self.token)
+        for key, value in profile_attrs.items():
+            self.server.modify_profile(profile_id, key, value, self.token)
+        self.server.save_profile(profile_id, self.token)
+
+    def create_snippet(self, snippet_name, snippet_content):
+        """Unfortunately the XML rpc apis don't provide a direct interface to
+        create snippets, so falling back on the default sftp methods.
+
+        Args:
+            snippet_name (str): Name.
+            snippet_content (str): snippet's content.
+
+        Returns:
+            str: path where the snippet is stored
+        """
+        path = "/var/lib/cobbler/snippets/{}".format(snippet_name)
+        self.put(snippet_content, path)
+        return path
+
+    def create_kickstart(self, ks_name, ks_content):
+        """Creates and returns the path of the ks file.
+
+        Args:
+            ks_name (str): Name of the ks file to be saved.
+            ks_content (str): Content for ks file.
+
+        Returns:
+            str: path where the ks file is saved.
+        """
+        path = "/var/lib/cobbler/kickstarts/{}".format(ks_name)
+        self.put(ks_content, path)
+        return path
+
+    def boot_system(self, grunt, profile_name, false_boot=False):
+        """Boots the system with the profile specified. Also enable net-boot
+
+        Args:
+            grunt (Grunt): instance of grunt
+            profile_name (str): A valid profile name.
+            false_boot (bool, optional): debug only option.
+        """
+        if false_boot:
+            return
+
+        system_id = self.server.get_system_handle(
+                grunt.grunt_name,
+                self.token)
+        self.server.modify_system(
+                system_id,
+                "profile",
+                profile_name,
+                self.token)
+
+        self.server.modify_system(
+                system_id,
+                "netboot_enabled",
+                "True",
+                self.token)
+        self.server.save_system(system_id, self.token)
+        self.server.power_system(system_id, "reboot", self.token)
+
+
+class OpenstackTest(object):
+    """Driver class to automate the installation.
+    """
+    def __init__(
+            self,
+            cobbler,
+            controller,
+            compute_nodes=None,
+            test_prefix="openstack_test"):
+        """
+        Args:
+            cobbler (Cobbler): Instance of Cobbler
+            controller (Controller): Controller node instance
+            compute_nodes (list, optional): A list of Grunt nodes to be set up
+                    as compute nodes.
+            test_prefix (str, optional): All entities created by the script are
+                    prefixed with this string.
+        """
+        self.cobbler = cobbler
+        self.controller = controller
+        self.compute_nodes = [] if compute_nodes is None else compute_nodes
+        self.test_prefix = test_prefix
+
+    def _prepare_snippet(self):
+        """Prepares the config based on the controller and compute nodes.
+
+        Returns:
+            str: Openstack config content.
+        """
+        content = ""
+
+        config = {}
+        config['host_name'] = self.controller.grunt_name
+        config['ip'] = self.controller.ip
+        config['dns_server'] = self.controller.dns_server
+        config['private_ip'] = self.controller.private_ip
+        config['floating_ip'] = self.controller.floating_ip
+
+        content += Template.GRUNT_CONFIG.format(**config)
+        for compute_node in self.compute_nodes:
+            config["host_name"] = compute_node.grunt_name
+            content += Template.GRUNT_CONFIG.format(**config)
+
+        content = Template.SNIPPET_TEMPLATE.format(config=content)
+
+        return content
+
+    def prepare_profile(self):
+        """Creates the cobbler profile.
+        """
+        snippet_content = self._prepare_snippet()
+        self.cobbler.create_snippet(
+                "{}.cfg".format(self.test_prefix),
+                snippet_content)
+
+        ks_content = Template.KS_TEMPLATE
+        ks_file = self.cobbler.create_kickstart(
+                "{}.ks".format(self.test_prefix),
+                ks_content)
+
+        self.cobbler.create_profile(self.test_prefix, ks_file)
+        return self.test_prefix
+
+    def _get_cal_account(self):
+        """
+        Creates an object for class RwcalYang.CloudAccount()
+        """
+        account                        = RwcalYang.CloudAccount()
+        account.account_type           = "openstack"
+        account.openstack.key          = "{}_user".format(self.test_prefix)
+        account.openstack.secret       = "mypasswd"
+        account.openstack.auth_url     = 'http://{}:35357/v3/'.format(self.controller.ip)
+        account.openstack.tenant       = self.test_prefix
+
+        return account
+
+    def start(self):
+        """Starts the installation.
+        """
+        profile_name = self.prepare_profile()
+
+        self.cobbler.boot_system(self.controller, profile_name)
+        self.controller.wait_till_system_is_up(check_openstack=True)
+
+        try:
+            logger.info("Controller system is UP. Setting up compute nodes")
+            for compute_node in self.compute_nodes:
+                self.cobbler.boot_system(compute_node, profile_name)
+                compute_node.wait_till_system_is_up()
+        except OSError as e:
+            logger.error("System set-up failed {}".format(e))
+            sys.exit(1)
+
+        # Currently we don't have wrapper on top of users/projects so using
+        # keystone API directly
+        acct = self._get_cal_account()
+
+        keystone_conn = client.Client(
+                auth_url=acct.openstack.auth_url,
+                username='admin',
+                password='mypasswd')
+
+        # Create a test project
+        project = keystone_conn.projects.create(
+                acct.openstack.tenant,
+                "default",
+                description="Openstack test project")
+
+        # Create an user
+        user = keystone_conn.users.create(
+                acct.openstack.key,
+                password=acct.openstack.secret,
+                default_project=project)
+
+        # Make the newly created user as ADMIN
+        admin_role = keystone_conn.roles.list(name="admin")[0]
+        keystone_conn.roles.grant(
+                admin_role.id,
+                user=user.id,
+                project=project.id)
+
+        # The nova API needs to be restarted, otherwise the newly created
+        # user and project are not picked up.
+        self.controller.ssh.exec_command("source keystonerc_admin && "
+                "service openstack-nova-api restart")
+        time.sleep(10)
+
+        return acct
+
+    def clear(self):
+        """Close out all SFTP connections.
+        """
+        nodes = [self.controller]
+        nodes.extend(self.compute_nodes)
+        for node in nodes:
+            node.clear()
+
+
+###############################################################################
+## Begin pytests
+###############################################################################
+
+
+@pytest.fixture(scope="session")
+def cal(request):
+    """
+    Loads rw.cal plugin via libpeas
+    """
+    plugin = rw_peas.PeasPlugin('rwcal_openstack', 'RwCal-1.0')
+    engine, info, extension = plugin()
+
+    # Get the RwLogger context
+    rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+
+    cal = plugin.get_interface("Cloud")
+    try:
+        rc = cal.init(rwloggerctx)
+        assert rc == RwStatus.SUCCESS
+    except Exception:
+        logger.error("ERROR:Cal plugin instantiation failed. Aborting tests")
+    else:
+        logger.info("Openstack Cal plugin successfully instantiated")
+
+    return cal
+
+
+@pytest.fixture(scope="session")
+def account(request):
+    """Creates an openstack instance with 1 compute node and returns the newly
+    created account.
+    """
+    cobbler = Cobbler("qacobbler.eng.riftio.com")
+    controller = Grunt("grunt3.qanet.riftio.com")
+    compute_nodes = [Grunt("grunt5.qanet.riftio.com")]
+
+    test = OpenstackTest(cobbler, controller, compute_nodes)
+    account = test.start()
+
+    request.addfinalizer(test.clear)
+    return account
+
+
+def test_list_images(cal, account):
+    """Verify if 2 images are present
+    """
+    status, resources = cal.get_image_list(account)
+    assert len(resources.imageinfo_list) == 2
+
+def test_list_flavors(cal, account):
+    """Basic flavor checks
+    """
+    status, resources = cal.get_flavor_list(account)
+    assert len(resources.flavorinfo_list) == 5
+
+
+class Template(object):
+    """A container to hold all cobbler related templates.
+    """
+    GRUNT_CONFIG = """
+{host_name})
+    CONTROLLER={ip}
+    BRGIF=1
+    OVSDPDK=N
+    TRUSTED=N
+    QAT=N
+    HUGEPAGE=0
+    VLAN=10:14
+    PRIVATE_IP={private_ip}
+    FLOATING_IP={floating_ip}
+    DNS_SERVER={dns_server}
+    ;;
+
+    """
+
+    SNIPPET_TEMPLATE = """
+# =====================Beginning of snippet=================
+# snippet openstack_test.cfg
+case $name in
+
+{config}
+
+*)
+    ;;
+esac
+
+# =====================End of snippet=================
+
+"""
+
+    KS_TEMPATE = """
+$SNIPPET('rift-repos')
+$SNIPPET('rift-base')
+%packages
+@core
+wget
+$SNIPPET('rift-grunt-fc21-packages')
+ganglia-gmetad
+ganglia-gmond
+%end
+
+%pre
+$SNIPPET('log_ks_pre')
+$SNIPPET('kickstart_start')
+# Enable installation monitoring
+$SNIPPET('pre_anamon')
+%end
+
+%post --log=/root/ks_post.log
+$SNIPPET('openstack_test.cfg')
+$SNIPPET('ganglia')
+$SNIPPET('rift-post-yum')
+$SNIPPET('rift-post')
+$SNIPPET('rift_fix_grub')
+
+$SNIPPET('rdo-post')
+echo "banner RDO test" >> /etc/profile
+
+$SNIPPET('kickstart_done')
+%end
+"""
diff --git a/rwcal/test/test_rwcal_openstack.py b/rwcal/test/test_rwcal_openstack.py
new file mode 100644 (file)
index 0000000..4ce494b
--- /dev/null
@@ -0,0 +1,1057 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import datetime
+import logging
+import time
+import unittest
+import hashlib
+
+import novaclient.exceptions as nova_exception
+import paramiko
+import rw_peas
+import rwlogger
+from keystoneclient import v3 as ksclient
+
+from gi.repository import RwcalYang
+from gi.repository.RwTypes import RwStatus
+from rift.rwcal.openstack.openstack_drv import KeystoneDriver, NovaDriver
+
+logger = logging.getLogger('rwcal-openstack')
+
+#
+# Important information about openstack installation. This needs to be manually verified 
+#
+openstack_info = {
+    'username'           : 'pluto',
+    'password'           : 'mypasswd',
+    'auth_url'           : 'http://10.66.4.14:5000/v3/',
+    'project_name'       : 'demo',
+    'mgmt_network'       : 'private',
+    'reserved_flavor'    : 'm1.medium',
+    'reserved_image'     : 'rift-root-latest.qcow2',
+    'physical_network'   : None,
+    'network_type'       : None,
+    'segmentation_id'    : None
+    }
+
+
+def get_cal_account():
+    """
+    Creates an object for class RwcalYang.CloudAccount()
+    """
+    account                        = RwcalYang.CloudAccount()
+    account.account_type           = "openstack"
+    account.openstack.key          = openstack_info['username']
+    account.openstack.secret       = openstack_info['password']
+    account.openstack.auth_url     = openstack_info['auth_url']
+    account.openstack.tenant       = openstack_info['project_name']
+    account.openstack.mgmt_network = openstack_info['mgmt_network']
+    return account
+
+def get_cal_plugin():
+    """
+    Loads rw.cal plugin via libpeas
+    """
+    plugin = rw_peas.PeasPlugin('rwcal_openstack', 'RwCal-1.0')
+    engine, info, extension = plugin()
+
+    # Get the RwLogger context
+    rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+
+    cal = plugin.get_interface("Cloud")
+    try:
+        rc = cal.init(rwloggerctx)
+        assert rc == RwStatus.SUCCESS
+    except Exception:
+        logger.error("ERROR:Cal plugin instantiation failed. Aborting tests")
+    else:
+        logger.info("Openstack Cal plugin successfully instantiated")
+    return cal 
+
+
+class OpenStackTest(unittest.TestCase):
+    NodeID = "123456789012345" # Some random number to test VM tagging
+    MemoryPageSize = "LARGE"
+    CpuPolicy = "DEDICATED"
+    CpuThreadPolicy = "SEPARATE"
+    CpuThreads = 1
+    NumaNodeCount = 2
+    HostTrust = "trusted"
+    PCIPassThroughAlias = "PCI_10G_ALIAS"
+    SEG_ID = openstack_info['segmentation_id']
+    
+    def setUp(self):
+        """
+        Assumption:
+         - The openstack installation has the reserved flavor
+           (openstack_info['reserved_flavor']) and image
+           (openstack_info['reserved_image']) pre-created.
+
+        If these resources are not present, this test will fail.
+        """
+        self._acct = get_cal_account()
+        logger.info("Openstack-CAL-Test: setUp")
+        self.cal   = get_cal_plugin()
+        logger.info("Openstack-CAL-Test: setUpEND")
+        
+        # First check for VM Flavor and Image and get the corresponding IDs
+        rc, rs = self.cal.get_flavor_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        flavor_list = [ flavor for flavor in rs.flavorinfo_list if flavor.name == openstack_info['reserved_flavor'] ]
+        self.assertNotEqual(len(flavor_list), 0)
+        self._flavor = flavor_list[0]
+
+        rc, rs = self.cal.get_image_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        image_list = [ image for image in rs.imageinfo_list if image.name == openstack_info['reserved_image'] ]
+        self.assertNotEqual(len(image_list), 0)
+        self._image = image_list[0]
+
+        rc, rs = self.cal.get_network_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        networks = [ network for network in rs.networkinfo_list if (network.network_name == 'rift.cal.unittest.network' or network.network_name == 'rift.cal.virtual_link') ]
+        for network in networks:
+            self.cal.delete_virtual_link(self._acct, network.network_id)
+            
+    def tearDown(self):
+        logger.info("Openstack-CAL-Test: tearDown")
+        
+
+    def _md5(self, fname, blksize=1048576):
+        hash_md5 = hashlib.md5()
+        with open(fname, "rb") as f:
+            for chunk in iter(lambda: f.read(blksize), b""):
+                hash_md5.update(chunk)
+        return hash_md5.hexdigest()
+
+    @unittest.skip("Skipping test_list_flavors")        
+    def test_list_flavor(self):
+        """
+        List existing flavors from openstack installation
+        """
+        logger.info("Openstack-CAL-Test: Starting List Flavors Test")
+        rc, rsp = self.cal.get_flavor_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d flavors" %(len(rsp.flavorinfo_list)))
+        for flavor in rsp.flavorinfo_list:
+            rc, flv = self.cal.get_flavor(self._acct, flavor.id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            self.assertEqual(flavor.id, flv.id)
+        
+    @unittest.skip("Skipping test_list_images")                    
+    def test_list_images(self):
+        """
+        List existing images from openstack installation
+        """
+        logger.info("Openstack-CAL-Test: Starting List Images Test")
+        rc, rsp = self.cal.get_image_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d images" %(len(rsp.imageinfo_list)))
+        #for image in rsp.imageinfo_list:
+        #    rc, img = self.cal.get_image(self._acct, image.id)
+        #    self.assertEqual(rc, RwStatus.SUCCESS)
+        #    self.assertEqual(image.id, img.id)
+        
+    @unittest.skip("Skipping test_list_vms")                
+    def test_list_vms(self):
+        """
+        List existing VMs from openstack installation
+        """
+        logger.info("Openstack-CAL-Test: Starting List VMs Test")
+        rc, rsp = self.cal.get_vm_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d VMs" %(len(rsp.vminfo_list)))
+        for vm in rsp.vminfo_list:
+            rc, server = self.cal.get_vm(self._acct, vm.vm_id)
+            self.assertEqual(vm.vm_id, server.vm_id)
+            
+    @unittest.skip("Skipping test_list_networks")                            
+    def test_list_networks(self):
+        """
+        List existing Network from openstack installation
+        """
+        logger.info("Openstack-CAL-Test: Starting List Networks Test")
+        rc, rsp = self.cal.get_network_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d Networks" %(len(rsp.networkinfo_list)))
+        for network in rsp.networkinfo_list:
+            rc, net = self.cal.get_network(self._acct, network.network_id)
+            self.assertEqual(network.network_id, net.network_id)
+        
+    @unittest.skip("Skipping test_list_ports")                                    
+    def test_list_ports(self):
+        """
+        List existing Ports from openstack installation
+        """
+        logger.info("Openstack-CAL-Test: Starting List Ports Test")
+        rc, rsp = self.cal.get_port_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d Ports" %(len(rsp.portinfo_list)))
+        for port in rsp.portinfo_list:
+            rc, p = self.cal.get_port(self._acct, port.port_id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            self.assertEqual(port.port_id, p.port_id)
+
+    def _get_image_info_request(self):
+        """
+        Returns request object of type RwcalYang.ImageInfoItem()
+        """
+        img = RwcalYang.ImageInfoItem()
+        img.name = "rift.cal.unittest.image"
+        img.location = '/net/sharedfiles/home1/common/vm/rift-root-latest.qcow2'
+        img.disk_format = "qcow2"
+        img.container_format = "bare"
+        img.checksum = self._md5(img.location)
+        return img
+
+    def _get_image_info(self, img_id):
+        """
+        Checks the image status until it becomes active or a timeout occurs (~200 sec: 100 polls, 2 sec apart)
+        Returns the image_info dictionary
+        """
+        rs = None
+        rc = None
+        for i in range(100):
+            rc, rs = self.cal.get_image(self._acct, img_id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            logger.info("Openstack-CAL-Test: Image (image_id: %s) reached state : %s" %(img_id, rs.state))
+            if rs.state == 'active':
+                break
+            else:
+                time.sleep(2) # Sleep for 2 seconds between polls
+        return rs
+    
+    @unittest.skip("Skipping test_create_delete_image")                            
+    def test_create_delete_image(self):
+        """
+        Create/Query/Delete a new image in openstack installation
+        """
+        logger.info("Openstack-CAL-Test: Starting Image create test")
+        img = self._get_image_info_request()
+        rc, img_id = self.cal.create_image(self._acct, img)
+        logger.info("Openstack-CAL-Test: Created Image with image_id: %s" %(img_id))
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        img_info = self._get_image_info(img_id)
+        self.assertNotEqual(img_info, None)
+        self.assertEqual(img_id, img_info.id)
+        logger.info("Openstack-CAL-Test: Image (image_id: %s) reached state : %s" %(img_id, img_info.state))
+        self.assertEqual(img_info.has_field('checksum'), True)
+        #self.assertEqual(img_info.checksum, OpenStackTest.IMG_Checksum)
+        logger.info("Openstack-CAL-Test: Initiating Delete Image operation for image_id: %s" %(img_id))
+        rc = self.cal.delete_image(self._acct, img_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Image (image_id: %s) successfully deleted" %(img_id))
+
+    def _get_flavor_info_request(self):
+        """
+        Returns request object of type RwcalYang.FlavorInfoItem()
+        """
+        flavor                                     = RwcalYang.FlavorInfoItem()
+        flavor.name                                = 'rift.cal.unittest.flavor'
+        flavor.vm_flavor.memory_mb                 = 16384 # 16GB
+        flavor.vm_flavor.vcpu_count                = 4 
+        flavor.vm_flavor.storage_gb                = 40 # 40GB
+        flavor.guest_epa.mempage_size              = OpenStackTest.MemoryPageSize
+        flavor.guest_epa.cpu_pinning_policy        = OpenStackTest.CpuPolicy
+        flavor.guest_epa.cpu_thread_pinning_policy = OpenStackTest.CpuThreadPolicy
+        flavor.guest_epa.numa_node_policy.node_cnt = OpenStackTest.NumaNodeCount
+        for i in range(OpenStackTest.NumaNodeCount):
+            node = flavor.guest_epa.numa_node_policy.node.add()
+            node.id = i
+            if i == 0:
+                node.vcpu = [0,1]
+            elif i == 1:
+                node.vcpu = [2,3]
+            node.memory_mb = 8192 # half of the 16384 MB flavor memory per NUMA node
+        dev = flavor.guest_epa.pcie_device.add()
+        dev.device_id = OpenStackTest.PCIPassThroughAlias
+        dev.count = 1
+        return flavor
+        
+    @unittest.skip("Skipping test_create_delete_flavor")                            
+    def test_create_delete_flavor(self):
+        """
+        Create/Query/Delete a new flavor in openstack installation
+        """
+        logger.info("Openstack-CAL-Test: Starting Image create/delete test")
+
+        ### Delete any previously created flavor with name rift.cal.unittest.flavor
+        rc, rs = self.cal.get_flavor_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        flavor_list = [ flavor for flavor in rs.flavorinfo_list if flavor.name == 'rift.cal.unittest.flavor' ]
+        if flavor_list:
+            rc = self.cal.delete_flavor(self._acct, flavor_list[0].id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+        
+        flavor = self._get_flavor_info_request()
+        rc, flavor_id = self.cal.create_flavor(self._acct, flavor)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        
+        logger.info("Openstack-CAL-Test: Created new flavor with flavor_id : %s" %(flavor_id))
+        rc, rs = self.cal.get_flavor(self._acct, flavor_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rs.id, flavor_id)
+
+        # Verify EPA Attributes
+        self.assertEqual(rs.guest_epa.mempage_size, OpenStackTest.MemoryPageSize)
+        self.assertEqual(rs.guest_epa.cpu_pinning_policy, OpenStackTest.CpuPolicy)
+        self.assertEqual(rs.guest_epa.cpu_thread_pinning_policy, OpenStackTest.CpuThreadPolicy)
+        self.assertEqual(rs.guest_epa.numa_node_policy.node_cnt, OpenStackTest.NumaNodeCount)
+        self.assertEqual(len(rs.guest_epa.pcie_device), 1)
+        self.assertEqual(rs.guest_epa.pcie_device[0].device_id, OpenStackTest.PCIPassThroughAlias)
+        self.assertEqual(rs.guest_epa.pcie_device[0].count, 1)
+        logger.info("Openstack-CAL-Test: Initiating delete for flavor_id : %s" %(flavor_id))
+        rc = self.cal.delete_flavor(self._acct, flavor_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        # Check that flavor does not exist anymore in list_flavor
+        rc, rs = self.cal.get_flavor_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        flavor_list = [ flavor for flavor in rs.flavorinfo_list if flavor.id == flavor_id ]
+        # Flavor List should be empty
+        self.assertEqual(len(flavor_list), 0)
+        logger.info("Openstack-CAL-Test: Flavor (flavor_id: %s) successfully deleted" %(flavor_id))
+
+    def _get_vm_info_request(self, flavor_id, image_id):
+        """
+        Returns request object of type RwcalYang.VMInfoItem
+        """
+        vm = RwcalYang.VMInfoItem()
+        vm.vm_name = 'rift.cal.unittest.vm'
+        vm.flavor_id = flavor_id
+        vm.image_id  = image_id
+        vm.cloud_init.userdata = ''
+        vm.user_tags.node_id  = OpenStackTest.NodeID
+        return vm
+
+    def _check_vm_state(self, vm_id, expected_state):
+        """
+        Wait until VM reaches particular state (expected_state). 
+        """
+        # Wait while VM goes to required state
+
+        for i in range(50): # 50 poll iterations...
+            rc, rs = self.cal.get_vm(self._acct, vm_id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            logger.info("Openstack-CAL-Test: VM vm_id : %s. Current VM state : %s " %(vm_id, rs.state))
+            if rs.state == expected_state:
+                break
+            else:
+                time.sleep(1)
+
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rs.state, expected_state)
+
+    def _create_vm(self, flavor, image, port_list = None):
+        """
+        Create VM and perform validity checks
+        """
+        logger.info("Openstack-CAL-Test: Using image : %s and flavor : %s " %(image.name, flavor.name))
+        vm = self._get_vm_info_request(flavor.id, image.id)
+
+        if port_list:
+            for port_id in port_list:
+                port = vm.port_list.add()
+                port.port_id = port_id 
+
+        rc, vm_id = self.cal.create_vm(self._acct, vm)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        ### Check if VM creation is successful
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Successfully created VM with vm_id : %s. Current VM state : %s " %(vm_id, rs.state))
+
+        ### Ensure the VM state is active
+        self._check_vm_state(vm_id, 'ACTIVE')
+
+        ### Ensure that userdata tags are set as expected
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rs.user_tags.has_field('node_id'), True)
+        self.assertEqual(getattr(rs.user_tags, 'node_id'), OpenStackTest.NodeID)
+        logger.info("Openstack-CAL-Test: Successfully verified the user tags for VM-ID: %s" %(vm_id))
+        return rs, vm_id
+
+    def _delete_vm(self, vm_id):
+        """
+        Delete VM and perform validity checks
+        """
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        logger.info("Openstack-CAL-Test: Initiating VM Delete operation on VM vm_id : %s. Current VM state : %s " %(vm_id, rs.state))
+
+        rc = self.cal.delete_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        for i in range(50):
+            # Check if VM still exists
+            rc, rs = self.cal.get_vm_list(self._acct)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            vm_list = [vm for vm in rs.vminfo_list if vm.vm_id == vm_id]
+            if not len(vm_list):
+                break
+            time.sleep(1)
+        
+        rc, rs = self.cal.get_vm_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        vm_list = [vm for vm in rs.vminfo_list if vm.vm_id == vm_id]
+        self.assertEqual(len(vm_list), 0)
+        logger.info("Openstack-CAL-Test: VM with vm_id : %s successfully deleted" %(vm_id))
+
+    def _stop_vm(self, vm_id):
+        """
+        Stop VM and perform validity checks
+        """
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Initiating Stop VM operation on VM vm_id : %s. Current VM state : %s " %(vm_id, rs.state))
+        rc = self.cal.stop_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        ### Ensure that VM state is SHUTOFF
+        self._check_vm_state(vm_id, 'SHUTOFF')
+        
+        
+    def _start_vm(self, vm_id):
+        """
+        Starts VM and performs validity checks
+        """
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Initiating Start VM operation on VM vm_id : %s. Current VM state : %s " %(vm_id, rs.state))
+        rc = self.cal.start_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        ### Ensure that VM state is ACTIVE
+        self._check_vm_state(vm_id, 'ACTIVE')
+
+        
+    def _reboot_vm(self, vm_id):
+        """
+        Reboot VM and perform validity checks
+        """
+        rc, rs = self.cal.get_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Initiating Reboot VM operation on VM vm_id : %s. Current VM state : %s " %(vm_id, rs.state))
+        rc = self.cal.reboot_vm(self._acct, vm_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        ### Ensure that VM state is ACTIVE
+        self._check_vm_state(vm_id, 'ACTIVE')
+
+    def assert_vm(self, vm_data, flavor):
+        """Verify the newly created VM for attributes specified in the flavor.
+
+        Args:
+            vm_data (VmData): Instance of the newly created VM
+            flavor (FlavorInfoItem): Config flavor.
+        """
+        vm_config = flavor
+
+        # Page size seems to be 4096, regardless of the page size name.
+        page_lookup = {"large": '4096', "small": '4096'}
+        FIELDS = ["vcpus", "cpu_threads", "memory_page_size", "disk",
+                  "numa_node_count", "memory", "pci_passthrough_device_list"]
+
+        for field in FIELDS:
+            if field not in vm_config:
+                continue
+
+            vm_value = getattr(vm_data, field)
+            config_value = getattr(vm_config, field)
+
+            if field == "memory_page_size":
+                config_value = page_lookup[config_value]
+
+            if field == "memory":
+                config_value = int(config_value/1000)
+
+            if field == "pci_passthrough_device_list":
+                config_value = len(config_value)
+                vm_value = len(vm_value)
+
+            self.assertEqual(vm_value, config_value)
+
+    @unittest.skip("Skipping test_vm_epa_attributes")
+    def test_vm_epa_attributes(self):
+        """
+        Primary goal: To create a VM with the specified EPA Attributes
+        Secondary goal: To verify flavor creation/delete
+        """
+
+        logger.info("Openstack-CAL-Test: Starting VM(EPA) create/delete test")
+        flavor = self._get_flavor_info_request()
+   
+        rc, flavor_id = self.cal.do_create_flavor(self._acct, flavor)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        flavor.id = flavor_id
+
+        data, vm_id = self._create_vm(flavor, self._image)
+
+        vm_data = VmData(data.host_name, data.management_ip)
+        self.assert_vm(vm_data, flavor)
+
+        self._delete_vm(vm_id)
+
+        rc = self.cal.do_delete_flavor(self._acct, flavor_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+    @unittest.skip("Skipping test_expiry_token")
+    def test_expiry_token(self):
+        """
+        Primary goal: To verify if we are refreshing the expired tokens.
+        """
+        logger.info("Openstack-CAL-Test: Starting token refresh test")
+        drv = KeystoneDriver(
+                openstack_info['username'],
+                openstack_info['password'],
+                openstack_info['auth_url'],
+                openstack_info['project_name'])
+        # Get hold of the client instance needed for the Token Manager
+        client = drv._get_keystone_connection()
+
+        auth_ref = client.auth_ref
+        token = auth_ref['auth_token']
+
+        # Verify if the newly acquired token works.
+        nova = NovaDriver(drv)
+        flavors = nova.flavor_list()
+        self.assertTrue(len(flavors) > 1)
+
+        # Invalidate the token
+        token_manger = ksclient.tokens.TokenManager(client)
+        token_manger.revoke_token(token)
+
+        time.sleep(10)
+
+        unauth_exp = False
+        try:
+            flavors = nova.flavor_list()
+            print (flavors)
+        except nova_exception.AuthorizationFailure:
+            unauth_exp = True
+
+        self.assertTrue(unauth_exp)
+
+        # Explicitly reset the expire time, to test if we acquire a new token
+        now = datetime.datetime.utcnow()
+        time_str = format(now, "%Y-%m-%dT%H:%M:%S.%fZ")
+        drv._get_keystone_connection().auth_ref['expires_at'] = time_str
+
+        flavors = nova.flavor_list()
+        self.assertTrue(len(flavors) > 1)
+
+    @unittest.skip("Skipping test_vm_operations")                            
+    def test_vm_operations(self):
+        """
+        Primary goal: Create/Query/Delete VM in openstack installation.
+        Secondary goal: VM pause/resume operations on VM.
+
+        """
+        logger.info("Openstack-CAL-Test: Starting VM Operations test")
+
+        # Create VM
+        data, vm_id = self._create_vm(self._flavor, self._image)
+
+        # Stop VM
+        self._stop_vm(vm_id)
+        # Start VM
+        self._start_vm(vm_id)
+
+        vm_data = VmData(data.host_name, data.management_ip)
+        self.assert_vm(vm_data, self._flavor)
+
+        # Reboot VM
+        self._reboot_vm(vm_id)
+        ### Delete the VM
+        self._delete_vm(vm_id)
+
+        
+    def _get_network_info_request(self):
+        """
+        Returns request object of type RwcalYang.NetworkInfoItem
+        """
+        network                            = RwcalYang.NetworkInfoItem()
+        network.network_name               = 'rift.cal.unittest.network'
+        network.subnet                     = '192.168.16.0/24'
+        if openstack_info['physical_network']:
+            network.provider_network.physical_network = openstack_info['physical_network']
+        if openstack_info['network_type']:
+            network.provider_network.overlay_type     = openstack_info['network_type']
+        if OpenStackTest.SEG_ID:
+            network.provider_network.segmentation_id  = OpenStackTest.SEG_ID
+            OpenStackTest.SEG_ID += 1
+        return network
+
+
+    def _create_network(self):
+        """
+        Create a network and verify that network creation is successful
+        """
+        network = self._get_network_info_request()
+
+        ### Create network
+        logger.info("Openstack-CAL-Test: Creating a network with name : %s" %(network.network_name))
+        rc, net_id = self.cal.create_network(self._acct, network)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        ### Verify network is created successfully
+        rc, rs = self.cal.get_network(self._acct, net_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Successfully create Network : %s  with id : %s." %(network.network_name, net_id ))
+
+        return net_id
+
+    def _delete_network(self, net_id):
+        """
+        Delete network and verify that delete operation is successful
+        """
+        rc, rs = self.cal.get_network(self._acct, net_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        logger.info("Openstack-CAL-Test: Deleting a network with id : %s. " %(net_id))
+        rc = self.cal.delete_network(self._acct, net_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        
+        # Verify that network is no longer available via get_network_list API
+        rc, rs = self.cal.get_network_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        network_info = [ network for network in rs.networkinfo_list if network.network_id == net_id ]
+        self.assertEqual(len(network_info), 0)
+        logger.info("Openstack-CAL-Test: Successfully deleted Network with id : %s" %(net_id))
+        
+        
+    @unittest.skip("Skipping test_network_operations")                            
+    def test_network_operations(self):
+        """
+        Create/Delete Networks
+        """
+        logger.info("Openstack-CAL-Test: Starting Network Operation test")
+
+        ### Create Network
+        net_id = self._create_network()
+
+        ### Delete Network
+        self._delete_network(net_id)
+
+    def _get_port_info_request(self, network_id, vm_id):
+        """
+        Returns an object of type RwcalYang.PortInfoItem
+        """
+        port = RwcalYang.PortInfoItem()
+        port.port_name = 'rift.cal.unittest.port'
+        port.network_id = network_id
+        if vm_id is not None:
+            port.vm_id = vm_id
+        return port
+
+    def _create_port(self, net_id, vm_id = None):
+        """
+        Create a port in network with network_id: net_id and verifies that operation is successful
+        """
+        if vm_id is not None:
+            logger.info("Openstack-CAL-Test: Creating a port in network with network_id: %s and VM with vm_id: %s" %(net_id, vm_id))
+        else:
+            logger.info("Openstack-CAL-Test: Creating a port in network with network_id: %s" %(net_id))
+
+        ### Create Port
+        port = self._get_port_info_request(net_id, vm_id)
+        rc, port_id = self.cal.create_port(self._acct, port)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        ### Get Port
+        rc, rs = self.cal.get_port(self._acct, port_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Successfully create Port with id : %s. Port State :  %s" %(port_id, rs.port_state))
+
+        return port_id
+
+    def _delete_port(self, port_id):
+        """
+        Deletes a port and verifies that operation is successful
+        """
+        rc, rs = self.cal.get_port(self._acct, port_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Deleting Port with id : %s. Port State :  %s" %(port_id, rs.port_state))
+
+        ### Delete Port
+        self.cal.delete_port(self._acct, port_id)
+        
+        rc, rs = self.cal.get_port_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        port_list = [ port for port in rs.portinfo_list if port.port_id == port_id ]
+        self.assertEqual(len(port_list), 0)
+        logger.info("Openstack-CAL-Test: Successfully Deleted Port with id : %s" %(port_id))
+
+    def _monitor_port(self, port_id, expected_state):
+        """
+        Monitor the port state until it reaches expected_state
+        """
+        for i in range(50):
+            rc, rs = self.cal.get_port(self._acct, port_id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            logger.info("Openstack-CAL-Test: Port with id : %s. Port State :  %s" %(port_id, rs.port_state))
+            if rs.port_state == expected_state:
+                break
+            time.sleep(1)
+        rc, rs = self.cal.get_port(self._acct, port_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rs.port_state, expected_state)
+        logger.info("Openstack-CAL-Test: Port with port_id : %s reached expected state  : %s" %(port_id, rs.port_state))
+            
+    @unittest.skip("Skipping test_port_operations_with_vm")
+    def test_port_operations_with_vm(self):
+        """
+        Create/Delete Ports in a network and associate it with a VM
+        """
+        logger.info("Openstack-CAL-Test: Starting Port Operation test with VM")
+
+        ### First create a network
+        net_id = self._create_network()
+
+        ### Create a VM
+        data, vm_id = self._create_vm(self._flavor, self._image)
+
+        ### Now create Port which connects VM to Network
+        port_id = self._create_port(net_id, vm_id)
+
+        ### Verify that port goes to active state
+        self._monitor_port(port_id, 'ACTIVE')
+
+        ### Delete VM
+        self._delete_vm(vm_id)
+        
+        ### Delete Port
+        self._delete_port(port_id)
+
+        ### Delete the network
+        self._delete_network(net_id)
+
+    @unittest.skip("Skipping test_create_vm_with_port")
+    def test_create_vm_with_port(self):
+        """
+        Create VM and add ports to it during boot time.
+        """
+        logger.info("Openstack-CAL-Test: Starting Create VM with port test")
+
+        ### First create a network
+        net_id = self._create_network()
+
+        ### Now create Port which connects VM to Network
+        port_id = self._create_port(net_id)
+
+        ### Create a VM
+        data, vm_id = self._create_vm(self._flavor, self._image, [port_id])
+
+        ### Verify that port goes to active state
+        self._monitor_port(port_id, 'ACTIVE')
+
+        ### Delete VM
+        self._delete_vm(vm_id)
+        
+        ### Delete Port
+        self._delete_port(port_id)
+
+        ### Delete the network
+        self._delete_network(net_id)
+
+    @unittest.skip("Skipping test_get_vdu_list")
+    def test_get_vdu_list(self):
+        """
+        Test the get_vdu_list API
+        """
+        logger.info("Openstack-CAL-Test: Test Get VDU List APIs")
+        rc, rsp = self.cal.get_vdu_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d VDUs" %(len(rsp.vdu_info_list)))
+        for vdu in rsp.vdu_info_list:
+            rc, vdu2 = self.cal.get_vdu(self._acct, vdu.vdu_id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            self.assertEqual(vdu2.vdu_id, vdu.vdu_id)
+
+
+    @unittest.skip("Skipping test_get_virtual_link_list")
+    def test_get_virtual_link_list(self):
+        """
+        Test the get_virtual_link_list API
+        """
+        logger.info("Openstack-CAL-Test: Test Get virtual_link List APIs")
+        rc, rsp = self.cal.get_virtual_link_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Received %d virtual_links" %(len(rsp.virtual_link_info_list)))
+        for virtual_link in rsp.virtual_link_info_list:
+            rc, virtual_link2 = self.cal.get_virtual_link(self._acct, virtual_link.virtual_link_id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            self.assertEqual(virtual_link2.virtual_link_id, virtual_link.virtual_link_id)
+
+    def _get_virtual_link_request_info(self):
+        """
+        Returns object of type RwcalYang.VirtualLinkReqParams
+        """
+        vlink = RwcalYang.VirtualLinkReqParams()
+        vlink.name = 'rift.cal.virtual_link'
+        vlink.subnet = '192.168.1.0/24'
+        if openstack_info['physical_network']:
+            vlink.provider_network.physical_network = openstack_info['physical_network']
+        if openstack_info['network_type']:
+            vlink.provider_network.overlay_type     = openstack_info['network_type'].upper()
+        if OpenStackTest.SEG_ID:
+            vlink.provider_network.segmentation_id  = OpenStackTest.SEG_ID
+            OpenStackTest.SEG_ID += 1
+        return vlink
+        
+    def _get_vdu_request_info(self, virtual_link_id):
+        """
+        Returns object of type RwcalYang.VDUInitParams
+        """
+        vdu = RwcalYang.VDUInitParams()
+        vdu.name = "cal.vdu"
+        vdu.node_id = OpenStackTest.NodeID
+        vdu.image_id = self._image.id
+        vdu.flavor_id = self._flavor.id
+        vdu.vdu_init.userdata = ''
+        vdu.allocate_public_address = True
+        c1 = vdu.connection_points.add()
+        c1.name = "c_point1"
+        c1.virtual_link_id = virtual_link_id
+        c1.type_yang = 'VIRTIO'
+        return vdu
+
+    def _get_vdu_modify_request_info(self, vdu_id, virtual_link_id):
+        """
+        Returns object of type RwcalYang.VDUModifyParams
+        """
+        vdu = RwcalYang.VDUModifyParams()
+        vdu.vdu_id = vdu_id
+        c1 = vdu.connection_points_add.add()
+        c1.name = "c_modify1"
+        c1.virtual_link_id = virtual_link_id
+       
+        return vdu 
+        
+    #@unittest.skip("Skipping test_create_delete_virtual_link_and_vdu")
+    def test_create_delete_virtual_link_and_vdu(self):
+        """
+        Create a virtual link and a VDU, exercise VDU modify, then delete both
+        """
+        logger.info("Openstack-CAL-Test: Test Create Virtual Link API")
+        vlink_req = self._get_virtual_link_request_info()
+
+        rc, rsp = self.cal.create_virtual_link(self._acct, vlink_req)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Created virtual_link with Id: %s" %rsp)
+        vlink_id = rsp
+        
+        #Check if virtual_link create is successful
+        rc, rsp = self.cal.get_virtual_link(self._acct, rsp)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rsp.virtual_link_id, vlink_id)
+
+        # Now create VDU
+        vdu_req = self._get_vdu_request_info(vlink_id)
+        logger.info("Openstack-CAL-Test: Test Create VDU API")
+
+        rc, rsp = self.cal.create_vdu(self._acct, vdu_req)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Created vdu with Id: %s" %rsp)
+
+        vdu_id = rsp
+
+        ## Check if VDU create is successful
+        rc, rsp = self.cal.get_vdu(self._acct, rsp)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rsp.vdu_id, vdu_id)
+
+        ### Wait until vdu_state is active
+        for i in range(50):
+            rc, rs = self.cal.get_vdu(self._acct, vdu_id)
+            self.assertEqual(rc, RwStatus.SUCCESS)
+            logger.info("Openstack-CAL-Test: VDU with id : %s. Reached State :  %s" %(vdu_id, rs.state))
+            if rs.state == 'active':
+                break
+            time.sleep(2)
+        rc, rs = self.cal.get_vdu(self._acct, vdu_id)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        self.assertEqual(rs.state, 'active')
+        logger.info("Openstack-CAL-Test: VDU with id : %s reached expected state  : %s" %(vdu_id, rs.state))
+        logger.info("Openstack-CAL-Test: VDUInfo: %s" %(rs))
+        
+        vlink_req = self._get_virtual_link_request_info()
+
+        ### Create another virtual_link
+        rc, rsp = self.cal.create_virtual_link(self._acct, vlink_req)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Created virtual_link with Id: %s" %rsp)
+        vlink_id2 = rsp
+
+        ### Now exercise the modify_vdu_api
+        vdu_modify = self._get_vdu_modify_request_info(vdu_id, vlink_id2)
+        rc = self.cal.modify_vdu(self._acct, vdu_modify)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        logger.info("Openstack-CAL-Test: Modified vdu with Id: %s" %vdu_id)
+
+        ### Lets delete the VDU
+        self.cal.delete_vdu(self._acct, vdu_id)
+
+        ### Lets delete the Virtual Link
+        self.cal.delete_virtual_link(self._acct, vlink_id)
+
+        ### Lets delete the Virtual Link-2
+        self.cal.delete_virtual_link(self._acct, vlink_id2)
+
+        time.sleep(5)
+        ### Verify that VDU and virtual link are successfully deleted
+        rc, rsp = self.cal.get_vdu_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+        for vdu in rsp.vdu_info_list:
+            self.assertNotEqual(vdu.vdu_id, vdu_id)
+
+        rc, rsp = self.cal.get_virtual_link_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS)
+
+        for virtual_link in rsp.virtual_link_info_list:
+            self.assertNotEqual(virtual_link.virtual_link_id, vlink_id)
+
+        logger.info("Openstack-CAL-Test: VDU/Virtual Link create-delete test successfully completed")
+
+
+class VmData(object):
+    """A convenience class that provides all the stats and EPA Attributes
+    from the VM provided
+    """
+    def __init__(self, host, mgmt_ip):
+        """
+        Args:
+            host (str): host name.
+            mgmt_ip (str): The IP of the newly created VM.
+        """
+        # Sleep for 20s to ensure the VM is UP and ready to run commands
+        time.sleep(20)
+        logger.info("Connecting to host: {} and IP: {}".format(host, mgmt_ip))
+        self.client = paramiko.SSHClient()
+        self.client.set_missing_host_key_policy(paramiko.WarningPolicy())
+        self.client.connect(host)
+        self.ip = mgmt_ip
+
+        # Get all data from the newly created VM.
+        self._data = self._get_data()
+        self._page_size = self._exec_and_clean("getconf PAGE_SIZE")
+        self._disk_space = self._exec_and_clean(
+                "df -kh --output=size /",
+                line_no=1)
+        self._pci_data = self._exec('lspci -m | grep "10-Gigabit"')
+
+    def _get_data(self,):
+        """Runs the command and store the output in a python dict.
+
+        Returns:
+            dict: Containing all key => value pairs.
+        """
+        content = {}
+        cmds = ["lscpu", 'less /proc/meminfo']
+        for cmd in cmds:
+            ssh_out = self._exec(cmd)
+            content.update(self._convert_to_dict(ssh_out))
+        return content
+
+    def _exec_and_clean(self, cmd, line_no=0):
+        """A convenience method to run a command and extract the specified line
+        number.
+
+        Args:
+            cmd (str): Command to execute
+            line_no (int, optional): Default to 0, extracts the first line.
+
+        Returns:
+            str: line_no of the output of the command.
+        """
+        output = self._exec(cmd)[line_no]
+        output = ' '.join(output.split())
+        return output.strip()
+
+    def _exec(self, cmd):
+        """Thin wrapper that runs the command and returns the stdout data
+
+        Args:
+            cmd (str): Command to execute.
+
+        Returns:
+            list: Contains the command output.
+        """
+        _, ssh_out, _ = self.client.exec_command(
+                "/usr/rift/bin/ssh_root {} {}".format(self.ip,
+                                                      cmd))
+        return ssh_out.readlines()
+
+    def _convert_to_dict(self, content):
+        """convenience method that cleans and stores the line into dict.
+        data is split based on ":" or " ".
+
+        Args:
+            content (list): A list containing the stdout.
+
+        Returns:
+            dict: containing stat attribute => value.
+        """
+        flattened = {}
+        for line in content:
+            line = ' '.join(line.split())
+            if ":" in line:
+                key, value = line.split(":")
+            else:
+                key, value = line.split(" ")
+            key, value = key.strip(), value.strip()
+            flattened[key] = value
+        return flattened
+
+    @property
+    def disk(self):
+        disk = self._disk_space.replace("G", "")
+        return int(disk)
+
+    @property
+    def numa_node_count(self):
+        numa_cores = self._data['NUMA node(s)']
+        numa_cores = int(numa_cores)
+        return numa_cores
+
+    @property
+    def vcpus(self):
+        cores = int(self._data['CPU(s)'])
+        return cores
+
+    @property
+    def cpu_threads(self):
+        threads = int(self._data['Thread(s) per core'])
+        return threads
+
+    @property
+    def memory(self):
+        memory = self._data['MemTotal']
+        memory = int(memory.replace("kB", ""))/1000/1000
+        return int(memory)
+
+    @property
+    def memory_page_size(self):
+        return self._page_size
+
+    @property
+    def pci_passthrough_device_list(self):
+        return self._pci_data
+
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.INFO)
+    unittest.main()
diff --git a/rwcal/test/test_rwlxc_rwlaunchpad.py b/rwcal/test/test_rwlxc_rwlaunchpad.py
new file mode 100644 (file)
index 0000000..0119232
--- /dev/null
@@ -0,0 +1,54 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import logging
+import os
+
+import rift.rwcal.cloudsim.lxc as lxc
+import rift.rwcal.cloudsim.lvm as lvm
+
+
+logger = logging.getLogger('rwcal-test')
+
+
+def main():
+    template = os.path.realpath("../rift/cal/lxc-fedora-rift.lxctemplate")
+    tarfile = "/net/strange/localdisk/jdowner/lxc.tar.gz"
+    volume = 'rift-test'
+
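+    # Flow: back the containers with an LVM volume, create a master container
+    # from the template and tarball, take five snapshots of it, then tear
+    # everything down in reverse order.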
+    lvm.create(volume, '/lvm/rift-test.img')
+
+    master = lxc.create_container('test-master', template, volume, tarfile)
+
+    snapshots = []
+    for index in range(5):
+        snapshots.append(master.snapshot('test-snap-{}'.format(index + 1)))
+
+    for snapshot in snapshots:
+        snapshot.destroy()
+
+    master.destroy()
+
+    lvm.destroy(volume)
+
+
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.DEBUG)
+    main()
diff --git a/rwcm/CMakeLists.txt b/rwcm/CMakeLists.txt
new file mode 100644 (file)
index 0000000..581c9bc
--- /dev/null
@@ -0,0 +1,35 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Manish Patel
+# Creation Date: 10/28/2015
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(PKG_NAME rwcm)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+set(subdirs
+  plugins
+  test
+  )
+
+##
+# Include the subdirs
+##
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwcm/plugins/CMakeLists.txt b/rwcm/plugins/CMakeLists.txt
new file mode 100644 (file)
index 0000000..1522628
--- /dev/null
@@ -0,0 +1,30 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Manish Patel
+# Creation Date: 10/29/2015
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(subdirs
+  yang
+  rwconman
+  )
+
+##
+# Include the subdirs
+##
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwcm/plugins/rwconman/CMakeLists.txt b/rwcm/plugins/rwconman/CMakeLists.txt
new file mode 100644 (file)
index 0000000..adeb27c
--- /dev/null
@@ -0,0 +1,57 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Manish Patel
+# Creation Date: 10/28/2015
+# 
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwconmantasklet)
+set(CONMAN_INSTALL "etc/conman")
+
+##
+# Install config translation script and tag definitions
+##
+install(
+  FILES
+    rift/tasklets/${TASKLET_NAME}/xlate_cfg.py
+    rift/tasklets/${TASKLET_NAME}/xlate_tags.yml
+  DESTINATION ${CONMAN_INSTALL}
+  COMPONENT ${PKG_LONG_NAME})
+
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/${TASKLET_NAME}/__init__.py
+    rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
+    rift/tasklets/${TASKLET_NAME}/rwconman_config.py
+    rift/tasklets/${TASKLET_NAME}/rwconman_events.py
+    rift/tasklets/${TASKLET_NAME}/jujuconf.py
+    rift/tasklets/${TASKLET_NAME}/RiftCA.py
+    rift/tasklets/${TASKLET_NAME}/riftcm_config_plugin.py
+    rift/tasklets/${TASKLET_NAME}/RiftCM_rpc.py
+    rift/tasklets/${TASKLET_NAME}/rwconman_conagent.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/RiftCA.py b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/RiftCA.py
new file mode 100644 (file)
index 0000000..4a95a7d
--- /dev/null
@@ -0,0 +1,299 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import concurrent.futures
+import re
+import tempfile
+import yaml
+import os
+
+from gi.repository import (
+    RwDts as rwdts,
+)
+
+from . import riftcm_config_plugin
+from . import rwconman_events as Events
+
+class RiftCAConfigPlugin(riftcm_config_plugin.RiftCMConfigPluginBase):
+    """
+        Implementation of the riftcm_config_plugin.RiftCMConfigPluginBase
+    """
+    def __init__(self, dts, log, loop, account):
+        riftcm_config_plugin.RiftCMConfigPluginBase.__init__(self, dts, log, loop, account)
+        self._name = account.name
+        self._type = riftcm_config_plugin.DEFAULT_CAP_TYPE
+        self._rift_install_dir = os.environ['RIFT_INSTALL']
+        self._rift_artif_dir = os.environ['RIFT_ARTIFACTS']
+        self._rift_vnfs = {}
+        self._tasks = {}
+
+        # Instantiate events that will handle RiftCA configuration requests
+        self._events = Events.ConfigManagerEvents(dts, log, loop, self)
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    def agent_type(self):
+        return self._type
+
+    @asyncio.coroutine
+    def notify_create_vlr(self, agent_nsr, agent_vnfr, vld, vlr):
+        """
+        Notification of create VL record
+        """
+        pass
+
+    @asyncio.coroutine
+    def is_vnf_configurable(self, agent_vnfr):
+        '''
+        This needs to be part of abstract class
+        '''
+        loop_count = 10
+        while loop_count:
+            loop_count -= 1
+            # Set this VNF's configurability status (need method to check)
+            yield from asyncio.sleep(2, loop=self._loop)
+
+    def riftca_log(self, name, level, log_str, *args):
+        getattr(self._log, level)('RiftCA:({}) {}'.format(name, log_str), *args)
+        
+    @asyncio.coroutine
+    def notify_create_vnfr(self, agent_nsr, agent_vnfr):
+        """
+        Notification of create Network VNF record
+        """
+        # Deploy the charm if specified for the vnf
+        self._log.debug("Rift config agent: create vnfr nsr={}  vnfr={}"
+                        .format(agent_nsr.name, agent_vnfr.name))
+        try:
+            self._loop.create_task(self.is_vnf_configurable(agent_vnfr))
+        except Exception as e:
+            self._log.debug("Rift config agent: vnf_configuration error for VNF:%s/%s: %s",
+                            agent_nsr.name, agent_vnfr.name, str(e))
+            return False
+
+        return True
+
+    @asyncio.coroutine
+    def notify_instantiate_vnfr(self, agent_nsr, agent_vnfr):
+        """
+        Notification of Instantiate NSR with the passed nsr id
+        """
+        pass
+
+    @asyncio.coroutine
+    def notify_instantiate_vlr(self, agent_nsr, agent_vnfr, vlr):
+        """
+        Notification of Instantiate NSR with the passed nsr id
+        """
+        pass
+
+    @asyncio.coroutine
+    def notify_terminate_vnfr(self, agent_nsr, agent_vnfr):
+        """
+        Notification of Terminate the network service
+        """
+
+    @asyncio.coroutine
+    def notify_terminate_vlr(self, agent_nsr, agent_vnfr, vlr):
+        """
+        Notification of Terminate the virtual link
+        """
+        pass
+
+    @asyncio.coroutine
+    def vnf_config_primitive(self, agent_nsr, agent_vnfr, primitive, output):
+        '''
+        primitives support by RiftCA
+        '''
+        pass
+        
+    @asyncio.coroutine
+    def apply_config(self, config, nsr, vnfr, rpc_ip):
+        """ Notification on configuration of an NSR """
+        pass
+
+    @asyncio.coroutine
+    def apply_ns_config(self, agent_nsr, agent_vnfrs, rpc_ip):
+        """Hook: Runs the user defined script. Feeds all the necessary data
+        for the script thro' yaml file.
+
+        Args:
+            rpc_ip (YangInput_Nsr_ExecNsConfigPrimitive): The input data.
+            nsr (NetworkServiceRecord): Description
+            vnfrs (dict): VNFR ID => VirtualNetworkFunctionRecord
+        """
+
+        def xlate(tag, tags):
+            # TBD
+            if tag is None or tags is None:
+                return tag
+            val = tag
+            if re.search('<.*>', tag):
+                try:
+                    if tag == '<rw_mgmt_ip>':
+                        val = tags['rw_mgmt_ip']
+                except KeyError as e:
+                    self._log.info("RiftCA: Did not get a value for tag %s, e=%s",
+                                   tag, e)
+            return val
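+        # e.g. (illustrative) xlate('<rw_mgmt_ip>', {'rw_mgmt_ip': '10.0.0.5'})
+        # yields '10.0.0.5'; values without a '<...>' pattern pass through unchanged.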
+
+        def get_meta(agent_nsr, agent_vnfrs):
+            unit_names, initial_params, vnfr_index_map, vnfr_data_map = {}, {}, {}, {}
+
+            for vnfr_id in agent_nsr.vnfr_ids:
+                vnfr = agent_vnfrs[vnfr_id]
+
+                # index->vnfr ref
+                vnfr_index_map[vnfr.member_vnf_index] = vnfr_id
+                vnfr_data_dict = dict()
+                if 'mgmt_interface' in vnfr.vnfr:
+                    vnfr_data_dict['mgmt_interface'] = vnfr.vnfr['mgmt_interface']
+
+                vnfr_data_dict['connection_point'] = []
+                if 'connection_point' in vnfr.vnfr:
+                    for cp in vnfr.vnfr['connection_point']:
+                        cp_dict = dict()
+                        cp_dict['name'] = cp['name']
+                        cp_dict['ip_address'] = cp['ip_address']
+                        vnfr_data_dict['connection_point'].append(cp_dict)
+
+                vnfr_data_dict['vdur'] = []
+                vdu_data = [(vdu['name'], vdu['management_ip'], vdu['vm_management_ip'], vdu['id'])
+                        for vdu in vnfr.vnfr['vdur']]
+
+                for data in vdu_data:
+                    data = dict(zip(['name', 'management_ip', 'vm_management_ip', 'id'] , data))
+                    vnfr_data_dict['vdur'].append(data)
+
+                vnfr_data_map[vnfr.member_vnf_index] = vnfr_data_dict
+                # Unit name
+                unit_names[vnfr_id] = vnfr.name
+                # Flatten the data for simplicity
+                param_data = {}
+                if 'initial_config_primitive' in vnfr.vnf_configuration:
+                    for primitive in vnfr.vnf_configuration['initial_config_primitive']:
+                        for parameter in primitive.parameter:
+                            value = xlate(parameter.value, vnfr.tags)
+                            param_data[parameter.name] = value
+
+                initial_params[vnfr_id] = param_data
+
+
+            return unit_names, initial_params, vnfr_index_map, vnfr_data_map
+
+        unit_names, init_data, vnfr_index_map, vnfr_data_map = get_meta(agent_nsr, agent_vnfrs)
+        # The data consists of 4 sections
+        # 1. Account data
+        # 2. The input passed.
+        # 3. Unit names (keyed by vnfr ID).
+        # 4. Initial config data (keyed by vnfr ID).
+        data = dict()
+        data['config_agent'] = dict(
+                name=self._name,
+                )
+        data["rpc_ip"] = rpc_ip.as_dict()
+        data["unit_names"] = unit_names
+        data["init_config"] = init_data
+        data["vnfr_index_map"] = vnfr_index_map
+        data["vnfr_data_map"] = vnfr_data_map
+
+        tmp_file = None
+        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
+            tmp_file.write(yaml.dump(data, default_flow_style=True)
+                    .encode("UTF-8"))
+
+        # Get the full path to the script
+        script = ''
+        if rpc_ip.user_defined_script[0] == '/':
+            # The script has full path, use as is
+            script = rpc_ip.user_defined_script
+        else:
+            script = os.path.join(self._rift_artif_dir, 'launchpad/libs', agent_nsr.nsd_id, 'scripts',
+                                  rpc_ip.user_defined_script)
+            self._log.debug("Rift config agent: Checking for script in %s", script)
+            if not os.path.exists(script):
+                self._log.debug("Rift config agent: Did not find scipt %s", script)
+                script = os.path.join(self._rift_install_dir, 'usr/bin', rpc_ip.user_defined_script)
+
+        cmd = "{} {}".format(script, tmp_file.name)
+        self._log.debug("Rift config agent: Running the CMD: {}".format(cmd))
+
+        coro = asyncio.create_subprocess_shell(cmd, loop=self._loop,
+                                               stderr=asyncio.subprocess.PIPE)
+        process = yield from coro
+        err = yield from process.stderr.read()
+        task = self._loop.create_task(process.wait())
+
+        return task, err
+
+    @asyncio.coroutine
+    def apply_initial_config(self, agent_nsr, agent_vnfr):
+        """
+        Apply the initial configuration
+        """
+        rc = False
+        self._log.debug("Rift config agent: Apply initial config to VNF:%s/%s",
+                        agent_nsr.name, agent_vnfr.name)
+        try:
+            if agent_vnfr.id in self._rift_vnfs.keys():
+                # Check if VNF instance is configurable (TBD - future)
+                ### Remove this once is_vnf_configurable() is implemented
+                agent_vnfr.set_to_configurable()
+                if agent_vnfr.is_configurable:
+                    # apply initial config for the vnfr
+                    rc = yield from self._events.apply_vnf_config(agent_vnfr.vnf_cfg)
+                else:
+                    self._log.info("Rift config agent: VNF:%s/%s is not configurable yet!",
+                                   agent_nsr.name, agent_vnfr.name)
+        except Exception as e:
+            self._log.error("Rift config agent: Error on initial configuration to VNF:{}/{}, e {}"
+                            .format(agent_nsr.name, agent_vnfr.name, str(e)))
+            
+            self._log.exception(e)
+            return rc
+
+        return rc
+
+    def is_vnfr_managed(self, vnfr_id):
+        try:
+            if vnfr_id in self._rift_vnfs:
+                return True
+        except Exception as e:
+            self._log.debug("Rift config agent: Is VNFR {} managed: {}".
+                            format(vnfr_id, e))
+        return False
+
+    def add_vnfr_managed(self, agent_vnfr):
+        if agent_vnfr.id not in self._rift_vnfs.keys():
+            self._log.info("Rift config agent: add vnfr={}/{}".format(agent_vnfr.name, agent_vnfr.id))
+            self._rift_vnfs[agent_vnfr.id] = agent_vnfr
+
+    @asyncio.coroutine
+    def get_config_status(self, agent_nsr, agent_vnfr):
+        if agent_vnfr.id in self._rift_vnfs.keys():
+            return 'configured'
+        return 'unknown'
+
+
+    def get_action_status(self, execution_id):
+        ''' Get the action status for an execution ID
+            *** Make sure this is NOT a asyncio coroutine function ***
+        '''
+        return None
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/RiftCM_rpc.py b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/RiftCM_rpc.py
new file mode 100644 (file)
index 0000000..9155d84
--- /dev/null
@@ -0,0 +1,350 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import time
+
+import rift.mano.config_agent
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+from gi.repository import (
+    RwDts as rwdts,
+    NsrYang,
+)
+
+class RiftCMRPCHandler(object):
+    """ The Network service Monitor DTS handler """
+    EXEC_NS_CONF_XPATH = "I,/nsr:exec-ns-service-primitive"
+    EXEC_NS_CONF_O_XPATH = "O,/nsr:exec-ns-service-primitive"
+
+    GET_NS_CONF_XPATH = "I,/nsr:get-ns-service-primitive-values"
+    GET_NS_CONF_O_XPATH = "O,/nsr:get-ns-service-primitive-values"
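+
+    # The "I,"/"O," prefixes on these xpath strings mark the input and output
+    # sides of the corresponding RPCs.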
+
+    def __init__(self, dts, log, loop, nsm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsm = nsm
+
+        self._ns_regh = None
+        self._vnf_regh = None
+        self._get_ns_conf_regh = None
+
+        self.job_manager = rift.mano.config_agent.ConfigAgentJobManager(dts, log, loop, nsm)
+
+    @property
+    def reghs(self):
+        """ Return registration handles """
+        return (self._ns_regh, self._vnf_regh, self._get_ns_conf_regh)
+
+    @property
+    def nsm(self):
+        """ Return the NS manager instance """
+        return self._nsm
+
+    def prepare_meta(self, rpc_ip):
+
+        try:
+            nsr_id = rpc_ip.nsr_id_ref
+            nsr = self._nsm.nsrs[nsr_id]
+            vnfrs = {}
+            for vnfr in nsr.vnfrs:
+                vnfr_id = vnfr.id
+                # vnfr is a dict containing all attributes
+                vnfrs[vnfr_id] = vnfr
+
+            return nsr, vnfrs
+        except KeyError as e:
+            raise ValueError("Record not found", str(e))
+
+    @asyncio.coroutine
+    def _get_ns_cfg_primitive(self, nsr_id, ns_cfg_name):
+        nsd_msg = yield from self._nsm.get_nsd(nsr_id)
+
+        def get_nsd_cfg_prim(name):
+            for ns_cfg_prim in nsd_msg.service_primitive:
+                if ns_cfg_prim.name == name:
+                    return ns_cfg_prim
+            return None
+
+        ns_cfg_prim_msg = get_nsd_cfg_prim(ns_cfg_name)
+        if ns_cfg_prim_msg is not None:
+            ret_cfg_prim_msg = ns_cfg_prim_msg.deep_copy()
+            return ret_cfg_prim_msg
+        return None
+
+    @asyncio.coroutine
+    def _get_vnf_primitive(self, vnfr_id, nsr_id, primitive_name):
+        vnf = self._nsm.get_vnfr_msg(vnfr_id, nsr_id)
+        self._log.debug("vnfr_msg:%s", vnf)
+        if vnf:
+            self._log.debug("nsr/vnf {}/{}, vnf_configuration: %s",
+                            vnf.vnf_configuration)
+            for primitive in vnf.vnf_configuration.service_primitive:
+                if primitive.name == primitive_name:
+                    return primitive
+
+        raise ValueError("Could not find nsr/vnf {}/{} primitive {}"
+                         .format(nsr_id, vnfr_id, primitive_name))
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for NS monitoring read from dts """
+        yield from self.job_manager.register()
+
+        @asyncio.coroutine
+        def on_ns_config_prepare(xact_info, action, ks_path, msg):
+            """ prepare callback from dts exec-ns-service-primitive"""
+            assert action == rwdts.QueryAction.RPC
+            rpc_ip = msg
+            rpc_op = NsrYang.YangOutput_Nsr_ExecNsServicePrimitive.from_dict({
+                    "triggered_by": rpc_ip.triggered_by,
+                    "create_time": int(time.time()),
+                    "parameter": [param.as_dict() for param in rpc_ip.parameter],
+                    "parameter_group": [pg.as_dict() for pg in rpc_ip.parameter_group]
+                })
+
+            try:
+                ns_cfg_prim_name = rpc_ip.name
+                nsr_id = rpc_ip.nsr_id_ref
+                nsr = self._nsm.nsrs[nsr_id]
+
+                nsd_cfg_prim_msg = yield from self._get_ns_cfg_primitive(nsr_id, ns_cfg_prim_name)
+
+                def find_nsd_vnf_prim_param_pool(vnf_index, vnf_prim_name, param_name):
+                    for vnf_prim_group in nsd_cfg_prim_msg.vnf_primitive_group:
+                        if vnf_prim_group.member_vnf_index_ref != vnf_index:
+                            continue
+
+                        for vnf_prim in vnf_prim_group.primitive:
+                            if vnf_prim.name != vnf_prim_name:
+                                continue
+
+                            try:
+                                nsr_param_pool = nsr.param_pools[vnf_prim.parameter_pool]
+                            except KeyError:
+                                raise ValueError("Parameter pool %s does not exist in nsr" % vnf_prim.parameter_pool)
+
+                            self._log.debug("Found parameter pool %s for vnf index(%s), vnf_prim_name(%s), param_name(%s)",
+                                            nsr_param_pool, vnf_index, vnf_prim_name, param_name)
+                            return nsr_param_pool
+
+                    self._log.debug("Could not find parameter pool for vnf index(%s), vnf_prim_name(%s), param_name(%s)",
+                                vnf_index, vnf_prim_name, param_name)
+                    return None
+
+                rpc_op.nsr_id_ref = nsr_id
+                rpc_op.name = ns_cfg_prim_name
+
+                nsr, vnfrs = self.prepare_meta(rpc_ip)
+                rpc_op.job_id = nsr.job_id
+
+                # Copy over the NS level Parameters
+
+                # Give preference to user defined script.
+                if nsd_cfg_prim_msg and nsd_cfg_prim_msg.has_field("user_defined_script"):
+                    rpc_ip.user_defined_script = nsd_cfg_prim_msg.user_defined_script
+
+                    tasks = []
+                    for config_plugin in self.nsm.config_agent_plugins:
+                        task, err = yield from config_plugin.apply_ns_config(
+                            nsr,
+                            vnfrs,
+                            rpc_ip)
+                        tasks.append(task)
+                        if err:
+                            rpc_op.job_status_details = err.decode()
+
+                    self.job_manager.add_job(rpc_op, tasks)
+                else:
+                    # Otherwise create VNF primitives.
+                    for vnf in rpc_ip.vnf_list:
+                        vnf_op = rpc_op.vnf_out_list.add()
+                        vnf_member_idx = vnf.member_vnf_index_ref
+                        vnfr_id = vnf.vnfr_id_ref
+                        vnf_op.vnfr_id_ref = vnfr_id
+                        vnf_op.member_vnf_index_ref = vnf_member_idx
+
+                        for primitive in vnf.vnf_primitive:
+                            op_primitive = vnf_op.vnf_out_primitive.add()
+                            op_primitive.name = primitive.name
+                            op_primitive.execution_id = ''
+                            op_primitive.execution_status = 'completed'
+                            op_primitive.execution_error_details = ''
+
+                            # Copy over the VNF primitive's input parameters
+                            for param in primitive.parameter:
+                                output_param = op_primitive.parameter.add()
+                                output_param.name = param.name
+                                output_param.value = param.value
+
+                            self._log.debug("%s:%s Got primitive %s:%s",
+                                            nsr_id, vnf.member_vnf_index_ref, primitive.name, primitive.parameter)
+
+                            nsd_vnf_primitive = yield from self._get_vnf_primitive(
+                                vnfr_id,
+                                nsr_id,
+                                primitive.name
+                            )
+                            for param in nsd_vnf_primitive.parameter:
+                                if not param.has_field("parameter_pool"):
+                                    continue
+
+                                try:
+                                    nsr_param_pool = nsr.param_pools[param.parameter_pool]
+                                except KeyError:
+                                    raise ValueError("Parameter pool %s does not exist in nsr" % param.parameter_pool)
+                                nsr_param_pool.add_used_value(param.value)
+
+                            for config_plugin in self.nsm.config_agent_plugins:
+                                yield from config_plugin.vnf_config_primitive(nsr_id,
+                                                                              vnfr_id,
+                                                                              primitive,
+                                                                              op_primitive)
+
+                    self.job_manager.add_job(rpc_op)
+
+                # Summary of the flow above: get the NSD config primitive,
+                # and for each VNF primitive parameter backed by a parameter
+                # pool, mark the supplied value as used in that pool.
+                self._log.debug("RPC output: {}".format(rpc_op))
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK,
+                                        RiftCMRPCHandler.EXEC_NS_CONF_O_XPATH,
+                                        rpc_op)
+            except Exception as e:
+                self._log.error("Exception processing the "
+                                "exec-ns-service-primitive: {}".format(e))
+                self._log.exception(e)
+                xact_info.respond_xpath(rwdts.XactRspCode.NACK,
+                                        RiftCMRPCHandler.EXEC_NS_CONF_O_XPATH)
+
+        @asyncio.coroutine
+        def on_get_ns_config_values_prepare(xact_info, action, ks_path, msg):
+            assert action == rwdts.QueryAction.RPC
+            nsr_id = msg.nsr_id_ref
+            cfg_prim_name = msg.name
+            try:
+                nsr = self._nsm.nsrs[nsr_id]
+
+                rpc_op = NsrYang.YangOutput_Nsr_GetNsServicePrimitiveValues()
+
+                ns_cfg_prim_msg = yield from self._get_ns_cfg_primitive(nsr_id, cfg_prim_name)
+
+                # Get pool values for NS-level parameters
+                for ns_param in ns_cfg_prim_msg.parameter:
+                    if not ns_param.has_field("parameter_pool"):
+                        continue
+
+                    try:
+                        nsr_param_pool = nsr.param_pools[ns_param.parameter_pool]
+                    except KeyError:
+                        raise ValueError("Parameter pool %s does not exist in nsr" % ns_param.parameter_pool)
+
+                    new_ns_param = rpc_op.ns_parameter.add()
+                    new_ns_param.name = ns_param.name
+                    new_ns_param.value = str(nsr_param_pool.get_next_unused_value())
+
+                # Get pool values for VNF-level parameters
+                for vnf_prim_group in ns_cfg_prim_msg.vnf_primitive_group:
+                    rsp_prim_group = rpc_op.vnf_primitive_group.add()
+                    rsp_prim_group.member_vnf_index_ref = vnf_prim_group.member_vnf_index_ref
+                    if vnf_prim_group.has_field("vnfd_id_ref"):
+                        rsp_prim_group.vnfd_id_ref = vnf_prim_group.vnfd_id_ref
+
+                    for index, vnf_prim in enumerate(vnf_prim_group.primitive):
+                        rsp_prim = rsp_prim_group.primitive.add()
+                        rsp_prim.name = vnf_prim.name
+                        rsp_prim.index = index
+                        vnf_primitive = yield from self._get_vnf_primitive(
+                                vnf_prim_group.vnfd_id_ref,
+                                nsr_id,
+                                vnf_prim.name
+                                )
+                        for param in vnf_primitive.parameter:
+                            if not param.has_field("parameter_pool"):
+                                continue
+
+                            try:
+                                nsr_param_pool = nsr.param_pools[param.parameter_pool]
+                            except KeyError:
+                                raise ValueError("Parameter pool %s does not exist in nsr" % param.parameter_pool)
+
+                            vnf_param = rsp_prim.parameter.add()
+                            vnf_param.name = param.name
+                            vnf_param.value = str(nsr_param_pool.get_next_unused_value())
+
+                self._log.debug("RPC output: {}".format(rpc_op))
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK,
+                                        RiftCMRPCHandler.GET_NS_CONF_O_XPATH, rpc_op)
+            except Exception as e:
+                self._log.error("Exception processing the "
+                                "get-ns-service-primitive-values: {}".format(e))
+                self._log.exception(e)
+                xact_info.respond_xpath(rwdts.XactRspCode.NACK,
+                                        RiftCMRPCHandler.GET_NS_CONF_O_XPATH)
+
+        hdl_ns = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_ns_config_prepare,)
+        hdl_ns_get = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_get_ns_config_values_prepare,)
+
+        with self._dts.group_create() as group:
+            self._ns_regh = group.register(xpath=RiftCMRPCHandler.EXEC_NS_CONF_XPATH,
+                                           handler=hdl_ns,
+                                           flags=rwdts.Flag.PUBLISHER,
+                                           )
+            self._get_ns_conf_regh = group.register(xpath=RiftCMRPCHandler.GET_NS_CONF_XPATH,
+                                                    handler=hdl_ns_get,
+                                                    flags=rwdts.Flag.PUBLISHER,
+                                                    )
+
+
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/__init__.py b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/__init__.py
new file mode 100644 (file)
index 0000000..88db365
--- /dev/null
@@ -0,0 +1 @@
+from .rwconmantasklet import ConfigManagerTasklet
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/jujuconf.py b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/jujuconf.py
new file mode 100644 (file)
index 0000000..1c32fc9
--- /dev/null
@@ -0,0 +1,655 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import re
+import tempfile
+import yaml
+import os
+
+import rift.mano.utils.juju_api as juju
+from . import riftcm_config_plugin
+
+
+# Charm service names accept only lowercase letters (a-z) and '-'.
+def get_vnf_unique_name(nsr_name, vnfr_short_name, member_vnf_index):
+    name = "{}-{}-{}".format(nsr_name, vnfr_short_name, member_vnf_index)
+    new_name = ''
+    for c in name:
+        if c.isdigit():
+            c = chr(97 + int(c))
+        elif not c.isalpha():
+            c = "-"
+        new_name += c
+    return new_name.lower()
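+
+# Illustrative example (hypothetical inputs):
+#   get_vnf_unique_name('ns1', 'vnfA', 2) builds 'ns1-vnfA-2', maps each
+#   digit to a letter ('1' -> 'b', '2' -> 'c'), lower-cases the rest, and
+#   yields 'nsb-vnfa-c'.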
+
+
+class JujuConfigPlugin(riftcm_config_plugin.RiftCMConfigPluginBase):
+    """
+        Juju implementation of the riftcm_config_plugin.RiftCMConfigPluginBase
+    """
+    def __init__(self, dts, log, loop, account):
+        riftcm_config_plugin.RiftCMConfigPluginBase.__init__(self, dts, log, loop, account)
+        self._name = account.name
+        self._type = 'juju'
+        self._ip_address = account.juju.ip_address
+        self._port = account.juju.port
+        self._user = account.juju.user
+        self._secret = account.juju.secret
+        self._rift_install_dir = os.environ['RIFT_INSTALL']
+        self._rift_artif_dir = os.environ['RIFT_ARTIFACTS']
+
+        ############################################################
+        # This is wrongfully overloaded with 'juju' private data.  #
+        # Really need to separate agent_vnfr from juju vnfr data.  #
+        # Currently, this holds agent_vnfr, which has actual vnfr, #
+        # then this juju overloads actual vnfr with its own        #
+        # dictionary elements (WRONG!!!)                           #
+        ############################################################
+        self._juju_vnfs = {}
+
+        self._tasks = {}
+        self._api = juju.JujuApi(log, loop,
+                                 self._ip_address, self._port,
+                                 self._user, self._secret)
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    def agent_type(self):
+        return self._type
+
+    @property
+    def api(self):
+        return self._api
+
+    def vnfr(self, vnfr_id):
+        try:
+            vnfr = self._juju_vnfs[vnfr_id].vnfr
+        except KeyError:
+            self._log.error("jujuCA: Did not find VNFR %s in juju plugin", vnfr_id)
+            return None
+
+        return vnfr
+
+    def juju_log(self, level, name, log_str, *args):
+        if name is not None:
+            g_log_str = 'jujuCA:({}) {}'.format(name, log_str)
+        else:
+            g_log_str = 'jujuCA: {}'.format(log_str)
+        getattr(self._log, level)(g_log_str, *args)
+
+    # TBD: Do a better job of translation, similar to the config manager
+    def xlate(self, tag, tags):
+        # TBD
+        if tag is None:
+            return tag
+        val = tag
+        if re.search('<.*>', tag):
+            self._log.debug("jujuCA: Xlate value %s", tag)
+            try:
+                if tag == '<rw_mgmt_ip>':
+                    val = tags['rw_mgmt_ip']
+            except KeyError as e:
+                self._log.info("jujuCA: Did not get a value for tag %s, e=%s",
+                               tag, e)
+        return val
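+
+    # Illustrative usage (hypothetical values):
+    #   xlate('<rw_mgmt_ip>', {'rw_mgmt_ip': '10.0.0.1'}) -> '10.0.0.1'
+    #   xlate('plain-value', tags)                        -> 'plain-value'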
+
+    @asyncio.coroutine
+    def notify_create_vlr(self, agent_nsr, agent_vnfr, vld, vlr):
+        """
+        Notification of create VL record
+        """
+        return True
+
+    @asyncio.coroutine
+    def notify_create_vnfr(self, agent_nsr, agent_vnfr):
+        """
+        Notification of create Network VNF record
+        Returns True if configured using config_agent
+        """
+        # Deploy the charm if specified for the vnf
+        self._log.debug("jujuCA: create vnfr nsr=%s  vnfr=%s",
+                        agent_nsr.name, agent_vnfr.name)
+        self._log.debug("jujuCA: Config = %s",
+                        agent_vnfr.vnf_configuration)
+        try:
+            vnf_config = agent_vnfr.vnfr_msg.vnf_configuration
+            self._log.debug("jujuCA: vnf_configuration = %s", vnf_config)
+            if not vnf_config.has_field('juju'):
+                return False
+            charm = vnf_config.juju.charm
+            self._log.debug("jujuCA: charm = %s", charm)
+        except Exception as e:
+            self._log.error("jujuCA: vnf_configuration error for vnfr {}: {}".
+                            format(agent_vnfr.name, e))
+            return False
+
+        # Prepare unique name for this VNF
+        vnf_unique_name = get_vnf_unique_name(agent_nsr.name,
+                                              agent_vnfr.name,
+                                              agent_vnfr.member_vnf_index)
+        if vnf_unique_name in self._tasks:
+            self._log.warn("jujuCA: Service %s already deployed",
+                           vnf_unique_name)
+
+        vnfr_dict = agent_vnfr.vnfr
+        vnfr_dict.update({'vnf_juju_name': vnf_unique_name,
+                          'charm': charm,
+                          'nsr_id': agent_nsr.id,
+                          'member_vnf_index': agent_vnfr.member_vnf_index,
+                          'tags': {},
+                          'active': False,
+                          'config': vnf_config,
+                          'vnfr_name' : agent_vnfr.name})
+        self._log.debug("jujuCA: Charm %s for vnf %s to be deployed as %s",
+                        charm, agent_vnfr.name, vnf_unique_name)
+
+        # Find the charm directory
+        try:
+            path = os.path.join(self._rift_artif_dir,
+                                'launchpad/libs',
+                                agent_vnfr.vnfr_msg.vnfd_ref,
+                                'charms/trusty',
+                                charm)
+            self._log.debug("jujuCA: Charm dir is {}".format(path))
+            if not os.path.isdir(path):
+                self._log.error("jujuCA: Did not find the charm directory at {}".
+                                format(path))
+                path = None
+        except Exception as e:
+            self.log.exception(e)
+            return False
+
+        if vnf_unique_name not in self._tasks:
+            self._tasks[vnf_unique_name] = {}
+
+        self._tasks[vnf_unique_name]['deploy'] = self.loop.create_task(
+            self.api.deploy_service(charm, vnf_unique_name, path=path))
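+        # self._tasks maps each Juju service name to its asyncio task handles,
+        # keyed by action ('deploy' here, 'destroy' at termination); these are
+        # polled later via check_task_status().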
+
+        self._log.debug("jujuCA: Deploying service %s",
+                        vnf_unique_name)
+
+        return True
+
+    @asyncio.coroutine
+    def notify_instantiate_vnfr(self, agent_nsr, agent_vnfr):
+        """
+        Notification of Instantiate NSR with the passed nsr id
+        """
+        return True
+
+    @asyncio.coroutine
+    def notify_instantiate_vlr(self, agent_nsr, agent_vnfr, vlr):
+        """
+        Notification of Instantiate VL record
+        """
+        return True
+
+    @asyncio.coroutine
+    def notify_terminate_nsr(self, agent_nsr, agent_vnfr):
+        """
+        Notification of Terminate the network service
+        """
+        return True
+
+    @asyncio.coroutine
+    def notify_terminate_vnfr(self, agent_nsr, agent_vnfr):
+        """
+        Notification of Terminate VNF record
+        """
+        self._log.debug("jujuCA: Terminate VNFr {}, current vnfrs={}".
+                        format(agent_vnfr.name, self._juju_vnfs))
+        try:
+            vnfr = agent_vnfr.vnfr
+            service = vnfr['vnf_juju_name']
+
+            self._log.debug("jujuCA: Terminating VNFr %s, %s",
+                            agent_vnfr.name, service)
+            self._tasks[service]['destroy'] = self.loop.create_task(
+                    self.api.destroy_service(service)
+                )
+
+            del self._juju_vnfs[agent_vnfr.id]
+            self._log.debug("jujuCA: current vnfrs={}".
+                            format(self._juju_vnfs))
+            if service in self._tasks:
+                # Drop handles for any tasks that have already completed
+                for action in list(self._tasks[service].keys()):
+                    if self._tasks[service][action].done():
+                        del self._tasks[service][action]
+        except KeyError as e:
+            self._log.debug("jujuCA: Terminating charm service for VNFr {}, e={}".
+                            format(agent_vnfr.name, e))
+        except Exception as e:
+            self._log.error("jujuCA: Exception terminating charm service for VNFR {}: {}".
+                            format(agent_vnfr.name, e))
+
+        return True
+
+    @asyncio.coroutine
+    def notify_terminate_vlr(self, agent_nsr, agent_vnfr, vlr):
+        """
+        Notification of Terminate the virtual link
+        """
+        return True
+
+    def check_task_status(self, service, action):
+        #self.log.debug("jujuCA: check task status for %s, %s" % (service, action))
+        try:
+            task = self._tasks[service][action]
+            if task.done():
+                self.log.debug("jujuCA: Task for %s, %s done" % (service, action))
+                e = task.exception()
+                if e:
+                    self.log.error("jujuCA: Error in task for {} and {} : {}".
+                                   format(service, action, e))
+                    raise Exception(e)
+                r = task.result()
+                if r:
+                    self.log.debug("jujuCA: Task for {} and {}, returned {}".
+                                   format(service, action, r))
+                return True
+            else:
+                self.log.debug("jujuCA: task {}, {} not done".
+                               format(service, action))
+                return False
+        except KeyError as e:
+            self.log.error("jujuCA: KeyError for task for {} and {}: {}".
+                           format(service, action, e))
+        except Exception as e:
+            self.log.error("jujuCA: Error for task for {} and {}: {}".
+                           format(service, action, e))
+            raise
+        return True
+
+    @asyncio.coroutine
+    def vnf_config_primitive(self, nsr_id, vnfr_id, primitive, output):
+        self._log.debug("jujuCA: VNF config primititve {} for nsr {}, vnfr_id {}".
+                        format(primitive, nsr_id, vnfr_id))
+        output.execution_status = "failed"
+        output.execution_id = ''
+        output.execution_error_details = ''
+
+        try:
+            vnfr = self._juju_vnfs[vnfr_id].vnfr
+        except KeyError:
+            self._log.error("jujuCA: Did not find VNFR %s in juju plugin",
+                            vnfr_id)
+            return
+
+        try:
+            service = vnfr['vnf_juju_name']
+            vnf_config = vnfr['config']
+            self._log.debug("VNF config %s", vnf_config)
+            configs = vnf_config.service_primitive
+            for config in configs:
+                if config.name == primitive.name:
+                    self._log.debug("jujuCA: Found the config primitive %s",
+                                    config.name)
+                    params = {}
+                    for parameter in primitive.parameter:
+                        if parameter.value:
+                            val = self.xlate(parameter.value, vnfr['tags'])
+                            # TBD do validation of the parameters
+                            data_type = 'string'
+                            found = False
+                            for ca_param in config.parameter:
+                                if ca_param.name == parameter.name:
+                                    data_type = ca_param.data_type
+                                    found = True
+                                    break
+                            # Convert once the declared data type is known
+                            if data_type == 'integer':
+                                val = int(parameter.value)
+                            if not found:
+                                self._log.warn("jujuCA: Did not find parameter {} for {}".
+                                               format(parameter, config.name))
+                            params.update({parameter.name: val})
+
+                    if config.name == 'config':
+                        if len(params):
+                            self._log.debug("jujuCA: applying config with params {} for service {}".
+                                            format(params, service))
+
+                            rc = yield from self.api.apply_config(params, service=service)
+
+                            if rc:
+                                output.execution_status = "completed"
+                                self._log.debug("jujuCA: applied config {} on {}".
+                                                format(params, service))
+                            else:
+                                output.execution_status = 'failed'
+                                output.execution_error_details = \
+                                    'Failed to apply config: {}'.format(params)
+                                self._log.error("jujuCA: Error applying config {} on service {}".
+                                                format(params, service))
+                        else:
+                            self._log.warn("jujuCA: Did not find valid parameters for config: {}".
+                                           format(primitive.parameter))
+                    else:
+                        self._log.debug("jujuCA: Execute action {} on service {} with params {}".
+                                        format(config.name, service, params))
+
+                        resp = yield from self.api.execute_action(config.name,
+                                                                  params,
+                                                                  service=service)
+
+                        if resp:
+                            if 'error' in resp:
+                                output.execution_error_details = resp['error']['Message']
+                            else:
+                                output.execution_id = resp['action']['tag']
+                                output.execution_status = resp['status']
+                                if output.execution_status == 'failed':
+                                    output.execution_error_details = resp['message']
+                            self._log.debug("jujuCA: execute action {} on service {} returned {}".
+                                            format(config.name, service, output.execution_status))
+                        else:
+                            self._log.error("jujuCA: error executing action {} for {} with {}".
+                                            format(config.name, service, params))
+                            output.execution_id = ''
+                            output.execution_status = 'failed'
+                            output.execution_error_details = "Failed to queue the action"
+                    break
+
+        except KeyError as e:
+            self._log.info("VNF %s does not have config primititves, e=%s", vnfr_id, e)
+
+    @asyncio.coroutine
+    def apply_config(self, agent_nsr, agent_vnfr, config, rpc_ip):
+        """ Notification on configuration of an NSR """
+        pass
+
+    @asyncio.coroutine
+    def apply_ns_config(self, agent_nsr, agent_vnfrs, rpc_ip):
+        """
+
+        ###### TBD - This really does not belong here. Looks more like NS level script ####
+        ###### apply_config should be called for a particular VNF only here ###############
+
+        Hook: Runs the user-defined script, feeding it all the necessary
+        data through a YAML file.
+
+        Args:
+            agent_nsr (RiftCMnsr): The network service record wrapper.
+            agent_vnfrs (dict): VNFR ID => agent VNFR wrapper.
+            rpc_ip (YangInput_Nsr_ExecNsConfigPrimitive): The RPC input data.
+
+        """
+        def get_meta(agent_nsr):
+            unit_names, initial_params, vnfr_index_map = {}, {}, {}
+
+            for vnfr_id in agent_nsr.vnfr_ids:
+                juju_vnf = self._juju_vnfs[vnfr_id].vnfr
+
+                # Vnfr -> index ref
+                vnfr_index_map[vnfr_id] = juju_vnf['member_vnf_index']
+
+                # Unit name
+                unit_names[vnfr_id] = juju_vnf['vnf_juju_name']
+
+                # Flatten the data for simplicity
+                param_data = {}
+                self._log.debug("Juju Config:%s", juju_vnf['config'])
+                for primitive in juju_vnf['config'].initial_config_primitive:
+                    for parameter in primitive.parameter:
+                        value = self.xlate(parameter.value, juju_vnf['tags'])
+                        param_data[parameter.name] = value
+
+                initial_params[vnfr_id] = param_data
+
+
+            return unit_names, initial_params, vnfr_index_map
+
+        unit_names, init_data, vnfr_index_map = get_meta(agent_nsr)
+
+        # The data consists of 4 sections
+        # 1. Account data
+        # 2. The input passed.
+        # 3. Juju unit names (keyed by vnfr ID).
+        # 4. Initial config data (keyed by vnfr ID).
+        data = dict()
+        data['config_agent'] = dict(
+                name=self._name,
+                host=self._ip_address,
+                port=self._port,
+                user=self._user,
+                secret=self._secret
+                )
+        data["rpc_ip"] = rpc_ip.as_dict()
+        data["unit_names"] = unit_names
+        data["init_config"] = init_data
+        data["vnfr_index_map"] = vnfr_index_map
+
+        tmp_file = None
+        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
+            tmp_file.write(yaml.dump(data, default_flow_style=True)
+                    .encode("UTF-8"))
+
+        self._log.debug("jujuCA: Creating a temp file: {} with input data".format(
+                tmp_file.name))
+
+        # Get the full path to the script
+        script = ''
+        if rpc_ip.user_defined_script[0] == '/':
+            # The script has full path, use as is
+            script = rpc_ip.user_defined_script
+        else:
+            script = os.path.join(self._rift_artif_dir, 'launchpad/libs', agent_nsr.id, 'scripts',
+                                  rpc_ip.user_defined_script)
+            self.log.debug("jujuCA: Checking for script in %s", script)
+            if not os.path.exists(script):
+                script = os.path.join(self._rift_install_dir, 'usr/bin', rpc_ip.user_defined_script)
+
+        cmd = "{} {}".format(rpc_ip.user_defined_script, tmp_file.name)
+        self._log.debug("jujuCA: Running the CMD: {}".format(cmd))
+
+        coro = asyncio.create_subprocess_shell(cmd, loop=self._loop,
+                                               stderr=asyncio.subprocess.PIPE)
+        process = yield from coro
+        err = yield from process.stderr.read()
+        task = self._loop.create_task(process.wait())
+
+        return task, err
+
+    @asyncio.coroutine
+    def apply_initial_config(self, agent_nsr, agent_vnfr):
+        """
+        Apply the initial configuration
+        Expect config directives mostly, not actions
+        Actions in initial config may not work based on charm design
+        """
+
+        vnfr = agent_vnfr.vnfr
+        service = vnfr['vnf_juju_name']
+
+        rc = yield from self.api.is_service_up(service=service)
+        if not rc:
+            return False
+
+        action_ids = []
+        try:
+            vnf_cat = agent_vnfr.vnfr_msg
+            if vnf_cat and vnf_cat.mgmt_interface.ip_address:
+                vnfr['tags'].update({'rw_mgmt_ip': vnf_cat.mgmt_interface.ip_address})
+                self._log.debug("jujuCA:(%s) tags: %s", vnfr['vnf_juju_name'], vnfr['tags'])
+
+            config = {}
+            try:
+                for primitive in vnfr['config'].initial_config_primitive:
+                    self._log.debug("jujuCA:(%s) Initial config primitive %s", vnfr['vnf_juju_name'], primitive)
+                    if primitive.name == 'config':
+                        for param in primitive.parameter:
+                            if vnfr['tags']:
+                                val = self.xlate(param.value, vnfr['tags'])
+                                config.update({param.name: val})
+            except KeyError as e:
+                self._log.exception("jujuCA:(%s) Initial config error(%s): config=%s",
+                                    vnfr['vnf_juju_name'], str(e), config)
+                config = None
+                return False
+
+            if config:
+                self.juju_log('info', vnfr['vnf_juju_name'],
+                              "Applying Initial config:%s",
+                              config)
+
+                rc = yield from self.api.apply_config(config, service=service)
+                if rc is False:
+                    self.log.error("Service {} is in error state".format(service))
+                    return False
+
+
+            # Apply any actions specified as part of initial config
+            for primitive in vnfr['config'].initial_config_primitive:
+                if primitive.name != 'config':
+                    self._log.debug("jujuCA:(%s) Initial config action primitive %s",
+                                    vnfr['vnf_juju_name'], primitive)
+                    action = primitive.name
+                    params = {}
+                    for param in primitive.parameter:
+                        val = self.xlate(param.value, vnfr['tags'])
+                        params.update({param.name: val})
+
+                    self._log.info("jujuCA:(%s) Action %s with params %s",
+                                   vnfr['vnf_juju_name'], action, params)
+
+                    resp = yield from self.api.execute_action(action, params,
+                                                              service=service)
+                    if 'error' in resp:
+                        self._log.error("Applying initial config failed: {}".
+                                        format(resp))
+                        return False
+
+                    action_ids.append(resp['action']['tag'])
+
+        except KeyError as e:
+            self._log.info("Juju config agent(%s): VNFR %s not managed by Juju",
+                           vnfr['vnf_juju_name'], agent_vnfr.id)
+            return False
+        except Exception as e:
+            self._log.exception("jujuCA:(%s) Exception in apply_initial_config for VNFR %s: %s",
+                                vnfr['vnf_juju_name'], agent_vnfr.id, e)
+            return False
+
+        # Poll until all queued actions complete; each status call below yields to the event loop
+        pending = True
+        while pending:
+            pending = False
+            for act in action_ids:
+                resp = yield from self.api.get_action_status(act, service=service)
+                if 'error' in resp:
+                    self._log.error("Initial config failed: {}".format(resp))
+                    return False
+
+                if resp['status'] == 'failed':
+                    self._log.error("Initial config action failed: {}".format(resp))
+                    return False
+
+                if resp['status'] == 'pending':
+                    pending = True
+
+        return True
+
+    def add_vnfr_managed(self, agent_vnfr):
+        if agent_vnfr.id not in self._juju_vnfs.keys():
+            self._log.info("juju config agent: add vnfr={}/{}".
+                           format(agent_vnfr.name, agent_vnfr.id))
+            self._juju_vnfs[agent_vnfr.id] = agent_vnfr
+
+    def is_vnfr_managed(self, vnfr_id):
+        try:
+            if vnfr_id in self._juju_vnfs:
+                return True
+        except Exception as e:
+            self._log.debug("jujuCA: Is VNFR {} managed: {}".
+                            format(vnfr_id, e))
+        return False
+
+    @asyncio.coroutine
+    def is_configured(self, vnfr_id):
+        try:
+            agent_vnfr = self._juju_vnfs[vnfr_id]
+            vnfr = agent_vnfr.vnfr
+            if vnfr['active']:
+                return True
+
+            service = vnfr['vnf_juju_name']
+            resp = yield from self.api.is_service_active(service=service)
+            vnfr['active'] = resp
+            self._log.debug("jujuCA: Service state for {} is {}".
+                            format(service, resp))
+            return resp
+
+        except KeyError:
+            self._log.debug("jujuCA: VNFR id {} not found in config agent".
+                            format(vnfr_id))
+            return False
+        except Exception as e:
+            self._log.error("jujuCA: VNFR id {} is_configured: {}".
+                            format(vnfr_id, e))
+        return False
+
+    @asyncio.coroutine
+    def get_config_status(self, agent_nsr, agent_vnfr):
+        """Get the configuration status for the VNF"""
+        rc = 'unknown'
+
+        try:
+            vnfr = agent_vnfr.vnfr
+            service = vnfr['vnf_juju_name']
+        except KeyError:
+            # This VNF is not managed by Juju
+            return rc
+
+        rc = 'configuring'
+
+        if not self.check_task_status(service, 'deploy'):
+            return rc
+
+        try:
+            resp = yield from self.api.get_service_status(service=service)
+            self._log.debug("jujuCA: Get service %s status? %s", service, resp)
+
+            if resp == 'error':
+                return 'error'
+            if resp == 'active':
+                return 'configured'
+        except KeyError:
+            self._log.error("jujuCA: Check unknown service %s status", service)
+        except Exception as e:
+            self._log.error("jujuCA: Caught exception when checking for service is active: %s", e)
+            self._log.exception(e)
+
+        return rc
+
+    def get_action_status(self, execution_id):
+        ''' Get the action status for an execution ID
+            *** Make sure this is NOT an asyncio coroutine function ***
+        '''
+
+        try:
+            return self.api._get_action_status(execution_id)
+        except Exception as e:
+            self._log.error("jujuCA: Error fetching execution status for %s",
+                            execution_id)
+            self._log.exception(e)
+            raise e
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/riftcm_config_plugin.py b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/riftcm_config_plugin.py
new file mode 100644 (file)
index 0000000..1540360
--- /dev/null
@@ -0,0 +1,307 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import abc
+
+# Default config agent plugin type
+DEFAULT_CAP_TYPE = "riftca"
+
+class RiftCMnsr(object):
+    '''
+    Agent wrapper class for an NSR, giving config agent plugins
+    access to the NSR's objects.
+    '''
+    def __init__(self, nsr_dict, cfg):
+        self._nsr = nsr_dict
+        self._cfg = cfg
+        self._vnfrs = []
+        self._vnfrs_msg = []
+        self._vnfr_ids = {}
+        self._job_id = 0
+
+    @property
+    def name(self):
+        return self._nsr['name_ref']
+
+    @property
+    def nsd_name(self):
+        return self._nsr['nsd_name_ref']
+
+    @property
+    def nsd_id(self):
+        return self._nsr['nsd_ref']
+
+    @property
+    def id(self):
+        return self._nsr['ns_instance_config_ref']
+
+    @property
+    def nsr_dict(self):
+        return self._nsr
+
+    @property
+    def nsr_cfg_msg(self):
+        return self._cfg
+
+    @property
+    def job_id(self):
+        ''' Get a new job id for config primitive'''
+        self._job_id += 1
+        return self._job_id
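+    # Note: reading this property has a side effect: each access returns a
+    # new, incremented id (1, 2, 3, ...), so read it exactly once per job.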
+
+    @property
+    def vnfrs(self):
+        return self._vnfrs
+
+    def add_vnfr(self, vnfr, vnfr_msg):
+        if vnfr['id'] in self._vnfr_ids.keys():
+            agent_vnfr = self._vnfr_ids[vnfr['id']]
+        else:
+            agent_vnfr = RiftCMvnfr(self.name, vnfr, vnfr_msg)
+            self._vnfrs.append(agent_vnfr)
+            self._vnfrs_msg.append(vnfr_msg)
+            self._vnfr_ids[agent_vnfr.id] = agent_vnfr
+        return agent_vnfr
+
+    @property
+    def vnfr_ids(self):
+        return self._vnfr_ids
+
+class RiftCMvnfr(object):
+    '''
+    Agent wrapper class for a VNFR, used during VNFR processing
+    '''
+    def __init__(self, nsr_name, vnfr_dict, vnfr_msg):
+        self._vnfr = vnfr_dict
+        self._vnfr_msg = vnfr_msg
+        self._nsr_name = nsr_name
+        self._configurable = False
+
+    @property
+    def nsr_name(self):
+        return self._nsr_name
+
+    @property
+    def vnfr(self):
+        return self._vnfr
+
+    @property
+    def vnfr_msg(self):
+        return self._vnfr_msg
+
+    @property
+    def name(self):
+        return self._vnfr['short_name']
+
+    @property
+    def tags(self):
+        try:
+            return self._vnfr['tags']
+        except KeyError:
+            return None
+
+    @property
+    def id(self):
+        return self._vnfr['id']
+
+    @property
+    def member_vnf_index(self):
+        return self._vnfr['member_vnf_index_ref']
+
+    @property
+    def vnf_configuration(self):
+        return self._vnfr['vnf_configuration']
+
+    @property
+    def xpath(self):
+        """ VNFR xpath """
+        return "D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id = '{}']".format(self.id)
+
+    def set_to_configurable(self):
+        self._configurable = True
+
+    @property
+    def is_configurable(self):
+        return self._configurable
+
+    @property
+    def vnf_cfg(self):
+        return self._vnfr['vnf_cfg']
+
+class RiftCMConfigPluginBase(object):
+    """
+        Abstract base class for an NSM configuration agent plugin.
+        There will be a single instance of this plugin for each plugin type.
+    """
+
+    def __init__(self, dts, log, loop, config_agent):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._config_agent = config_agent
+
+    @property
+    def agent_type(self):
+        raise NotImplementedError
+
+    @property
+    def name(self):
+        raise NotImplementedError
+
+    @property
+    def dts(self):
+        return self._dts
+
+    @property
+    def log(self):
+        return self._log
+
+    @property
+    def loop(self):
+        return self._loop
+
+    @property
+    def nsm(self):
+        # Note: no _nsm is set by this base class; subclasses that expose an
+        # NS manager must set self._nsm themselves.
+        return getattr(self, '_nsm', None)
+
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def apply_config(self, agent_nsr, agent_vnfr, config, rpc_ip):
+        """ Notification on configuration of an NSR """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def apply_ns_config(self, agent_nsr, agent_vnfrs, rpc_ip):
+        """ Hook for NS-level configuration (e.g. a user-defined script) """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def notify_create_vlr(self, agent_nsr, vld):
+        """ Notification on creation of an VL """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def notify_create_vnfr(self, agent_nsr, agent_vnfr):
+        """ Notification on creation of an VNFR """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def notify_instantiate_vnfr(self, agent_nsr, agent_vnfr):
+        """ Notify instantiation of the virtual network function """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def notify_instantiate_vlr(self, agent_nsr, vl):
+        """ Notify instantiate of the virtual link"""
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def notify_terminate_vnfr(self, agent_nsr, agent_vnfr):
+        """Notify termination of the VNF """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def notify_terminate_vlr(self, agent_nsr, vlr):
+        """Notify termination of the Virtual Link Record"""
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def apply_initial_config(self, agent_nsr, agent_vnfr):
+        """Apply initial configuration"""
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def get_config_status(self, agent_nsr, agent_vnfr):
+        """Get the status for the VNF"""
+        pass
+
+    @abc.abstractmethod
+    def get_action_status(self, execution_id):
+        """Get the action exection status"""
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def vnf_config_primitive(self, nsr_id, vnfr_id, primitive, output):
+        """Apply config primitive on a VNF"""
+        pass
+
+    @abc.abstractmethod
+    def is_vnfr_managed(self, vnfr_id):
+        """ Check if VNR is managed by config agent """
+        pass
+
+    @abc.abstractmethod
+    def add_vnfr_managed(self, agent_vnfr):
+        """ Add VNR to be managed by this config agent """
+        pass
+
+    @asyncio.coroutine
+    def invoke(self, method, *args):
+        try:
+            rc = None
+            self._log.debug("Config agent plugin: method {} with args {}: {}".
+                            format(method, args, self))
+
+            # TBD - Find a better way than string comparison to dispatch the method
+            if method == 'notify_create_nsr':
+                rc = yield from self.notify_create_nsr(args[0], args[1])
+            elif method == 'notify_create_vlr':
+                rc = yield from self.notify_create_vlr(args[0], args[1], args[2])
+            elif method == 'notify_create_vnfr':
+                rc = yield from self.notify_create_vnfr(args[0], args[1])
+            elif method == 'notify_instantiate_nsr':
+                rc = yield from self.notify_instantiate_nsr(args[0])
+            elif method == 'notify_instantiate_vnfr':
+                rc = yield from self.notify_instantiate_vnfr(args[0], args[1])
+            elif method == 'notify_instantiate_vlr':
+                rc = yield from self.notify_instantiate_vlr(args[0], args[1])
+            elif method == 'notify_nsr_active':
+                rc = yield from self.notify_nsr_active(args[0], args[1])
+            elif method == 'notify_terminate_nsr':
+                rc = yield from self.notify_terminate_nsr(args[0])
+            elif method == 'notify_terminate_vnfr':
+                rc = yield from self.notify_terminate_vnfr(args[0], args[1])
+            elif method == 'notify_terminate_vlr':
+                rc = yield from self.notify_terminate_vlr(args[0], args[1])
+            elif method == 'apply_initial_config':
+                rc = yield from self.apply_initial_config(args[0], args[1])
+            elif method == 'apply_config':
+                rc = yield from self.apply_config(args[0], args[1], args[2])
+            elif method == 'get_config_status':
+                rc = yield from self.get_config_status(args[0], args[1])
+            else:
+                self._log.error("Unknown method %s invoked on config agent plugin",
+                                method)
+        except Exception as e:
+            self._log.error("Caught exception while invoking method: %s, Exception: %s", method, str(e))
+            raise
+        return rc
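+
+    # A slimmer dispatch is possible (sketch, assuming each supported method
+    # name maps 1:1 to a coroutine defined on the plugin):
+    #
+    #   @asyncio.coroutine
+    #   def invoke(self, method, *args):
+    #       handler = getattr(self, method, None)
+    #       if handler is None:
+    #           self._log.error("Unknown method %s invoked on config agent plugin", method)
+    #           return None
+    #       return (yield from handler(*args))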
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_conagent.py b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_conagent.py
new file mode 100644 (file)
index 0000000..543e51b
--- /dev/null
@@ -0,0 +1,263 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import rift.tasklets
+
+from gi.repository import (
+    RwConfigAgentYang as rwcfg_agent,
+)
+
+from .riftcm_config_plugin import DEFAULT_CAP_TYPE
+from . import RiftCA
+from . import jujuconf
+import rift.mano.config_agent
+
+
+class ConfigAgentError(Exception):
+    pass
+
+
+class ConfigAgentExistsError(ConfigAgentError):
+    pass
+
+
+class UnknownAgentTypeError(Exception):
+    pass
+
+
+class ConfigAgentVnfrAddError(Exception):
+    pass
+
+
+class ConfigAgentVnfrTypeError(Exception):
+    pass
+
+
+class ConfigAccountHandler(object):
+    def __init__(self, dts, log, loop, on_add_config_agent, on_delete_config_agent):
+        self._log = log
+        self._dts = dts
+        self._loop = loop
+        self._on_add_config_agent = on_add_config_agent
+        self._on_delete_config_agent = on_delete_config_agent
+
+        self._log.debug("creating config account handler")
+        self.cloud_cfg_handler = rift.mano.config_agent.ConfigAgentSubscriber(
+            self._dts, self._log,
+            rift.mano.config_agent.ConfigAgentCallbacks(
+                on_add_apply=self.on_config_account_added,
+                on_delete_apply=self.on_config_account_deleted,
+            )
+        )
+
+    def on_config_account_deleted(self, account):
+        self._log.debug("config account deleted: %s", account.name)
+        self._on_delete_config_agent(account)
+
+    def on_config_account_added(self, account):
+        self._log.debug("config account added")
+        self._log.debug(account.as_dict())
+        self._on_add_config_agent(account)
+
+    @asyncio.coroutine
+    def register(self):
+        self.cloud_cfg_handler.register()
+
+class RiftCMConfigPlugins(object):
+    """ NSM Config Agent Plugins """
+    def __init__(self):
+        self._plugin_classes = {
+            "juju": jujuconf.JujuConfigPlugin,
+            "riftca": RiftCA.RiftCAConfigPlugin,
+        }
+
+    @property
+    def plugins(self):
+        """ Plugin info """
+        return self._plugin_classes
+
+    def __getitem__(self, name):
+        """ Get item """
+        return self._plugin_classes[name]
+
+    def register(self, plugin_name, plugin_class, *args):
+        """ Register a plugin to this Nsm"""
+        self._plugin_classes[plugin_name] = plugin_class
+
+    def deregister(self, plugin_name, plugin_class, *args):
+        """ Deregister a plugin to this Nsm"""
+        if plugin_name in self._plugin_classes:
+            del self._plugin_classes[plugin_name]
+
+    def class_by_plugin_name(self, name):
+        """ Get class by plugin name """
+        return self._plugin_classes[name]
+
+
+class RiftCMConfigAgent(object):
+    def __init__(self, dts, log, loop, parent):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._ConfigManagerConfig = parent
+
+        self._config_plugins = RiftCMConfigPlugins()
+        self._config_handler = ConfigAccountHandler(
+            self._dts, self._log, self._loop, self._on_config_agent, self._on_config_agent_delete)
+        self._plugin_instances = {}
+        self._default_account_added = False
+
+    @asyncio.coroutine
+    def invoke_config_agent_plugins(self, method, nsr, vnfr, *args):
+        # Invoke the methods on all config agent plugins registered
+        rc = False
+        for agent in self._plugin_instances.values():
+            if not agent.is_vnfr_managed(vnfr.id):
+                continue
+            try:
+                self._log.debug("Invoke {} on {}".format(method, agent.name))
+                rc = yield from agent.invoke(method, nsr, vnfr, *args)
+                break
+            except Exception as e:
+                self._log.error("Error invoking {} on {} : {}".
+                                format(method, agent.name, e))
+                raise
+
+        self._log.info("vnfr({}), method={}, return rc={}"
+                       .format(vnfr.name, method, rc))
+        return rc
+
+    def is_vnfr_config_agent_managed(self, vnfr):
+        if (not vnfr.has_field('netconf') and
+            not vnfr.has_field('juju') and
+            not vnfr.has_field('script')):
+            return False
+
+        for agent in self._plugin_instances.values():
+            try:
+                if agent.is_vnfr_managed(vnfr.id):
+                    return True
+            except Exception as e:
+                self._log.debug("Check if VNFR {} is config agent managed: {}".
+                                format(vnfr.name, e))
+        return False
+
+    def _on_config_agent(self, config_agent):
+        self._log.debug("Got nsm plugin config agent account: %s", config_agent)
+        try:
+            cap_name = config_agent.name
+            cap_inst = self._config_plugins.class_by_plugin_name(
+                config_agent.account_type)
+        except KeyError as e:
+            msg = "Config agent nsm plugin type not found: {}". \
+                format(config_agent.account_type)
+            self._log.error(msg)
+            raise UnknownAgentTypeError(msg)
+
+        # Check to see if the plugin was already instantiated
+        if cap_name in self._plugin_instances:
+            self._log.debug("Config agent nsm plugin {} already instantiated. " \
+                            "Using existing.". format(cap_name))
+        else:
+            # Otherwise, instantiate a new plugin using the config agent account
+            self._log.debug("Instantiting new config agent using class: %s", cap_inst)
+            new_instance = cap_inst(self._dts, self._log, self._loop, config_agent)
+            self._plugin_instances[cap_name] = new_instance
+
+        # TODO (pjoseph): See why this was added, as this deletes the
+        # Rift plugin account when Juju account is added
+        # if self._default_account_added:
+        #     # If the user has provided a config account, chuck the default one.
+        #     if self.DEFAULT_CAP_TYPE in self._plugin_instances:
+        #         del self._plugin_instances[self.DEFAULT_CAP_TYPE]
+
+    def _on_config_agent_delete(self, config_agent):
+        self._log.debug("Got nsm plugin config agent delete, account: %s, type: %s",
+                config_agent.name, config_agent.account_type)
+        # Plugins are keyed by account name (see _on_config_agent above)
+        cap_name = config_agent.name
+        if cap_name in self._plugin_instances:
+            self._log.debug("Config agent nsm plugin exists, deleting it.")
+            del self._plugin_instances[cap_name]
+        else:
+            self._log.error("Error deleting - Config Agent nsm plugin %s does not exist.", cap_name)
+
+
+    @asyncio.coroutine
+    def register(self):
+        self._log.debug("Registering for config agent nsm plugin manager")
+        yield from self._config_handler.register()
+
+        account = rwcfg_agent.ConfigAgentAccount()
+        account.account_type = DEFAULT_CAP_TYPE
+        account.name = "RiftCA"
+        self._on_config_agent(account)
+        self._default_account_added = True
+
+        # Also grab any account already configured
+        config_agents = yield from self._ConfigManagerConfig.cmdts_obj.get_config_agents(name=None)
+        for account in config_agents:
+            self._on_config_agent(account)
+
+    def set_config_agent(self, nsr, vnfr, method):
+        if method == 'juju':
+            agent_type = 'juju'
+        elif method in ['netconf', 'script']:
+            agent_type = DEFAULT_CAP_TYPE
+        else:
+            msg = "Unsupported configuration method ({}) for VNF:{}/{}". \
+                  format(method, nsr.name, vnfr.name)
+            self._log.error(msg)
+            raise UnknownAgentTypeError(msg)
+
+        try:
+            acc_map = nsr.nsr_cfg_msg.vnf_cloud_account_map
+        except AttributeError:
+            self._log.debug("Did not find cloud account map for NS {}".
+                            format(nsr.name))
+            acc_map = []
+
+        for vnfd in acc_map:
+            if vnfd.config_agent_account is not None:
+                if vnfd.member_vnf_index_ref == vnfr.vnfr_msg.member_index:
+                    for agent in self._plugin_instances:
+                        # Find the plugin with the same name
+                        if agent == vnfd.config_agent_account:
+                            # Check if the types are same
+                            if self._plugin_instances[agent].agent_type != agent_type:
+                                msg = "VNF {} specified config agent {} is not of type {}". \
+                                      format(vnfr.name, agent, agent_type)
+                                self._log.error(msg)
+                                raise ConfigAgentVnfrTypeError(msg)
+
+                            self._plugin_instances[agent].add_vnfr_managed(vnfr)
+                            self._log.debug("Added vnfr {} as config plugin {} managed".
+                                            format(vnfr.name, agent))
+                            return
+
+        # If no config agent specified for the VNF, use the
+        # first available of the same type
+        for agent in self._plugin_instances:
+            if self._plugin_instances[agent].agent_type == agent_type:
+                self._plugin_instances[agent].add_vnfr_managed(vnfr)
+                self._log.debug("Added vnfr {} as config plugin {} managed".
+                                format(vnfr.name, agent))
+                return
+
+        msg = "Error finding config agent of type {} for VNF {}". \
+              format(agent_type, vnfr.name)
+        self._log.error(msg)
+        raise ConfigAgentVnfrAddError(msg)
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_config.py b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_config.py
new file mode 100644 (file)
index 0000000..4848e9e
--- /dev/null
@@ -0,0 +1,1472 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import os
+import stat
+import subprocess
+import sys
+import tempfile
+import yaml
+
+from gi.repository import (
+    RwDts as rwdts,
+    RwConmanYang as conmanY,
+    ProtobufC,
+)
+
+import rift.tasklets
+
+from . import rwconman_conagent as conagent
+from . import RiftCM_rpc
+from . import riftcm_config_plugin
+
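+# asyncio.ensure_future() first appeared in Python 3.4.4; alias the older
+# asyncio.async() on earlier interpreters.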
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+def get_vnf_unique_name(nsr_name, vnfr_short_name, member_vnf_index):
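+    # e.g. ("ns1", "trafgen", 1) -> "ns1.trafgen.1"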
+    return "{}.{}.{}".format(nsr_name, vnfr_short_name, member_vnf_index)
+
+class ConmanConfigError(Exception):
+    pass
+
+
+class InitialConfigError(ConmanConfigError):
+    pass
+
+
+def log_this_vnf(vnf_cfg):
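+    # Builds a short log tag for a VNF, e.g. "ns1/vnf1/2/(10.0.0.5)"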
+    log_vnf = ""
+    used_item_list = ['nsr_name', 'vnfr_name', 'member_vnf_index', 'mgmt_ip_address']
+    for item in used_item_list:
+        if item in vnf_cfg:
+            if item == 'mgmt_ip_address':
+                log_vnf += "({})".format(vnf_cfg[item])
+            else:
+                log_vnf += "{}/".format(vnf_cfg[item])
+    return log_vnf
+
+class PretendNsm(object):
+    def __init__(self, dts, log, loop, parent):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._parent = parent
+        self._nsrs = {}
+        self._nsr_dict = parent._nsr_dict
+        self._config_agent_plugins = []
+        self._nsd_msg = {}
+
+    @property
+    def nsrs(self):
+        # Expensive, instead use get_nsr, if you know id.
+        self._nsrs = {}
+        # Update the list of nsrs (agent nsr)
+        for id, nsr_obj in self._nsr_dict.items():
+            self._nsrs[id] = nsr_obj.agent_nsr
+        return self._nsrs
+
+    def get_nsr(self, nsr_id):
+        if nsr_id in self._nsr_dict:
+            nsr_obj = self._nsr_dict[nsr_id]
+            return nsr_obj._nsr
+        return None
+
+    def get_vnfr_msg(self, vnfr_id, nsr_id=None):
+        self._log.debug("get_vnfr_msg(vnfr=%s, nsr=%s)",
+                        vnfr_id, nsr_id)
+        found = False
+        if nsr_id:
+            if nsr_id in self._nsr_dict:
+                nsr_obj = self._nsr_dict[nsr_id]
+                if vnfr_id in nsr_obj._vnfr_dict:
+                    found = True
+        else:
+            for nsr_obj in self._nsr_dict.values():
+                if vnfr_id in nsr_obj._vnfr_dict:
+                    # Found it
+                    found = True
+                    break
+        if found:
+            vnf_cfg = nsr_obj._vnfr_dict[vnfr_id]['vnf_cfg']
+            return vnf_cfg['agent_vnfr'].vnfr_msg
+        else:
+            return None
+
+    @asyncio.coroutine
+    def get_nsd(self, nsr_id):
+        if nsr_id not in self._nsd_msg:
+            nsr_config = yield from self._parent.cmdts_obj.get_nsr_config(nsr_id)
+            self._nsd_msg[nsr_id] = nsr_config.nsd
+        return self._nsd_msg[nsr_id]
+
+    @property
+    def config_agent_plugins(self):
+        self._config_agent_plugins = []
+        for agent in self._parent._config_agent_mgr._plugin_instances.values():
+            self._config_agent_plugins.append(agent)
+        return self._config_agent_plugins
+
+class ConfigManagerConfig(object):
+    def __init__(self, dts, log, loop, parent):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._parent = parent
+        self._nsr_dict = {}
+        self.pending_cfg = {}
+        self.terminate_cfg = {}
+        self.pending_tasks = [] # Used for NSR id fetch retries
+                                # (mainly exercised in the restart case)
+        self._config_xpath = "C,/cm-config"
+        self._opdata_xpath = "D,/rw-conman:cm-state"
+
+        self.cm_config = conmanY.SoConfig()
+        # RO specific configuration
+        self.ro_config = {}
+        for key in self.cm_config.ro_endpoint.fields:
+            self.ro_config[key] = None
+
+        # Initialize cm-state
+        self.cm_state = {}
+        self.cm_state['cm_nsr'] = []
+        self.cm_state['states'] = "Initialized"
+
+        # Initialize objects to register
+        self.cmdts_obj = ConfigManagerDTS(self._log, self._loop, self, self._dts)
+        self._config_agent_mgr = conagent.RiftCMConfigAgent(
+            self._dts,
+            self._log,
+            self._loop,
+            self,
+        )
+        self.reg_handles = [
+            self.cmdts_obj,
+            self._config_agent_mgr,
+            RiftCM_rpc.RiftCMRPCHandler(self._dts, self._log, self._loop,
+                                        PretendNsm(
+                                            self._dts, self._log, self._loop, self)),
+        ]
+
+    def is_nsr_valid(self, nsr_id):
+        if nsr_id in self._nsr_dict:
+            return True
+        return False
+
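+    # A pending task is a plain dict, e.g. {'nsrid': <NSR id>, 'retries': <count>},
+    # queued for ConfigManagerConfig_pending_loop() to retry config_NSR().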
+    def add_to_pending_tasks(self, task):
+        if self.pending_tasks:
+            for p_task in self.pending_tasks:
+                if p_task['nsrid'] == task['nsrid']:
+                    # Already queued
+                    return
+        try:
+            self.pending_tasks.append(task)
+            self._log.debug("add_to_pending_tasks (nsrid:%s)",
+                            task['nsrid'])
+            if len(self.pending_tasks) == 1:
+                self._loop.create_task(self.ConfigManagerConfig_pending_loop())
+                # TBD - change to info level
+                self._log.debug("Started pending_loop!")
+        except Exception as e:
+            self._log.error("Failed adding to pending tasks (%s)", str(e))
+
+    def del_from_pending_tasks(self, task):
+        try:
+            self.pending_tasks.remove(task)
+        except Exception as e:
+            self._log.error("Failed removing from pending tasks (%s)", str(e))
+
+    @asyncio.coroutine
+    def ConfigManagerConfig_pending_loop(self):
+        loop_sleep = 2
+        while True:
+            yield from asyncio.sleep(loop_sleep, loop=self._loop)
+            """
+            This pending task queue is ordred by events,
+            must finish previous task successfully to be able to go on to the next task
+            """
+            if self.pending_tasks:
+                self._log.debug("self.pending_tasks len=%s", len(self.pending_tasks))
+                task = self.pending_tasks[0]
+                done = False
+                if 'nsrid' in task:
+                    nsrid = task['nsrid']
+                    self._log.debug("Will execute pending task for NSR id(%s)", nsrid)
+                    try:
+                        # Try to configure this NSR
+                        task['retries'] -= 1
+                        done = yield from self.config_NSR(nsrid)
+                        self._log.info("self.config_NSR status=%s", done)
+
+                    except Exception as e:
+                        self._log.error("Failed(%s) configuring NSR(%s)," \
+                                        "retries remained:%d!",
+                                        str(e), nsrid, task['retries'])
+                    finally:
+                        self.pending_tasks.remove(task)
+
+                    if done:
+                        self._log.debug("Finished pending task NSR id(%s):", nsrid)
+                    else:
+                        self._log.error("Failed configuring NSR(%s), retries remained:%d!",
+                                        nsrid, task['retries'])
+
+                        # Failed; re-insert (append at the end) this task
+                        # to be retried later, if any retries remain.
+                        if task['retries']:
+                            self.pending_tasks.append(task)
+            else:
+                self._log.debug("Stopped pending_loop!")
+                break
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self.register_cm_state_opdata()
+
+        # Initialize all handles that needs to be registered
+        for reg in self.reg_handles:
+            yield from reg.register()
+        
+    @asyncio.coroutine
+    def register_cm_state_opdata(self):
+
+        def state_to_string(state):
+            state_dict = {
+                conmanY.RecordState.INIT : "init",
+                conmanY.RecordState.RECEIVED : "received",
+                conmanY.RecordState.CFG_PROCESS : "cfg_process",
+                conmanY.RecordState.CFG_PROCESS_FAILED : "cfg_process_failed",
+                conmanY.RecordState.CFG_SCHED : "cfg_sched",
+                conmanY.RecordState.CFG_DELAY : "cfg_delay",
+                conmanY.RecordState.CONNECTING : "connecting",
+                conmanY.RecordState.FAILED_CONNECTION : "failed_connection",
+                conmanY.RecordState.NETCONF_CONNECTED : "netconf_connected",
+                conmanY.RecordState.NETCONF_SSH_CONNECTED : "netconf_ssh_connected",
+                conmanY.RecordState.RESTCONF_CONNECTED : "restconf_connected",
+                conmanY.RecordState.CFG_SEND : "cfg_send",
+                conmanY.RecordState.CFG_FAILED : "cfg_failed",
+                conmanY.RecordState.READY_NO_CFG : "ready_no_cfg",
+                conmanY.RecordState.READY : "ready",
+                }
+            return state_dict[state]
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+
+            self._log.debug("Received cm-state: msg=%s, action=%s", msg, action)
+
+            if action == rwdts.QueryAction.READ:
+                show_output = conmanY.CmOpdata()
+                show_output.from_dict(self.cm_state)
+                self._log.debug("Responding to SHOW cm-state: %s", self.cm_state)
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK,
+                                        xpath=self._opdata_xpath,
+                                        msg=show_output)
+            else:
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        self._log.info("Registering for cm-opdata xpath: %s",
+                        self._opdata_xpath)
+
+        try:
+            handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
+            yield from self._dts.register(xpath=self._opdata_xpath,
+                                          handler=handler,
+                                          flags=rwdts.Flag.PUBLISHER)
+            self._log.info("Successfully registered for opdata(%s)", self._opdata_xpath)
+        except Exception as e:
+            self._log.error("Failed to register for opdata as (%s)", e)
+
+    @asyncio.coroutine
+    def process_nsd_vnf_configuration(self, nsr_obj, vnfr):
+
+        def get_config_method(vnf_config):
+            cfg_types = ['netconf', 'juju', 'script']
+            for method in cfg_types:
+                if method in vnf_config:
+                    return method
+            return None
+            
+        def get_cfg_file_extension(method, configuration_options):
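+            # e.g. ("script", {"script_type": "bash"}) -> "sh"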
+            ext_dict = {
+                "netconf" : "xml",
+                "script" : {
+                    "bash" : "sh",
+                    "expect" : "exp",
+                },
+                "juju" : "yml"
+            }
+
+            if method == "netconf":
+                return ext_dict[method]
+            elif method == "script":
+                return ext_dict[method][configuration_options['script_type']]
+            elif method == "juju":
+                return ext_dict[method]
+            else:
+                return "cfg"
+
+        # This is what the YAML file should look like.
+        # This routine is called once per VNF, so keep appending to the file.
+        # Priority order is determined by the number, so there is no need to
+        # generate the file in that order; a dictionary keyed by the priority
+        # number takes care of the ordering.
+        '''
+        1 : <== This is priority
+          name : trafsink_vnfd
+          member_vnf_index : 2
+          configuration_delay : 120
+          configuration_type : netconf
+          configuration_options :
+            username : admin
+            password : admin
+            port : 2022
+            target : running
+        2 :
+          name : trafgen_vnfd
+          member_vnf_index : 1
+          configuration_delay : 0
+          configuration_type : netconf
+          configuration_options :
+            username : admin
+            password : admin
+            port : 2022
+            target : running
+        '''
+
+        # Save some parameters needed as shortcuts in a flat structure (also generated)
+        vnf_cfg = vnfr['vnf_cfg']
+        # Prepare unique name for this VNF
+        vnf_cfg['vnf_unique_name'] = get_vnf_unique_name(
+            vnf_cfg['nsr_name'], vnfr['short_name'], vnfr['member_vnf_index_ref'])
+
+        nsr_obj.cfg_path_prefix = '{}/{}_{}'.format(
+            nsr_obj.this_nsr_dir, vnfr['short_name'], vnfr['member_vnf_index_ref'])
+        nsr_vnfr = '{}/{}_{}'.format(
+            vnf_cfg['nsr_name'], vnfr['short_name'], vnfr['member_vnf_index_ref'])
+
+        # Get vnf_configuration from vnfr
+        vnf_config = vnfr['vnf_configuration']
+
+        self._log.debug("vnf_configuration = %s", vnf_config)
+
+        # Create priority dictionary
+        cfg_priority_order = 0
+        if ('config_attributes' in vnf_config and
+            'config_priority' in vnf_config['config_attributes']):
+            cfg_priority_order = vnf_config['config_attributes']['config_priority']
+
+        if cfg_priority_order not in nsr_obj.nsr_cfg_config_attributes_dict:
+            # No VNFR with this priority yet, initialize the list
+            nsr_obj.nsr_cfg_config_attributes_dict[cfg_priority_order] = []
+
+        method = get_config_method(vnf_config)
+        if method is not None:
+            # Create all sub dictionaries first
+            config_priority = {
+                'id' : vnfr['id'],
+                'name' : vnfr['short_name'],
+                'member_vnf_index' : vnfr['member_vnf_index_ref'],
+            }
+
+            if 'config_delay' in vnf_config['config_attributes']:
+                config_priority['configuration_delay'] = vnf_config['config_attributes']['config_delay']
+                vnf_cfg['config_delay'] = config_priority['configuration_delay']
+
+            configuration_options = {}
+            self._log.debug("config method=%s", method)
+            config_priority['configuration_type'] = method
+            vnf_cfg['config_method'] = method
+
+            # Set config agent based on method
+            self._config_agent_mgr.set_config_agent(
+                nsr_obj.agent_nsr, vnf_cfg['agent_vnfr'], method)
+
+            cfg_opt_list = [
+                'port', 'target', 'script_type', 'ip_address', 'user', 'secret',
+            ]
+            for cfg_opt in cfg_opt_list:
+                if cfg_opt in vnf_config[method]:
+                    configuration_options[cfg_opt] = vnf_config[method][cfg_opt]
+                    vnf_cfg[cfg_opt] = configuration_options[cfg_opt]
+
+            cfg_opt_list = ['mgmt_ip_address', 'username', 'password']
+            for cfg_opt in cfg_opt_list:
+                if cfg_opt in vnf_config['config_access']:
+                    configuration_options[cfg_opt] = vnf_config['config_access'][cfg_opt]
+                    vnf_cfg[cfg_opt] = configuration_options[cfg_opt]
+
+            # Add to the cp_dict
+            vnf_cp_dict = nsr_obj._cp_dict[vnfr['member_vnf_index_ref']]
+            vnf_cp_dict['rw_mgmt_ip'] = vnf_cfg['mgmt_ip_address']
+            vnf_cp_dict['rw_username'] = vnf_cfg['username']
+            vnf_cp_dict['rw_password'] = vnf_cfg['password']
+            
+
+            # TBD - see if we can neatly include the config in the "config_attributes" file (not strictly needed)
+            #config_priority['config_template'] = vnf_config['config_template']
+            # Create config file
+            vnf_cfg['juju_script'] = os.path.join(self._parent.cfg_dir, 'juju_if.py')
+
+            if 'config_template' in vnf_config:
+                vnf_cfg['cfg_template'] = '{}_{}_template.cfg'.format(nsr_obj.cfg_path_prefix, config_priority['configuration_type'])
+                vnf_cfg['cfg_file'] = '{}.{}'.format(nsr_obj.cfg_path_prefix, get_cfg_file_extension(method, configuration_options))
+                vnf_cfg['xlate_script'] = os.path.join(self._parent.cfg_dir, 'xlate_cfg.py')
+                try:
+                    # Now write this template into file
+                    with open(vnf_cfg['cfg_template'], "w") as cf:
+                        cf.write(vnf_config['config_template'])
+                except Exception as e:
+                    self._log.error("Processing NSD, failed to generate configuration template : %s (Error : %s)",
+                                    vnf_config['config_template'], str(e))
+                    raise
+
+            self._log.debug("VNF endpoint so far: %s", vnf_cfg)
+
+            # Populate filled up dictionary
+            config_priority['configuration_options'] = configuration_options
+            nsr_obj.nsr_cfg_config_attributes_dict[cfg_priority_order].append(config_priority)
+            nsr_obj.num_vnfs_to_cfg += 1
+            nsr_obj._vnfr_dict[vnf_cfg['vnf_unique_name']] = vnfr
+            nsr_obj._vnfr_dict[vnfr['id']] = vnfr
+
+            self._log.debug("VNF:(%s) config_attributes = %s",
+                            log_this_vnf(vnfr['vnf_cfg']),
+                            nsr_obj.nsr_cfg_config_attributes_dict)
+        else:
+            self._log.info("VNF:(%s) is not to be configured by Configuration Manager!",
+                           log_this_vnf(vnfr['vnf_cfg']))
+            yield from nsr_obj.update_vnf_cm_state(vnfr, conmanY.RecordState.READY_NO_CFG)
+
+        # Update the cm-state
+        nsr_obj.populate_vm_state_from_vnf_cfg()
+
+    @asyncio.coroutine
+    def config_NSR(self, id):
+
+        def my_yaml_dump(config_attributes_dict, yf):
+
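+            # Sort by the numeric priority key so the dumped YAML lists the
+            # VNFs in configuration order.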
+            yaml_dict = dict(sorted(config_attributes_dict.items()))
+            yf.write(yaml.dump(yaml_dict, default_flow_style=False))
+        
+        nsr_dict = self._nsr_dict
+        self._log.info("Configure NSR, id = %s", id)
+
+        #####################TBD###########################
+        # yield from self._config_agent_mgr.invoke_config_agent_plugins('notify_create_nsr', self.id, self._nsd)
+        # yield from self._config_agent_mgr.invoke_config_agent_plugins('notify_nsr_active', self.id, self._vnfrs)
+        
+        try:
+            if id not in nsr_dict:
+                nsr_obj = ConfigManagerNSR(self._log, self._loop, self, id)
+                nsr_dict[id] = nsr_obj
+            else:
+                self._log.info("NSR(%s) is already initialized!", id)
+                nsr_obj = nsr_dict[id]
+        except Exception as e:
+            self._log.error("Failed creating NSR object for (%s) as (%s)", id, str(e))
+            raise
+
+        # Try to configure this NSR only if not already processed
+        if nsr_obj.cm_nsr['state'] != nsr_obj.state_to_string(conmanY.RecordState.INIT):
+            self._log.debug("NSR(%s) is already processed, state=%s",
+                            nsr_obj.nsr_name, nsr_obj.cm_nsr['state'])
+            yield from nsr_obj.publish_cm_state()
+            return True
+
+        cmdts_obj = self.cmdts_obj
+        try:
+            # Fetch NSR
+            nsr = yield from cmdts_obj.get_nsr(id)
+            self._log.debug("Full NSR : %s", nsr)
+            if nsr['operational_status'] != "running":
+                self._log.info("NSR(%s) is not ready yet!", nsr['nsd_name_ref'])
+                return False
+            self._nsr = nsr
+
+            # Create Agent NSR class
+            nsr_config = yield from cmdts_obj.get_nsr_config(id)
+            self._log.debug("NSR {} config: {}".format(id, nsr_config))
+            nsr_obj.agent_nsr = riftcm_config_plugin.RiftCMnsr(nsr, nsr_config)
+
+            try:
+                yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.RECEIVED)
+
+                # Parse NSR
+                if nsr is not None:
+                    nsr_obj.set_nsr_name(nsr['nsd_name_ref'])
+                    nsr_dir = os.path.join(self._parent.cfg_dir, nsr_obj.nsr_name)
+                    self._log.info("Checking NS config directory: %s", nsr_dir)
+                    if not os.path.isdir(nsr_dir):
+                        os.makedirs(nsr_dir)
+                        # self._log.critical("NS %s is not to be configured by Service Orchestrator!", nsr_obj.nsr_name)
+                        # yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.READY_NO_CFG)
+                        # return
+
+                    nsr_obj.set_config_dir(self)
+                    
+                    for const_vnfr in nsr['constituent_vnfr_ref']:
+                        self._log.debug("Fetching VNFR (%s)", const_vnfr['vnfr_id'])
+                        vnfr_msg = yield from cmdts_obj.get_vnfr(const_vnfr['vnfr_id'])
+                        if vnfr_msg:
+                            vnfr = vnfr_msg.as_dict()
+                            self._log.info("create VNF:{}/{}".format(nsr_obj.nsr_name, vnfr['short_name']))
+                            agent_vnfr = yield from nsr_obj.add_vnfr(vnfr, vnfr_msg)
+
+                            # Preserve order, self.process_nsd_vnf_configuration()
+                            # sets up the config agent based on the method
+                            yield from self.process_nsd_vnf_configuration(nsr_obj, vnfr)
+                            yield from self._config_agent_mgr.invoke_config_agent_plugins(
+                                'notify_create_vnfr',
+                                nsr_obj.agent_nsr,
+                                agent_vnfr)
+
+                        #####################TBD###########################
+                        # self._log.debug("VNF active. Apply initial config for vnfr {}".format(vnfr.name))
+                        # yield from self._config_agent_mgr.invoke_config_agent_plugins('apply_initial_config',
+                        #                                             vnfr.id, vnfr)
+                        # yield from self._config_agent_mgr.invoke_config_agent_plugins('notify_terminate_vnf', self.id, vnfr)
+
+            except Exception as e:
+                self._log.error("Failed processing NSR (%s) as (%s)", nsr_obj.nsr_name, str(e))
+                self._log.exception(e)
+                yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_PROCESS_FAILED)
+                raise
+
+            try:
+                # Generate config_config_attributes.yaml (For debug reference)
+                with open(nsr_obj.config_attributes_file, "w") as yf:
+                    my_yaml_dump(nsr_obj.nsr_cfg_config_attributes_dict, yf)
+            except Exception as e:
+                self._log.error("NS:(%s) failed to write config attributes file as (%s)", nsr_obj.nsr_name, str(e))
+
+            try:
+                # Generate nsr_xlate_dict.yaml (For debug reference)
+                with open(nsr_obj.xlate_dict_file, "w") as yf:
+                    yf.write(yaml.dump(nsr_obj._cp_dict, default_flow_style=False))
+            except Exception as e:
+                self._log.error("NS:(%s) failed to write nsr xlate tags file as (%s)", nsr_obj.nsr_name, str(e))
+
+            self._log.debug("Starting to configure each VNF")
+
+            # Check if this NS has input parameters
+            self._log.info("Checking NS configuration order: %s", nsr_obj.config_attributes_file)
+
+            if os.path.exists(nsr_obj.config_attributes_file):
+                # Apply configuration in the specified order
+                try:
+                    # Loop to configure VNFs in the specified order
+                    self._log.info("Using dynamic configuration input parameters for NS: %s", nsr_obj.nsr_name)
+
+                    # cfg_delay = nsr_obj.nsr_cfg_config_attributes_dict['configuration_delay']
+                    # if cfg_delay:
+                    #     self._log.info("Applying configuration delay for NS (%s) ; %d seconds",
+                    #                    nsr_obj.nsr_name, cfg_delay)
+                    #     yield from asyncio.sleep(cfg_delay, loop=self._loop)
+
+                    for config_attributes_dict in nsr_obj.nsr_cfg_config_attributes_dict.values():
+                        # Iterate through each priority level
+                        for vnf_config_attributes_dict in config_attributes_dict:
+                            # Iterate through each vnfr at this priority level
+                                
+                            # Make up vnf_unique_name with vnfd name and member index
+                            #vnfr_name = "{}.{}".format(nsr_obj.nsr_name, vnf_config_attributes_dict['name'])
+                            vnf_unique_name = get_vnf_unique_name(
+                                nsr_obj.nsr_name,
+                                vnf_config_attributes_dict['name'],
+                                str(vnf_config_attributes_dict['member_vnf_index']),
+                            )
+                            self._log.info("NS (%s) : VNF (%s) - Processing configuration attributes",
+                                           nsr_obj.nsr_name, vnf_unique_name)
+
+                            # Find vnfr for this vnf_unique_name
+                            if vnf_unique_name not in nsr_obj._vnfr_dict:
+                                self._log.error("NS (%s) - Can not find VNF to be configured: %s", nsr_obj.nsr_name, vnf_unique_name)
+                            else:
+                                # Save this unique VNF's config input parameters
+                                nsr_obj.vnf_config_attributes_dict[vnf_unique_name] = vnf_config_attributes_dict
+                                nsr_obj.ConfigVNF(nsr_obj._vnfr_dict[vnf_unique_name])
+
+                    # Now add the entire NS to the pending config list.
+                    self._log.info("Scheduling NSR:{} configuration".format(nsr_obj.nsr_name))
+                    self._parent.add_to_pending(nsr_obj)
+                    self._parent.add_nsr_obj(nsr_obj)
+
+                except Exception as e:
+                    self._log.error("Failed processing input parameters for NS (%s) as %s", nsr_obj.nsr_name, str(e))
+                    raise
+            else:
+                self._log.error("No configuration input parameters for NSR (%s)", nsr_obj.nsr_name)
+
+        except Exception as e:
+            self._log.error("Failed to configure NS (%s) as (%s)", nsr_obj.nsr_name, str(e))
+            yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_PROCESS_FAILED)
+            raise
+
+        return True
+
+    @asyncio.coroutine
+    def terminate_NSR(self, id):
+        nsr_dict = self._nsr_dict
+        if id not in nsr_dict:
+            self._log.error("NSR(%s) does not exist!", id)
+            return
+        else:
+            # Remove this NSR if we have it on pending task list
+            for task in self.pending_tasks:
+                if task['nsrid'] == id:
+                    self.del_from_pending_tasks(task)
+
+            # Remove this object from global list
+            nsr_obj = nsr_dict.pop(id, None)
+
+            # Remove this NS cm-state from global status list
+            self.cm_state['cm_nsr'].remove(nsr_obj.cm_nsr)
+
+            # Also remove any scheduled configuration event
+            for nsr_obj_p in self._parent.pending_cfg:
+                if nsr_obj_p == nsr_obj:
+                    assert id == nsr_obj_p._nsr_id
+                    #self._parent.pending_cfg.remove(nsr_obj_p)
+                    # Mark this NSR as being deleted so we do not try to configure
+                    # it if we are in cfg_delay (otherwise the loop will wake up
+                    # and continue processing).
+                    nsr_obj_p.being_deleted = True
+                    self._log.info("Removed scheduled configuration for NSR(%s)", nsr_obj.nsr_name)
+
+            self._parent.remove_nsr_obj(id)
+
+            # Call Config Agent to clean up for each VNF
+            for agent_vnfr in nsr_obj.agent_nsr.vnfrs:
+                yield from self._config_agent_mgr.invoke_config_agent_plugins(
+                    'notify_terminate_vnfr',
+                    nsr_obj.agent_nsr,
+                    agent_vnfr)
+
+            # publish delete cm-state (cm-nsr)
+            yield from nsr_obj.delete_cm_nsr()
+
+            #####################TBD###########################
+            # yield from self._config_agent_mgr.invoke_config_agent_plugins('notify_terminate_ns', self.id)
+
+            self._log.info("NSR(%s/%s) is deleted", nsr_obj.nsr_name, id)
+
+    @asyncio.coroutine
+    def process_ns_initial_config(self, nsr_obj):
+        '''Apply the initial-config-primitives specified in NSD'''
+
+        def get_input_file(parameters):
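+            # Builds the YAML input file handed to the user-defined script.
+            # Illustrative shape (values assumed):
+            #   nsr_name: ns1
+            #   config-agent: {}
+            #   parameter: {key: value}
+            #   vnfr:
+            #     1: {name: vnf1, mgmt_ip_address: 10.0.0.5, mgmt_port: 2022, vdur: [...]}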
+            inp = {}
+
+            # Add NSR name to file
+            inp['nsr_name'] = nsr_obj.nsr_name
+
+            # TODO (pjoseph): Add config agents, we need to identify which all
+            # config agents are required from this NS and provide only those
+            inp['config-agent'] = {}
+
+            # Add parameters for initial config
+            inp['parameter'] = {}
+            for parameter in parameters:
+                try:
+                    inp['parameter'][parameter['name']] = parameter['value']
+                except KeyError as e:
+                    self._log.info("NSR {} initial config parameter {} with no value: {}".
+                                    format(nsr_obj.nsr_name, parameter, e))
+
+
+            # Add vnfrs specific data
+            inp['vnfr'] = {}
+            for vnfr in nsr_obj.vnfrs:
+                v = {}
+
+                v['name'] = vnfr['name']
+                v['mgmt_ip_address'] = vnfr['vnf_cfg']['mgmt_ip_address']
+                v['mgmt_port'] = vnfr['vnf_cfg']['port']
+
+                if 'dashboard_url' in vnfr:
+                    v['dashboard_url'] = vnfr['dashboard_url']
+
+                if 'connection_point' in vnfr:
+                    v['connection_point'] = []
+                    for cp in vnfr['connection_point']:
+                        v['connection_point'].append(
+                            {
+                                'name': cp['name'],
+                                'ip_address': cp['ip_address'],
+                            }
+                        )
+
+                v['vdur'] = []
+                vdu_data = [(vdu['name'], vdu['management_ip'], vdu['vm_management_ip'], vdu['id'])
+                        for vdu in vnfr['vdur']]
+
+                for data in vdu_data:
+                    data = dict(zip(['name', 'management_ip', 'vm_management_ip', 'id'] , data))
+                    v['vdur'].append(data)
+
+                inp['vnfr'][vnfr['member_vnf_index_ref']] = v
+
+            self._log.debug("Input data for NSR {}: {}".
+                            format(nsr_obj.nsr_name, inp))
+
+            # Convert to YAML string
+            yaml_string = yaml.dump(inp, default_flow_style=False)
+
+            # Write the inputs as yaml file
+            tmp_file = None
+            with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
+                tmp_file.write(yaml_string.encode("UTF-8"))
+            self._log.debug("Input file created for NSR {}: {}".
+                            format(nsr_obj.nsr_name, tmp_file.name))
+
+            return tmp_file.name
+
+        def get_script_file(script_name, nsd_name, nsd_id):
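+            # Resolution order: an absolute path is used as-is; otherwise try
+            # the NSD package scripts directory under $RIFT_ARTIFACTS, then
+            # fall back to $RIFT_INSTALL/usr/bin.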
+            # Get the full path to the script
+            script = ''
+            # If script name starts with /, assume it is full path
+            if script_name[0] == '/':
+                # The script has full path, use as is
+                script = script_name
+            else:
+                script = os.path.join(os.environ['RIFT_ARTIFACTS'],
+                                      'launchpad/packages/nsd',
+                                      nsd_id,
+                                      nsd_name,
+                                      'scripts',
+                                      script_name)
+                self._log.debug("Checking for script at %s", script)
+                if not os.path.exists(script):
+                    self._log.debug("Did not find script %s", script)
+                    script = os.path.join(os.environ['RIFT_INSTALL'],
+                                          'usr/bin',
+                                          script_name)
+
+                # Seen cases in jenkins, where the script execution fails
+                # with permission denied. Setting the permission on script
+                # to make sure it has execute permission
+                perm = os.stat(script).st_mode
+                if not (perm & stat.S_IXUSR):
+                    self._log.warn("NSR {} initial config script {} "
+                                   "without execute permission: {}".
+                                   format(nsr_id, script, perm))
+                    os.chmod(script, perm | stat.S_IXUSR)
+            return script
+
+        nsr_id = nsr_obj.nsr_id
+        nsr_name = nsr_obj.nsr_name
+        self._log.debug("Apply initial config for NSR {}({})".
+                        format(nsr_name, nsr_id))
+
+        # Fetch NSR
+        nsr = yield from self.cmdts_obj.get_nsr(nsr_id)
+        if nsr is not None:
+            nsd = yield from self.cmdts_obj.get_nsd(nsr_id)
+
+            try:
+                # Check if initial config is present
+                # TODO (pjoseph): Sort based on seq
+                for conf in nsr['initial_config_primitive']:
+                    self._log.debug("Parameter conf: {}".
+                                    format(conf))
+
+                    parameters = []
+                    try:
+                        parameters = conf['parameter']
+                    except Exception as e:
+                        self._log.debug("Parameter conf: {}, e: {}".
+                                        format(conf, e))
+
+                    inp_file = get_input_file(parameters)
+
+                    script = get_script_file(conf['user_defined_script'],
+                                             nsd.name,
+                                             nsd.id)
+
+                    cmd = "{0} {1}".format(script, inp_file)
+                    self._log.debug("Running the CMD: {}".format(cmd))
+
+                    process = yield from asyncio.create_subprocess_shell(
+                        cmd, loop=self._loop)
+                    yield from process.wait()
+                    if process.returncode:
+                        msg = "NSR {} initial config using {} failed with {}". \
+                              format(nsr_name, script, process.returncode)
+                        self._log.error(msg)
+                        raise InitialConfigError(msg)
+                    else:
+                        os.remove(inp_file)
+
+            except KeyError as e:
+                self._log.debug("Did not find initial config {}".
+                                format(e))
+
+
+class ConfigManagerNSR(object):
+    def __init__(self, log, loop, parent, id):
+        self._log = log
+        self._loop = loop
+        self._rwcal = None
+        self._vnfr_dict = {}
+        self._cp_dict = {}
+        self._nsr_id = id
+        self._parent = parent
+        self._log.info("Instantiated NSR entry for id=%s", id)
+        self.nsr_cfg_config_attributes_dict = {}
+        self.vnf_config_attributes_dict = {}
+        self.num_vnfs_to_cfg = 0
+        self._vnfr_list = []
+        self.vnf_cfg_list = []
+        self.this_nsr_dir = None
+        self.being_deleted = False
+        self.dts_obj = self._parent.cmdts_obj
+
+        # Initialize cm-state for this NS
+        self.cm_nsr = {}
+        self.cm_nsr['cm_vnfr'] = []
+        self.cm_nsr['id'] = id
+        self.cm_nsr['state'] = self.state_to_string(conmanY.RecordState.INIT)
+        self.cm_nsr['state_details'] = None
+
+        self.set_nsr_name('Not Set')
+
+        # Add this NSR cm-state object to global cm-state
+        parent.cm_state['cm_nsr'].append(self.cm_nsr)
+
+        # Place holders for NSR & VNFR classes
+        self.agent_nsr = None
+
+    @property
+    def nsr_opdata_xpath(self):
+        ''' Returns full xpath for this NSR cm-state opdata '''
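+        # e.g. "D,/rw-conman:cm-state/rw-conman:cm-nsr[rw-conman:id='<nsr-id>']"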
+        return(
+            "D,/rw-conman:cm-state" +
+            "/rw-conman:cm-nsr[rw-conman:id='{}']"
+        ).format(self._nsr_id)
+
+    @property
+    def vnfrs(self):
+        return self._vnfr_list
+
+    @property
+    def parent(self):
+        return self._parent
+
+    @property
+    def nsr_id(self):
+        return self._nsr_id
+
+    @asyncio.coroutine
+    def publish_cm_state(self):
+        ''' This function publishes cm_state for this NSR '''
+
+        cm_state = conmanY.CmOpdata()
+        cm_state_nsr = cm_state.cm_nsr.add()
+        cm_state_nsr.from_dict(self.cm_nsr)
+        #with self._dts.transaction() as xact:
+        yield from self.dts_obj.update(self.nsr_opdata_xpath, cm_state_nsr)
+        self._log.info("Published cm-state with xpath %s and nsr %s",
+                       self.nsr_opdata_xpath,
+                       cm_state_nsr)
+
+    @asyncio.coroutine
+    def delete_cm_nsr(self):
+        ''' Delete the cm-nsr opdata for this NSR '''
+
+        yield from self.dts_obj.delete(self.nsr_opdata_xpath)
+        self._log.info("Deleted cm-nsr with xpath %s",
+                       self.nsr_opdata_xpath)
+
+    def set_nsr_name(self, name):
+        self.nsr_name = name
+        self.cm_nsr['name'] = name
+
+    def set_config_dir(self, caller):
+        self.this_nsr_dir = os.path.join(
+            caller._parent.cfg_dir, self.nsr_name, caller._nsr['name_ref'])
+        if not os.path.exists(self.this_nsr_dir):
+            os.makedirs(self.this_nsr_dir)
+            self._log.debug("NSR:(%s), Created configuration directory(%s)",
+                            caller._nsr['name_ref'], self.this_nsr_dir)
+        self.config_attributes_file = os.path.join(self.this_nsr_dir, "configuration_config_attributes.yml")
+        self.xlate_dict_file = os.path.join(self.this_nsr_dir, "nsr_xlate_dict.yml")
+        
+    def xlate_conf(self, vnfr, vnf_cfg):
+
+        # If configuration type is not already set, try to read from attributes
+        if vnf_cfg['interface_type'] is None:
+            # Prepare unique name for this VNF
+            vnf_unique_name = get_vnf_unique_name(
+                    vnf_cfg['nsr_name'],
+                    vnfr['short_name'],
+                    vnfr['member_vnf_index_ref'],
+                    )
+
+            # Find this particular (unique) VNF's config attributes
+            if (vnf_unique_name in self.vnf_config_attributes_dict):
+                vnf_cfg_config_attributes_dict = self.vnf_config_attributes_dict[vnf_unique_name]
+                vnf_cfg['interface_type'] = vnf_cfg_config_attributes_dict['configuration_type']
+                if 'configuration_options' in vnf_cfg_config_attributes_dict:
+                    cfg_opts = vnf_cfg_config_attributes_dict['configuration_options']
+                    for key, value in cfg_opts.items():
+                        vnf_cfg[key] = value
+
+        cfg_path_prefix = '{}/{}/{}_{}'.format(
+                self._parent._parent.cfg_dir,
+                vnf_cfg['nsr_name'],
+                vnfr['short_name'],
+                vnfr['member_vnf_index_ref'],
+                )
+
+        vnf_cfg['cfg_template'] = '{}_{}_template.cfg'.format(cfg_path_prefix, vnf_cfg['interface_type'])
+        vnf_cfg['cfg_file'] = '{}.cfg'.format(cfg_path_prefix)
+        vnf_cfg['xlate_script'] = self._parent._parent.cfg_dir + '/xlate_cfg.py'
+
+        self._log.debug("VNF endpoint so far: %s", vnf_cfg)
+
+        self._log.info("Checking cfg_template %s", vnf_cfg['cfg_template'])
+        if os.path.exists(vnf_cfg['cfg_template']):
+            return True
+        return False
+
+    def ConfigVNF(self, vnfr):
+
+        vnf_cfg = vnfr['vnf_cfg']
+        vnf_cm_state = self.find_or_create_vnfr_cm_state(vnf_cfg)
+
+        if (vnf_cm_state['state'] == self.state_to_string(conmanY.RecordState.READY_NO_CFG)
+            or
+            vnf_cm_state['state'] == self.state_to_string(conmanY.RecordState.READY)):
+            self._log.warning("NS/VNF (%s/%s) is already configured! Skipped.", self.nsr_name, vnfr['short_name'])
+            return
+
+        # Update VNF state
+        vnf_cm_state['state'] = self.state_to_string(conmanY.RecordState.CFG_PROCESS)
+
+        # Now translate the configuration for IP addresses
+        try:
+            # Add cp_dict members (TAGS) for this VNF
+            self._cp_dict['rw_mgmt_ip'] = vnf_cfg['mgmt_ip_address']
+            self._cp_dict['rw_username'] = vnf_cfg['username']
+            self._cp_dict['rw_password'] = vnf_cfg['password']
+            ############################################################
+            # TBD - Need to lookup above 3 for a given VNF, not global #
+            # Once we do that no need to dump below file again before  #
+            # each VNF configuration translation.                      #
+            # This will require all existing config templates to be    #
+            # changed for above three tags to include member index     #
+            ############################################################
+            try:
+                nsr_obj = vnf_cfg['nsr_obj']
+                # Regenerate nsr_xlate_dict.yml (for debug reference)
+                with open(nsr_obj.xlate_dict_file, "w") as yf:
+                    yf.write(yaml.dump(nsr_obj._cp_dict, default_flow_style=False))
+            except Exception as e:
+                self._log.error("NS:(%s) failed to write nsr xlate tags file as (%s)", nsr_obj.nsr_name, str(e))
+            
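+            # The xlate script substitutes tags in the template with values from
+            # the xlate dict; the resulting command looks like (paths illustrative):
+            #   python3 xlate_cfg.py -i <template>.cfg -o <out>.cfg -x "<nsr>_xlate_dict.yml"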
+            if 'cfg_template' in vnf_cfg:
+                script_cmd = 'python3 {} -i {} -o {} -x "{}"'.format(vnf_cfg['xlate_script'], vnf_cfg['cfg_template'], vnf_cfg['cfg_file'], self.xlate_dict_file)
+                self._log.debug("xlate script command (%s)", script_cmd)
+                #xlate_msg = subprocess.check_output(script_cmd).decode('utf-8')
+                xlate_msg = subprocess.check_output(script_cmd, shell=True).decode('utf-8')
+                self._log.info("xlate script output (%s)", xlate_msg)
+        except Exception as e:
+            vnf_cm_state['state'] = self.state_to_string(conmanY.RecordState.CFG_PROCESS_FAILED)
+            self._log.error("Failed to execute translation script for VNF: %s with (%s)", log_this_vnf(vnf_cfg), str(e))
+            return
+
+        self._log.info("Applying config to VNF: %s = %s!", log_this_vnf(vnf_cfg), vnf_cfg)
+        try:
+            #self.vnf_cfg_list.append(vnf_cfg)
+            self._log.debug("Scheduled configuration!")
+            vnf_cm_state['state'] = self.state_to_string(conmanY.RecordState.CFG_SCHED)
+        except Exception as e:
+            self._log.error("Failed apply_vnf_config to VNF: %s as (%s)", log_this_vnf(vnf_cfg), str(e))
+            vnf_cm_state['state'] = self.state_to_string(conmanY.RecordState.CFG_PROCESS_FAILED)
+            raise
+
+    def add(self, nsr):
+        self._log.info("Adding NS Record for id=%s", id)
+        self._nsr = nsr
+
+    def sample_cm_state(self):
+        return (
+            {
+                'cm_nsr': [
+                    {
+                        'cm_vnfr': [
+                            {
+                                'cfg_location': 'location1',
+                                'cfg_type': 'script',
+                                'connection_point': [
+                                    {'ip_address': '1.1.1.1', 'name': 'vnf1cp1'},
+                                    {'ip_address': '1.1.1.2', 'name': 'vnf1cp2'}
+                                ],
+                                'id': 'vnfrid1',
+                                'mgmt_interface': {'ip_address': '7.1.1.1',
+                                                   'port': 1001},
+                                'name': 'vnfrname1',
+                                'state': 'init'
+                            },
+                            {
+                                'cfg_location': 'location2',
+                                'cfg_type': 'netconf',
+                                'connection_point': [{'ip_address': '2.1.1.1', 'name': 'vnf2cp1'},
+                                                     {'ip_address': '2.1.1.2', 'name': 'vnf2cp2'}],
+                                'id': 'vnfrid2',
+                                'mgmt_interface': {'ip_address': '7.1.1.2',
+                                                   'port': 1001},
+                                'name': 'vnfrname2',
+                                'state': 'init'}
+                        ],
+                        'id': 'nsrid1',
+                        'name': 'nsrname1',
+                        'state': 'init'}
+                ],
+                'states': 'Initialized, '
+            })
+
+    def populate_vm_state_from_vnf_cfg(self):
+        # Fill in each VNFR from this nsr object
+        vnfr_list = self._vnfr_list
+        for vnfr in vnfr_list:
+            vnf_cfg = vnfr['vnf_cfg']
+            vnf_cm_state = self.find_vnfr_cm_state(vnfr['id'])
+
+            if vnf_cm_state:
+                # Fill in VNF management interface
+                vnf_cm_state['mgmt_interface']['ip_address'] = vnf_cfg['mgmt_ip_address']
+                vnf_cm_state['mgmt_interface']['port'] = vnf_cfg['port']
+
+                # Fill in VNF configuration details
+                vnf_cm_state['cfg_type'] = vnf_cfg['config_method']
+                vnf_cm_state['cfg_location'] = vnf_cfg['cfg_file']
+
+                # Fill in each connection-point for this VNF
+                if "connection_point" in vnfr:
+                    cp_list = vnfr['connection_point']
+                    for cp_item_dict in cp_list:
+                        vnf_cm_state['connection_point'].append(
+                            {
+                                'name' : cp_item_dict['name'],
+                                'ip_address' : cp_item_dict['ip_address'],
+                            }
+                        )
+
+    def state_to_string(self, state):
+        state_dict = {
+            conmanY.RecordState.INIT : "init",
+            conmanY.RecordState.RECEIVED : "received",
+            conmanY.RecordState.CFG_PROCESS : "cfg_process",
+            conmanY.RecordState.CFG_PROCESS_FAILED : "cfg_process_failed",
+            conmanY.RecordState.CFG_SCHED : "cfg_sched",
+            conmanY.RecordState.CFG_DELAY : "cfg_delay",
+            conmanY.RecordState.CONNECTING : "connecting",
+            conmanY.RecordState.FAILED_CONNECTION : "failed_connection",
+            conmanY.RecordState.NETCONF_CONNECTED : "netconf_connected",
+            conmanY.RecordState.NETCONF_SSH_CONNECTED : "netconf_ssh_connected",
+            conmanY.RecordState.RESTCONF_CONNECTED : "restconf_connected",
+            conmanY.RecordState.CFG_SEND : "cfg_send",
+            conmanY.RecordState.CFG_FAILED : "cfg_failed",
+            conmanY.RecordState.READY_NO_CFG : "ready_no_cfg",
+            conmanY.RecordState.READY : "ready",
+        }
+        return state_dict[state]
+
+    def find_vnfr_cm_state(self, id):
+        if self.cm_nsr['cm_vnfr']:
+            for vnf_cm_state in self.cm_nsr['cm_vnfr']:
+                if vnf_cm_state['id'] == id:
+                    return vnf_cm_state
+        return None
+
+    def find_or_create_vnfr_cm_state(self, vnf_cfg):
+        vnfr = vnf_cfg['vnfr']
+        vnf_cm_state = self.find_vnfr_cm_state(vnfr['id'])
+
+        if vnf_cm_state is None:
+            # Not found, Create and Initialize this VNF cm-state
+            vnf_cm_state = {
+                'id' : vnfr['id'],
+                'name' : vnfr['short_name'],
+                'state' : self.state_to_string(conmanY.RecordState.RECEIVED),
+                'mgmt_interface' :
+                {
+                    'ip_address' : vnf_cfg['mgmt_ip_address'],
+                    'port' : vnf_cfg['port'],
+                },
+                'cfg_type' : vnf_cfg['config_method'],
+                'cfg_location' : vnf_cfg['cfg_file'],
+                'connection_point' : [],
+            }
+            self.cm_nsr['cm_vnfr'].append(vnf_cm_state)
+
+            # Publish newly created cm-state
+
+
+        return vnf_cm_state
+
+    @asyncio.coroutine
+    def get_vnf_cm_state(self, vnfr):
+        if vnfr:
+            vnf_cm_state = self.find_vnfr_cm_state(vnfr['id'])
+            if vnf_cm_state:
+                return vnf_cm_state['state']
+        return False
+
+    @asyncio.coroutine
+    def update_vnf_cm_state(self, vnfr, state):
+        if vnfr:
+            vnf_cm_state = self.find_vnfr_cm_state(vnfr['id'])
+            if vnf_cm_state is None:
+                self._log.error("No opdata found for NS/VNF:%s/%s!",
+                                self.nsr_name, vnfr['short_name'])
+                return
+
+            if vnf_cm_state['state'] != self.state_to_string(state):
+                old_state = vnf_cm_state['state']
+                vnf_cm_state['state'] = self.state_to_string(state)
+                # Publish new state
+                yield from self.publish_cm_state()
+                self._log.info("VNF ({}/{}/{}) state change: {} -> {}"
+                               .format(self.nsr_name,
+                                       vnfr['short_name'],
+                                       vnfr['member_vnf_index_ref'],
+                                       old_state,
+                                       vnf_cm_state['state']))
+
+        else:
+            self._log.error("No VNFR supplied for state update (NS=%s)!",
+                            self.nsr_name)
+
+    @property
+    def get_ns_cm_state(self):
+        return self.cm_nsr['state']
+
+    @asyncio.coroutine
+    def update_ns_cm_state(self, state, state_details=None):
+        if self.cm_nsr['state'] != self.state_to_string(state):
+            old_state = self.cm_nsr['state']
+            self.cm_nsr['state'] = self.state_to_string(state)
+            self.cm_nsr['state_details'] = state_details
+            self._log.info("NS ({}) state change: {} -> {}"
+                           .format(self.nsr_name,
+                                   old_state,
+                                   self.cm_nsr['state']))
+            # Publish new state
+            yield from self.publish_cm_state()
+
+    @asyncio.coroutine
+    def add_vnfr(self, vnfr, vnfr_msg):
+
+        @asyncio.coroutine
+        def populate_subnets_from_vlr(id):
+            try:
+                # Populate cp_dict with VLR subnet info
+                vlr = yield from self.dts_obj.get_vlr(id)
+                if vlr is not None and 'assigned_subnet' in vlr:
+                    subnet = {vlr.name:vlr.assigned_subnet}
+                    self._cp_dict[vnfr['member_vnf_index_ref']].update(subnet)
+                    self._cp_dict.update(subnet)
+                    self._log.debug("VNF:(%s) Updated assigned subnet = %s",
+                                    vnfr['short_name'], subnet)
+            except Exception as e:
+                self._log.error("VNF:(%s) VLR Error = %s",
+                                vnfr['short_name'], e)
+            
+        if vnfr['id'] not in self._vnfr_dict:
+            self._log.info("NSR(%s) : Adding VNF Record for name=%s, id=%s", self._nsr_id, vnfr['short_name'], vnfr['id'])
+            # Add this vnfr to the list for show, or single traversal
+            self._vnfr_list.append(vnfr)
+        else:
+            self._log.warning("NSR(%s) : VNF Record for name=%s, id=%s already exists, overwriting", self._nsr_id, vnfr['short_name'], vnfr['id'])
+
+        # Make vnfr available by id as well as by name
+        unique_name = get_vnf_unique_name(self.nsr_name, vnfr['short_name'], vnfr['member_vnf_index_ref'])
+        self._vnfr_dict[unique_name] = vnfr
+        self._vnfr_dict[vnfr['id']] = vnfr
+
+        # Create vnf_cfg dictionary with default values
+        vnf_cfg = {
+            'nsr_obj' : self,
+            'vnfr' : vnfr,
+            'agent_vnfr' : self.agent_nsr.add_vnfr(vnfr, vnfr_msg),
+            'nsr_name' : self.nsr_name,
+            'nsr_id' : self._nsr_id,
+            'vnfr_name' : vnfr['short_name'],
+            'member_vnf_index' : vnfr['member_vnf_index_ref'],
+            'port' : 0,
+            'username' : 'admin',
+            'password' : 'admin',
+            'config_method' : 'None',
+            'protocol' : 'None',
+            'mgmt_ip_address' : '0.0.0.0',
+            'cfg_file' : 'None',
+            'cfg_retries' : 0,
+            'script_type' : 'bash',
+        }
+
+        # Update the mgmt ip address
+        # In case the config method is none, this is not
+        # updated later
+        try:
+            vnf_cfg['mgmt_ip_address'] = vnfr_msg.mgmt_interface.ip_address
+            vnf_cfg['port'] = vnfr_msg.mgmt_interface.port
+        except Exception as e:
+            self._log.warn(
+                "VNFR {}({}), unable to retrieve mgmt ip address: {}".
+                format(vnfr['short_name'], vnfr['id'], e))
+
+        vnfr['vnf_cfg'] = vnf_cfg
+        self.find_or_create_vnfr_cm_state(vnf_cfg)
+
+        '''
+        Build the connection-points list for this VNF (self._cp_dict)
+        '''
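+        # Illustrative shape of self._cp_dict (values assumed):
+        #   { 'vnf1cp1': '1.1.1.1',            # global: CP name -> IP
+        #     1: {'vnf1cp1': '1.1.1.1'},       # per member-vnf-index CPs
+        #     'rw_mgmt_ip': '...', 'rw_username': '...', 'rw_password': '...' }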
+        # Populate global CP list self._cp_dict from VNFR
+        cp_list = []
+        if 'connection_point' in vnfr:
+            cp_list = vnfr['connection_point']
+
+        self._cp_dict[vnfr['member_vnf_index_ref']] = {}
+        if 'vdur' in vnfr:
+            for vdur in vnfr['vdur']:
+                if 'internal_connection_point' in vdur:
+                    cp_list += vdur['internal_connection_point']
+
+        for cp_item_dict in cp_list:
+            # Populate global dictionary
+            self._cp_dict[
+                cp_item_dict['name']
+            ] = cp_item_dict['ip_address']
+
+            # Populate unique member specific dictionary
+            self._cp_dict[
+                vnfr['member_vnf_index_ref']
+            ][
+                cp_item_dict['name']
+            ] = cp_item_dict['ip_address']
+
+            # Fill in the subnets from vlr
+            if 'vlr_ref' in cp_item_dict:
+                ### HACK: Internal connection_point do not have VLR reference
+                yield from populate_subnets_from_vlr(cp_item_dict['vlr_ref'])
+
+        if 'internal_vlr' in vnfr:
+            for ivlr in vnfr['internal_vlr']:
+                yield from populate_subnets_from_vlr(ivlr['vlr_ref'])
+                
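+        # Illustrative example (hypothetical values): after population,
+        # self._cp_dict might look like:
+        #   {
+        #     'ping_vnfd/cp0': '10.0.0.5',        # global CP name -> IP
+        #     1: {'ping_vnfd/cp0': '10.0.0.5'},   # same, keyed by member-vnf-index
+        #     'link_1': '10.0.0.0/24',            # VLR name -> assigned subnet
+        #   }
+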
+        # Update vnfr
+        vnf_cfg['agent_vnfr']._vnfr = vnfr
+        return vnf_cfg['agent_vnfr']
+
+
+class XPaths(object):
+    @staticmethod
+    def nsr_opdata(k=None):
+        return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
+                ("[nsr:ns-instance-config-ref='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def nsd_msg(k=None):
+        return ("C,/nsd:nsd-catalog/nsd:nsd" +
+                "[nsd:id = '{}']".format(k) if k is not None else "")
+
+    @staticmethod
+    def vnfr_opdata(k=None):
+        return ("D,/vnfr:vnfr-catalog/vnfr:vnfr" +
+                ("[vnfr:id='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def config_agent(k=None):
+        return ("D,/rw-config-agent:config-agent/rw-config-agent:account" +
+                ("[rw-config-agent:name='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def nsr_config(k=None):
+        return ("C,/nsr:ns-instance-config/nsr:nsr[nsr:id='{}']".format(k) if k is not None else "")
+
+    @staticmethod
+    def vlr(k=None):
+        return ("D,/vlr:vlr-catalog/vlr:vlr[vlr:id='{}']".format(k) if k is not None else "")
+
+class ConfigManagerDTS(object):
+    ''' This class either reads from DTS or publishes to DTS '''
+
+    def __init__(self, log, loop, parent, dts):
+        self._log = log
+        self._loop = loop
+        self._parent = parent
+        self._dts = dts
+
+    @asyncio.coroutine
+    def _read_dts(self, xpath, do_trace=False):
+        self._log.debug("_read_dts path = %s", xpath)
+        flags = rwdts.XactFlag.MERGE
+        res_iter = yield from self._dts.query_read(
+                xpath, flags=flags
+                )
+
+        results = []
+        try:
+            for i in res_iter:
+                result = yield from i
+                if result is not None:
+                    results.append(result.result)
+        except Exception as e:
+            self._log.debug("Query for %s raised: %s", xpath, str(e))
+
+        return results
+
+
+    @asyncio.coroutine
+    def get_nsr(self, id):
+        self._log.debug("Attempting to get NSR: %s", id)
+        nsrl = yield from self._read_dts(XPaths.nsr_opdata(id), False)
+        nsr = None
+        if len(nsrl) > 0:
+            nsr = nsrl[0].as_dict()
+        return nsr
+
+    @asyncio.coroutine
+    def get_nsr_config(self, id):
+        self._log.debug("Attempting to get config NSR: %s", id)
+        nsrl = yield from self._read_dts(XPaths.nsr_config(id), False)
+        nsr = None
+        if len(nsrl) > 0:
+            nsr = nsrl[0]
+        return nsr
+
+    @asyncio.coroutine
+    def get_nsd_msg(self, id):
+        self._log.debug("Attempting to get NSD: %s", id)
+        nsdl = yield from self._read_dts(XPaths.nsd_msg(id), False)
+        nsd_msg = None
+        if len(nsdl) > 0:
+            nsd_msg = nsdl[0]
+        return nsd_msg
+
+    @asyncio.coroutine
+    def get_nsd(self, nsr_id):
+        self._log.debug("Attempting to get NSD for NSR: %s", id)
+        nsr_config = yield from self.get_nsr_config(nsr_id)
+        nsd_msg = nsr_config.nsd
+        return nsd_msg
+
+    @asyncio.coroutine
+    def get_vnfr(self, id):
+        self._log.debug("Attempting to get VNFR: %s", id)
+        vnfrl = yield from self._read_dts(XPaths.vnfr_opdata(id), do_trace=False)
+        vnfr_msg = None
+        if len(vnfrl) > 0:
+            vnfr_msg = vnfrl[0]
+        return vnfr_msg
+
+    @asyncio.coroutine
+    def get_vlr(self, id):
+        self._log.debug("Attempting to get VLR subnet: %s", id)
+        vlrl = yield from self._read_dts(XPaths.vlr(id), do_trace=True)
+        vlr_msg = None
+        if len(vlrl) > 0:
+            vlr_msg = vlrl[0]
+        return vlr_msg
+
+    @asyncio.coroutine
+    def get_config_agents(self, name):
+        self._log.debug("Attempting to get config_agents: %s", name)
+        cfgagentl = yield from self._read_dts(XPaths.config_agent(name), False)
+        return cfgagentl
+
+    @asyncio.coroutine
+    def update(self, path, msg, flags=rwdts.XactFlag.REPLACE):
+        """
+        Update a cm-state (cm-nsr) record in DTS with the path and message
+        """
+        self._log.debug("Updating cm-state %s:%s dts_pub_hdl = %s", path, msg, self.dts_pub_hdl)
+        self.dts_pub_hdl.update_element(path, msg, flags)
+        self._log.debug("Updated cm-state, %s:%s", path, msg)
+
+    @asyncio.coroutine
+    def delete(self, path):
+        """
+        Delete cm-nsr record in DTS with the path only
+        """
+        self._log.debug("Deleting cm-nsr %s dts_pub_hdl = %s", path, self.dts_pub_hdl)
+        self.dts_pub_hdl.delete_element(path)
+        self._log.debug("Deleted cm-nsr, %s", path)
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self.register_to_publish()
+        yield from self.register_for_nsr()
+        
+    @asyncio.coroutine
+    def register_to_publish(self):
+        ''' Register to DTS for publishing cm-state opdata '''
+
+        xpath = "D,/rw-conman:cm-state/rw-conman:cm-nsr"
+        self._log.debug("Registering to publish cm-state @ %s", xpath)
+        hdl = rift.tasklets.DTS.RegistrationHandler()
+        with self._dts.group_create() as group:
+            self.dts_pub_hdl = group.register(xpath=xpath,
+                                              handler=hdl,
+                                              flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ)
+
+    @property
+    def nsr_xpath(self):
+        return "D,/nsr:ns-instance-opdata/nsr:nsr"
+
+    @asyncio.coroutine
+    def register_for_nsr(self):
+        """ Register for NSR changes """
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, query_action, ks_path, msg):
+            """ This NSR is created """
+            self._log.debug("Received NSR instantiate on_prepare (%s:%s:%s)",
+                            query_action,
+                            ks_path,
+                            msg)
+
+            if (query_action == rwdts.QueryAction.UPDATE or
+                query_action == rwdts.QueryAction.CREATE):
+                msg_dict = msg.as_dict()
+                # Update each NSR/VNFR state
+                if ('operational_status' in msg_dict and
+                    msg_dict['operational_status'] == 'running'):
+                    # Add to the task list
+                    self._parent.add_to_pending_tasks({'nsrid' : msg_dict['ns_instance_config_ref'], 'retries' : 5})
+            elif query_action == rwdts.QueryAction.DELETE:
+                nsr_id = msg.ns_instance_config_ref
+                asyncio.ensure_future(self._parent.terminate_NSR(nsr_id), loop=self._loop)
+            else:
+                raise NotImplementedError(
+                    "{} action on cm-state not supported".format(query_action))
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        try:
+            handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
+            self.dts_reg_hdl = yield from self._dts.register(self.nsr_xpath,
+                                                             flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
+                                                             handler=handler)
+        except Exception as e:
+            self._log.error("Failed to register for NSR changes as %s", str(e))
+
+
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_events.py b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_events.py
new file mode 100644 (file)
index 0000000..f292a68
--- /dev/null
@@ -0,0 +1,367 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import ncclient
+import ncclient.asyncio_manager
+import tornado.httpclient as tornadoh
+import asyncio.subprocess
+import asyncio
+import time
+import sys
+import os, stat
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwYang', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+
+from gi.repository import (
+    RwDts as rwdts,
+    RwYang,
+    RwConmanYang as conmanY,
+    RwNsrYang as nsrY,
+    RwVnfrYang as vnfrY,
+)
+
+import rift.tasklets
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+def log_this_vnf(vnf_cfg):
+    log_vnf = ""
+    used_item_list = ['nsr_name', 'vnfr_name', 'member_vnf_index', 'mgmt_ip_address']
+    for item in used_item_list:
+        if item in vnf_cfg:
+            if item == 'mgmt_ip_address':
+                log_vnf += "({})".format(vnf_cfg[item])
+            else:
+                log_vnf += "{}/".format(vnf_cfg[item])
+    return log_vnf
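+
+# Example (hypothetical values): for nsr_name='ns1', vnfr_name='ping_vnfd',
+# member_vnf_index=1 and mgmt_ip_address='10.0.0.5', log_this_vnf() returns
+# "ns1/ping_vnfd/1/(10.0.0.5)".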
+        
+class ConfigManagerROifConnectionError(Exception):
+    pass
+
+
+class ScriptError(Exception):
+    pass
+
+
+class ConfigManagerEvents(object):
+    def __init__(self, dts, log, loop, parent):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._parent = parent
+        self._nsr_xpath = "/cm-state/cm-nsr"
+        # Lazily created per-protocol config handles
+        self.ncc = None
+        self.rcc = None
+
+    @asyncio.coroutine
+    def register(self):
+        pass
+
+    @asyncio.coroutine
+    def update_vnf_state(self, vnf_cfg, state):
+        nsr_obj = vnf_cfg['nsr_obj']
+        yield from nsr_obj.update_vnf_cm_state(vnf_cfg['vnfr'], state)
+        
+    @asyncio.coroutine
+    def apply_vnf_config(self, vnf_cfg):
+        self._log.debug("apply_vnf_config VNF:{}"
+                        .format(log_this_vnf(vnf_cfg)))
+        
+        if vnf_cfg.get('config_delay'):
+            yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.CFG_DELAY)
+            yield from asyncio.sleep(vnf_cfg['config_delay'], loop=self._loop)
+            
+        # See if we are still alive!
+        if vnf_cfg['nsr_obj'].being_deleted:
+            # Don't do anything, just return
+            self._log.info("VNF : %s is being deleted, skipping configuration!",
+                           log_this_vnf(vnf_cfg))
+            return True
+            
+        yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.CFG_SEND)
+        try:
+            if vnf_cfg['config_method'] == 'netconf':
+                self._log.info("Creating ncc handle for VNF cfg = %s!", vnf_cfg)
+                self.ncc = ConfigManagerVNFnetconf(self._log, self._loop, self, vnf_cfg)
+                if vnf_cfg['protocol'] == 'ssh':
+                    yield from self.ncc.connect_ssh()
+                else:
+                    yield from self.ncc.connect()
+                yield from self.ncc.apply_edit_cfg()
+            elif vnf_cfg['config_method'] == 'rest':
+                if self.rcc is None:
+                    self._log.info("Creating rcc handle for VNF cfg = %s!", vnf_cfg)
+                    self.rcc = ConfigManagerVNFrestconf(self._log, self._loop, self, vnf_cfg)
+                yield from self.rcc.apply_edit_cfg()
+            elif vnf_cfg['config_method'] == 'script':
+                self._log.info("Executing script for VNF cfg = %s!", vnf_cfg)
+                scriptc = ConfigManagerVNFscriptconf(self._log, self._loop, self, vnf_cfg)
+                yield from scriptc.apply_edit_cfg()
+            elif vnf_cfg['config_method'] == 'juju':
+                self._log.info("Executing juju config for VNF cfg = %s!", vnf_cfg)
+                jujuc = ConfigManagerVNFjujuconf(self._log, self._loop, self._parent, vnf_cfg)
+                yield from jujuc.apply_edit_cfg()
+            else:
+                self._log.error("Unknown configuration method(%s) received for %s",
+                                vnf_cfg['config_method'], vnf_cfg['vnf_unique_name'])
+                yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.CFG_FAILED)
+                return True
+
+            #Update VNF state
+            yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.READY)
+            self._log.info("Successfully applied configuration to VNF: %s",
+                               log_this_vnf(vnf_cfg))
+        except Exception as e:
+            self._log.error("Applying configuration(%s) file(%s) to VNF: %s failed as: %s",
+                            vnf_cfg['config_method'],
+                            vnf_cfg['cfg_file'],
+                            log_this_vnf(vnf_cfg),
+                            str(e))
+            #raise
+            return False
+
+        return True
+        
+class ConfigManagerVNFscriptconf(object):
+
+    def __init__(self, log, loop, parent, vnf_cfg):
+        self._log = log
+        self._loop = loop
+        self._parent = parent
+        self._manager = None
+        self._vnf_cfg = vnf_cfg
+
+    @asyncio.coroutine
+    def apply_edit_cfg(self):
+        vnf_cfg = self._vnf_cfg
+        self._log.debug("Attempting to apply scriptconf to VNF: %s", log_this_vnf(vnf_cfg))
+        try:
+            st = os.stat(vnf_cfg['cfg_file'])
+            os.chmod(vnf_cfg['cfg_file'], st.st_mode | stat.S_IEXEC)
+            #script_msg = subprocess.check_output(vnf_cfg['cfg_file'], shell=True).decode('utf-8')
+
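+            # Runs the config script via its interpreter (e.g. "bash <cfg_file>",
+            # given the default script_type of 'bash') and captures stdout;
+            # a non-zero exit status is raised as ScriptError below.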
+            proc = yield from asyncio.create_subprocess_exec(
+                vnf_cfg['script_type'], vnf_cfg['cfg_file'],
+                stdout=asyncio.subprocess.PIPE)
+            script_msg = yield from proc.stdout.read()
+            rc = yield from proc.wait()
+
+            if rc != 0:
+                raise ScriptError(
+                    "script config returned error code : %s" % rc
+                    )
+
+            self._log.debug("config script output (%s)", script_msg)
+        except Exception as e:
+            self._log.error("Error (%s) while executing script config for VNF: %s",
+                            str(e), log_this_vnf(vnf_cfg))
+            raise
+
+class ConfigManagerVNFrestconf(object):
+
+    def __init__(self, log, loop, parent, vnf_cfg):
+        self._log = log
+        self._loop = loop
+        self._parent = parent
+        self._manager = None
+        self._vnf_cfg = vnf_cfg
+
+    def fetch_handle(self, response):
+        if response.error:
+            self._log.error("Failed to send HTTP config request - %s", response.error)
+        else:
+            self._log.debug("Sent HTTP config request - %s", response.body)
+
+    @asyncio.coroutine
+    def apply_edit_cfg(self):
+        vnf_cfg = self._vnf_cfg
+        self._log.debug("Attempting to apply restconf to VNF: %s", log_this_vnf(vnf_cfg))
+        try:
+            http_c = tornadoh.AsyncHTTPClient()
+            # TBD
+            # Read the config entity from file?
+            # Convert connectoin-point?
+            http_c.fetch("http://", self.fetch_handle)
+        except Exception as e:
+            self._log.error("Error (%s) while applying HTTP config", str(e))
+
+class ConfigManagerVNFnetconf(object):
+
+    def __init__(self, log, loop, parent, vnf_cfg):
+        self._log = log
+        self._loop = loop
+        self._parent = parent
+        self._manager = None
+        self._vnf_cfg = vnf_cfg
+
+        self._model = RwYang.Model.create_libncx()
+        self._model.load_schema_ypbc(conmanY.get_schema())
+
+    @asyncio.coroutine
+    def connect(self, timeout_secs=120):
+        vnf_cfg = self._vnf_cfg
+        start_time = time.time()
+        self._log.debug("connecting netconf .... %s", vnf_cfg)
+        while (time.time() - start_time) < timeout_secs:
+
+            try:
+                self._log.info("Attemping netconf connection to VNF: %s", log_this_vnf(vnf_cfg))
+
+                self._manager = yield from ncclient.asyncio_manager.asyncio_connect(
+                    loop=self._loop,
+                    host=vnf_cfg['mgmt_ip_address'],
+                    port=vnf_cfg['port'],
+                    username=vnf_cfg['username'],
+                    password=vnf_cfg['password'],
+                    allow_agent=False,
+                    look_for_keys=False,
+                    hostkey_verify=False,
+                )
+
+                self._log.info("Netconf connected to VNF: %s", log_this_vnf(vnf_cfg))
+                return
+
+            except ncclient.transport.errors.SSHError as e:
+                yield from self._parent.update_vnf_state(vnf_cfg, conmanY.RecordState.FAILED_CONNECTION)
+                self._log.error("Netconf connection to VNF: %s, failed: %s",
+                                log_this_vnf(vnf_cfg), str(e))
+
+            yield from asyncio.sleep(2, loop=self._loop)
+
+        raise ConfigManagerROifConnectionError(
+            "Failed to connect to VNF: %s within %s seconds" %
+            (log_this_vnf(vnf_cfg), timeout_secs)
+        )
+
+    @asyncio.coroutine
+    def connect_ssh(self, timeout_secs=120):
+        vnf_cfg = self._vnf_cfg
+        start_time = time.time()
+
+        if self._manager is not None and self._manager.connected:
+            self._log.debug("Disconnecting previous session")
+            self._manager.close_session()
+
+        self._log.debug("connecting netconf via SSH .... %s", vnf_cfg)
+        while (time.time() - start_time) < timeout_secs:
+
+            try:
+                yield from self._parent.update_vnf_state(vnf_cfg, conmanY.RecordState.CONNECTING)
+                self._log.debug("Attemping netconf connection to VNF: %s", log_this_vnf(vnf_cfg))
+
+                self._manager = ncclient.asyncio_manager.manager.connect_ssh(
+                    host=vnf_cfg['mgmt_ip_address'],
+                    port=vnf_cfg['port'],
+                    username=vnf_cfg['username'],
+                    password=vnf_cfg['password'],
+                    allow_agent=False,
+                    look_for_keys=False,
+                    hostkey_verify=False,
+                )
+
+                yield from self._parent.update_vnf_state(vnf_cfg, conmanY.RecordState.NETCONF_SSH_CONNECTED)
+                self._log.debug("netconf over SSH connected to VNF: %s", log_this_vnf(vnf_cfg))
+                return
+
+            except ncclient.transport.errors.SSHError as e:
+                yield from self._parent.update_vnf_state(vnf_cfg, conmanY.RecordState.FAILED_CONNECTION)
+                self._log.error("Netconf connection to VNF: %s, failed: %s",
+                                log_this_vnf(vnf_cfg), str(e))
+
+            yield from asyncio.sleep(2, loop=self._loop)
+
+        raise ConfigManagerROifConnectionError(
+            "Failed to connect to VNF: %s within %s seconds" %
+            (log_this_vnf(vnf_cfg), timeout_secs)
+        )
+
+    @asyncio.coroutine
+    def apply_edit_cfg(self):
+        vnf_cfg = self._vnf_cfg
+        self._log.debug("Attempting to apply netconf to VNF: %s", log_this_vnf(vnf_cfg))
+
+        if self._manager is None:
+            self._log.error("Netconf is not connected to VNF: %s, aborting!", log_this_vnf(vnf_cfg))
+            return
+
+        # Get config file contents
+        try:
+            with open(vnf_cfg['cfg_file']) as f:
+                configuration = f.read()
+        except Exception as e:
+            self._log.error("Reading contents of the configuration file(%s) failed: %s", vnf_cfg['cfg_file'], str(e))
+            return
+
+        self._log.debug("apply_edit_cfg to VNF: %s", log_this_vnf(vnf_cfg))
+        xml = '<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">{}</config>'.format(configuration)
+        response = yield from self._manager.edit_config(xml, target='running')
+        if hasattr(response, 'xml'):
+            response_xml = response.xml
+        else:
+            response_xml = response.data_xml.decode()
+
+        self._log.debug("apply_edit_cfg response: %s", response_xml)
+        if '<rpc-error>' in response_xml:
+            raise ConfigManagerROifConnectionError(
+                "apply_edit_cfg response has rpc-error : %s" % response_xml)
+
+        self._log.debug("apply_edit_cfg successfully applied configuration {%s}", xml)
+
+class ConfigManagerVNFjujuconf(object):
+
+    def __init__(self, log, loop, parent, vnf_cfg):
+        self._log = log
+        self._loop = loop
+        self._parent = parent
+        self._manager = None
+        self._vnf_cfg = vnf_cfg
+
+    @asyncio.coroutine
+    def apply_edit_cfg(self):
+        vnf_cfg = self._vnf_cfg
+        self._log.debug("Attempting to apply juju conf to VNF: %s", log_this_vnf(vnf_cfg))
+        try:
+            args = ['python3',
+                vnf_cfg['juju_script'],
+                '--server', vnf_cfg['mgmt_ip_address'],
+                '--user', vnf_cfg['user'],
+                '--password', vnf_cfg['secret'],
+                '--port', str(vnf_cfg['port']),
+                vnf_cfg['cfg_file']]
+            self._log.error("juju script command (%s)", args)
+
+            proc = yield from asyncio.create_subprocess_exec(
+                *args,
+                stdout=asyncio.subprocess.PIPE)
+            juju_msg = yield from proc.stdout.read()
+            rc = yield from proc.wait()
+
+            if rc != 0:
+                raise ScriptError(
+                    "Juju config returned error code : %s" % rc
+                    )
+
+            self._log.debug("Juju config output (%s)", juju_msg)
+        except Exception as e:
+            self._log.error("Error (%s) while executing juju config", str(e))
+            raise
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_test_config_template.cfg b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_test_config_template.cfg
new file mode 100644 (file)
index 0000000..d5342c2
--- /dev/null
@@ -0,0 +1,32 @@
+# This template has all supported TAGs.
+# This template can be used as input to the xlate_cfg.py script as follows:
+
+# python3 ./xlate_cfg.py -i ./rwconman_test_config_template.cfg -o ./rwconman_test_config.cfg -x ./rwconman_test_xlate_dict.yml
+
+
+# This is error
+#0. <rw_connection_point_name test/cp2>
+
+# Following are simple TAGs
+1. This is Management IP: <rw_mgmt_ip>
+2. This is Username: <rw_username>
+3. This is Password: <rw_password>
+4. This is globally unique connection point: <rw_connection_point_name test/cp1>
+
+# Following are colon separated complex TAGs
+5. This is connection point for a given VNF with unique member index: <rw_unique_index:rw_connection_point_name 2:test/cp1>
+6. This is converting connection point IP address into network address: <rw_connection_point:masklen_network test/cp1:24> 
+7. This is converting connection point IP address into broadcast address: <rw_connection_point:masklen_broadcast test/cp1:24>
+
+# The following generates a tuple with the original connection point name (global only)
+8. This is not used anywhere: <rw_connection_point_tuple test/cp1>
+
+# Following are multi-colon separated complex TAGs
+9. This is converting connection point IP address into network address for VNF with unique member index: <rw_unique_index:rw_connection_point:masklen_network 2:test/cp1:24>
+10. This is converting connection point IP address into broadcast address for VNF with unique member index: <rw_unique_index:rw_connection_point:masklen_broadcast 2:test/cp1:24>
+
+# Following test all of the above in single line
+11. All at once: START| rw_mgmt_ip: <rw_mgmt_ip> | rw_username: <rw_username> | rw_password: <rw_password> | global CP: <rw_connection_point_name test/cp1> | 1 CP: <rw_unique_index:rw_connection_point_name 1:test/cp1> | network: <rw_connection_point:masklen_network test/cp1:24> | broadcast: <rw_connection_point:masklen_broadcast test/cp1:24> | tuple: <rw_connection_point_tuple test/cp1> | 2 network: <rw_unique_index:rw_connection_point:masklen_network 2:test/cp1:24> | 2 broadcast: <rw_unique_index:rw_connection_point:masklen_broadcast 2:test/cp1:24> |END
+
+# Need to work on a solution for multiple patterns of the same type in a single line.
+
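+# For illustration, given the sample dictionary in rwconman_test_xlate_dict.yml
+# (rw_mgmt_ip: 1.1.1.1, test/cp1: 11.0.0.3 globally and 11.0.0.2 for member
+# index 2), the tags above would render as, e.g.:
+#   1. This is Management IP: 1.1.1.1
+#   5. This is connection point for a given VNF with unique member index: 11.0.0.2
+#   6. This is converting connection point IP address into network address: 11.0.0.0
+#   7. This is converting connection point IP address into broadcast address: 11.0.0.255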
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_test_xlate_dict.yml b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconman_test_xlate_dict.yml
new file mode 100644 (file)
index 0000000..becbff1
--- /dev/null
@@ -0,0 +1,8 @@
+1:
+  test/cp1: 11.0.0.1
+2:
+  test/cp1: 11.0.0.2
+test/cp1: 11.0.0.3
+rw_mgmt_ip: 1.1.1.1
+rw_username: admin
+rw_password: admin
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconmantasklet.py b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/rwconmantasklet.py
new file mode 100755 (executable)
index 0000000..7ea73c4
--- /dev/null
@@ -0,0 +1,352 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+'''
+This file - ConfigManagerTasklet()
+|
++--|--> ConfigurationManager()
+        |
+        +--> rwconman_config.py - ConfigManagerConfig()
+        |    |
+        |    +--> ConfigManagerNSR()
+        |
+        +--> rwconman_events.py - ConfigManagerEvents()
+             |
+             +--> ConfigManagerROif()
+
+'''
+
+import asyncio
+import logging
+import os
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+
+from gi.repository import (
+    RwDts as rwdts,
+    RwConmanYang as conmanY,
+)
+
+import rift.tasklets
+
+from . import rwconman_config as Config
+from . import rwconman_events as Event
+
+def log_this_vnf(vnf_cfg):
+    log_vnf = ""
+    used_item_list = ['nsr_name', 'vnfr_name', 'member_vnf_index', 'mgmt_ip_address']
+    for item in used_item_list:
+        if item in vnf_cfg:
+            if item == 'mgmt_ip_address':
+                log_vnf += "({})".format(vnf_cfg[item])
+            else:
+                log_vnf += "{}/".format(vnf_cfg[item])
+    return log_vnf
+
+class ConfigurationManager(object):
+    def __init__(self, log, loop, dts):
+        self._log            = log
+        self._loop           = loop
+        self._dts            = dts
+        self.cfg_sleep       = True
+        self.cfg_dir         = os.path.join(os.environ["RIFT_INSTALL"], "etc/conman")
+        self._config         = Config.ConfigManagerConfig(self._dts, self._log, self._loop, self)
+        self._event          = Event.ConfigManagerEvents(self._dts, self._log, self._loop, self)
+        self.pending_cfg     = []
+        self.pending_tasks   = {}
+        self._nsr_objs       = {}
+
+        self._handlers = [
+            self._config,
+            self._event,
+        ]
+
+
+    @asyncio.coroutine
+    def update_vnf_state(self, vnf_cfg, state):
+        nsr_obj = vnf_cfg['nsr_obj']
+        self._log.info("Updating cm-state for VNF(%s/%s) to:%s", nsr_obj.nsr_name, vnf_cfg['vnfr_name'], state)
+        yield from nsr_obj.update_vnf_cm_state(vnf_cfg['vnfr'], state)
+
+    @asyncio.coroutine
+    def update_ns_state(self, nsr_obj, state):
+        self._log.info("Updating cm-state for NS(%s) to:%s", nsr_obj.nsr_name, state)
+        yield from nsr_obj.update_ns_cm_state(state)
+
+    def add_to_pending(self, nsr_obj):
+
+        if (nsr_obj not in self.pending_cfg and
+            nsr_obj.cm_nsr['state'] == nsr_obj.state_to_string(conmanY.RecordState.RECEIVED)):
+
+            self._log.info("Adding NS={} to pending config list"
+                           .format(nsr_obj.nsr_name))
+
+            # Build the list
+            nsr_obj.vnf_cfg_list = []
+            # Sort all the VNF by their configuration attribute priority
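+            # (Assumed shape: nsr_cfg_config_attributes_dict maps a priority key
+            # to a list of config-attribute dicts such as [{'id': <vnfr-id>}, ...],
+            # so sorting by key yields the per-priority configuration order.)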
+            sorted_dict = dict(sorted(nsr_obj.nsr_cfg_config_attributes_dict.items()))
+            for config_attributes_dict in sorted_dict.values():
+                # Iterate through each priority level
+                for config_priority in config_attributes_dict:
+                    # Iterate through each vnfr at this priority level
+                    vnfr = nsr_obj._vnfr_dict[config_priority['id']]
+                    self._log.debug("Adding VNF:(%s) to pending cfg list", log_this_vnf(vnfr['vnf_cfg']))
+                    nsr_obj.vnf_cfg_list.append(vnfr['vnf_cfg'])
+            self.pending_cfg.append(nsr_obj)
+
+    def add_nsr_obj(self, nsr_obj):
+        self._log.debug("Adding nsr_obj (%s) to Configuration Manager", nsr_obj)
+        self._nsr_objs[nsr_obj.nsr_id] = nsr_obj
+
+    def remove_nsr_obj(self, nsr_id):
+        self._log.debug("Removing nsr_obj (%s) from Configuration Manager", nsr_id)
+        del self._nsr_objs[nsr_id]
+
+    def get_nsr_obj(self, nsr_id):
+        self._log.debug("Returning nsr_obj for NSR id %s from Configuration Manager", nsr_id)
+        return self._nsr_objs.get(nsr_id)
+
+    @asyncio.coroutine
+    def configuration_handler(self):
+        @asyncio.coroutine
+        def process_vnf_cfg(agent_vnfr, nsr_obj):
+            vnf_cfg = agent_vnfr.vnf_cfg
+            done = False
+
+            if vnf_cfg['cfg_retries']:
+                # This failed previously, lets give it some time
+                yield from asyncio.sleep(5, loop=self._loop)
+
+            vnf_cfg['cfg_retries'] += 1
+
+            # Check to see if this vnfr is managed
+            done = yield from self._config._config_agent_mgr.invoke_config_agent_plugins(
+                'apply_initial_config',
+                nsr_obj.agent_nsr,
+                agent_vnfr)
+            self._log.debug("Apply configuration for VNF={} on attempt {} " \
+                            "returned {}".format(log_this_vnf(vnf_cfg),
+                                                 vnf_cfg['cfg_retries'],
+                                                 done))
+
+            if done:
+                yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.READY)
+
+            else:
+                # Check to see if the VNF configure failed
+                status = yield from self._config._config_agent_mgr.invoke_config_agent_plugins(
+                    'get_config_status',
+                    nsr_obj.agent_nsr,
+                    agent_vnfr)
+
+                if status and status == 'error':
+                    # Failed configuration
+                    nsr_obj.vnf_failed = True
+                    done = True
+                    yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.CFG_FAILED)
+                    self._log.error("Failed to apply configuration for VNF = {}"
+                                    .format(log_this_vnf(vnf_cfg)))
+
+            return done
+
+        @asyncio.coroutine
+        def process_nsr_obj(nsr_obj):
+            # Return status; this will be set to False if we fail to configure any VNF
+            ret_status = True
+
+            # Reset VNF failed flag
+            nsr_obj.vnf_failed = False
+            vnf_cfg_list = nsr_obj.vnf_cfg_list
+            while vnf_cfg_list:
+                # Check to make sure the NSR is still valid
+                if nsr_obj.parent.is_nsr_valid(nsr_obj.nsr_id) is False:
+                    self._log.info("NSR {} not found, could be terminated".
+                                    format(nsr_obj.nsr_id))
+                    return
+
+                # Need while loop here, since we will be removing list item
+                vnf_cfg = vnf_cfg_list.pop(0)
+                self._log.info("Applying Pending Configuration for VNF = %s / %s",
+                               log_this_vnf(vnf_cfg), vnf_cfg['agent_vnfr'])
+                vnf_done = yield from process_vnf_cfg(vnf_cfg['agent_vnfr'], nsr_obj)
+                self._log.debug("Applied Pending Configuration for VNF = {}, status={}"
+                                .format(log_this_vnf(vnf_cfg), vnf_done))
+
+                if not vnf_done:
+                    # We will retry, but we will give the other VNFs a chance first since this one failed.
+                    vnf_cfg_list.append(vnf_cfg)
+
+            if nsr_obj.vnf_failed:
+                # At least one VNF config failed
+                ret_status = False
+
+            if ret_status:
+                # Apply NS initial config if present
+                nsr_obj.nsr_failed = False
+                self._log.debug("Apply initial config on NSR {}".format(nsr_obj.nsr_name))
+                try:
+                    yield from nsr_obj.parent.process_ns_initial_config(nsr_obj)
+                except Exception as e:
+                    nsr_obj.nsr_failed = True
+                    self._log.exception(e)
+                    ret_status = False
+
+            # Set the config status for the NSR
+            if ret_status:
+                yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.READY)
+            elif nsr_obj.vnf_failed or nsr_obj.nsr_failed:
+                yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_FAILED)
+            return ret_status
+
+        # Basically, this loop will never end.
+        while True:
+            # Check the pending tasks are complete
+            # Store a list of tasks that are completed and
+            # remove from the pending_tasks list outside loop
+            ids = []
+            for nsr_id, task in self.pending_tasks.items():
+                if task.done():
+                    ids.append(nsr_id)
+                    e = task.exception()
+                    if e:
+                        self._log.error("Exception in configuring nsr {}: {}".
+                                        format(nsr_id, e))
+                        nsr_obj = self.get_nsr_obj(nsr_id)
+                        if nsr_obj:
+                            yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_FAILED, str(e))
+
+                    else:
+                        rc = task.result()
+                        self._log.debug("NSR {} configured: {}".format(nsr_id, rc))
+                else:
+                    self._log.debug("NSR {} still configuring".format(nsr_id))
+
+            # Remove the completed tasks
+            for nsr_id in ids:
+                self.pending_tasks.pop(nsr_id)
+
+            # TODO (pjoseph): Fix this
+            # Sleep before processing any NS (Why are we getting multiple NSR running DTS updates?)
+            # If the sleep is not 10 seconds it does not quite work, NSM is marking it 'running'
+            # wrongfully 10 seconds in advance?
+            yield from asyncio.sleep(10, loop=self._loop)
+
+            if self.pending_cfg:
+                # get first NS, pending_cfg is nsr_obj list
+                nsr_obj = self.pending_cfg[0]
+                nsr_done = False
+                if nsr_obj.being_deleted is False:
+                    # Process this NS; returns the same obj if successful or if retries were exceeded
+                    try:
+                        self._log.info("Processing NSR:{}".format(nsr_obj.nsr_name))
+
+                        # Check if we already have a task running for this NSR
+                        # Case where we are still configuring and terminate is called
+                        if nsr_obj.nsr_id in self.pending_tasks:
+                            self._log.error("NSR {} in state {} has a configure task running.".
+                                            format(nsr_obj.nsr_name, nsr_obj.get_ns_cm_state()))
+                            # Terminate the task for this NSR
+                            self.pending_tasks[nsr_obj.nsr_id].cancel()
+
+                        yield from self.update_ns_state(nsr_obj, conmanY.RecordState.CFG_PROCESS)
+
+                        # Schedule as a separate task on the event loop
+                        self.pending_tasks[nsr_obj.nsr_id] = \
+                            self._loop.create_task(
+                                    process_nsr_obj(nsr_obj)
+                            )
+
+                        # Remove this nsr_obj
+                        self.pending_cfg.remove(nsr_obj)
+
+                    except Exception as e:
+                        self._log.error("Failed to process NSR as %s", str(e))
+                        self._log.exception(e)
+
+
+    @asyncio.coroutine
+    def register(self):
+        # Perform register() for all handlers
+        for reg in self._handlers:
+            yield from reg.register()
+
+        asyncio.ensure_future(self.configuration_handler(), loop=self._loop)
+
+class ConfigManagerTasklet(rift.tasklets.Tasklet):
+    def __init__(self, *args, **kwargs):
+        super(ConfigManagerTasklet, self).__init__(*args, **kwargs)
+        self.rwlog.set_category("rw-conman-log")
+
+        self._dts = None
+        self._con_man = None
+
+    def start(self):
+        super(ConfigManagerTasklet, self).start()
+
+        self.log.debug("Registering with dts")
+
+        self._dts = rift.tasklets.DTS(self.tasklet_info,
+                                      conmanY.get_schema(),
+                                      self.loop,
+                                      self.on_dts_state_change)
+
+        self.log.debug("Created DTS Api GI Object: %s", self._dts)
+
+    def on_instance_started(self):
+        self.log.debug("Got instance started callback")
+
+    @asyncio.coroutine
+    def init(self):
+        self._log.info("Initializing the Configuration-Manager tasklet")
+        self._con_man = ConfigurationManager(self.log,
+                                             self.loop,
+                                             self._dts)
+        yield from self._con_man.register()
+
+    @asyncio.coroutine
+    def run(self):
+        pass
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Take action according to current dts state to transition
+        application into the corresponding application state
+
+        Arguments
+            state - current dts state
+        """
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self._dts.handle.set_state(next_state)
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/xlate_cfg.py b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/xlate_cfg.py
new file mode 100644 (file)
index 0000000..add8a9a
--- /dev/null
@@ -0,0 +1,245 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+'''
+This script walks the input configuration template and translates every tag matching
+the regular expressions and strings listed in xlate_tags.yml (xlate_cp_list,
+xlate_str_list, etc.) into the IP addresses passed to this script in the
+translation dictionary.
+
+-i Input configuration template
+-o Output configuration file, complete with IP addresses
+-x Xlate (translation) dictionary, a YAML file
+'''
+
+import sys
+import getopt
+import ast
+import re
+import yaml
+import netaddr
+
+from inspect import getsourcefile
+import os.path
+
+xlate_dict = None
+
+def xlate_cp_list(line, cp_list):
+    for cp_string in cp_list:
+        match = re.search(cp_string, line)
+        if match is not None:
+            # resolve IP address using Connection Point dictionary
+            resolved_ip = xlate_dict.get(match.group(1))
+            if resolved_ip is None:
+                print("No matching CP found: ", match.group(1))
+                exit(2)
+            else:
+                line = line[:match.start()] + resolved_ip + line[match.end():]
+    return line
+
+def xlate_multi_colon_list(line, multi_colon_list):
+    for ucp_string in multi_colon_list:
+        #print("Searching :", ucp_string)
+        match = re.search(ucp_string, line)
+        if match is not None:
+            #print("match :", match.group())
+            # resolve IP address using Connection Point dictionary for specified member (unique) index
+            ucp_str_list = match.group(1).split(':')
+            print("matched = {}, split list = {}".format(match.group(1), ucp_str_list))
+            if len(ucp_str_list) != 3:
+                print("Invalid TAG in the configuration: ", match.group(1))
+                exit(2)
+
+            # Traslate given CP address & mask into netaddr
+            if ucp_string.startswith('<rw_unique_index:rw_connection_point:masklen'):
+                member_vnf_index = int(ucp_str_list[0])
+                resolved_ip = xlate_dict.get(ucp_str_list[1])
+                masklen = ucp_str_list[2]
+                if resolved_ip is None:
+                    print("No matching CP found: ", ucp_str_list[1])
+                    exit(2)
+                if int(masklen) <= 0:
+                    print("Invalid mask length: ", masklen)
+                    exit(2)
+                else:
+                    # Generate netaddr
+                    ip_str = resolved_ip + '/' + masklen
+                    #print("ip_str:", ip_str)
+                    ip = netaddr.IPNetwork(ip_str)
+                    if ucp_string.startswith('<rw_unique_index:rw_connection_point:masklen_broadcast'):
+                        # Traslate given CP address & mask into broadcast address
+                        addr = ip.broadcast
+                    if ucp_string.startswith('<rw_unique_index:rw_connection_point:masklen_network'):
+                        # Traslate given CP address & mask into network address
+                        addr = ip.network
+                    line = line[:match.start()] + str(addr) + line[match.end():]
+    return line
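+
+# For example (hypothetical values): netaddr.IPNetwork('11.0.0.3/24') yields
+# .network == IPAddress('11.0.0.0') and .broadcast == IPAddress('11.0.0.255'),
+# which is how the masklen_network / masklen_broadcast tags above are resolved.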
+
+
+
+def xlate_colon_list(line, colon_list):
+    for ucp_string in colon_list:
+        #print("Searching :", ucp_string)
+        match = re.search(ucp_string, line)
+        if match is not None:
+            #print("match :", match.group())
+            # resolve IP address using Connection Point dictionary for specified member (unique) index
+            ucp_str_list = match.group(1).split(':')
+            #print("matched = {}, split list = {}".format(match.group(1), ucp_str_list))
+            if len(ucp_str_list) != 2:
+                print("Invalid TAG in the configuration: ", match.group(1))
+                exit(2)
+
+            # Unique Connection Point translation to IP
+            if ucp_string.startswith('<rw_unique_index:'):
+                member_vnf_index = int(ucp_str_list[0])
+                resolved_ip = xlate_dict.get(member_vnf_index, {}).get(ucp_str_list[1])
+                #print("member_vnf_index = {}, resolved_ip = {}", member_vnf_index, resolved_ip)
+                if resolved_ip is None:
+                    print("For Unique index ({}), No matching CP found: {}", ucp_str_list[0], ucp_str_list[1])
+                    exit(2)
+                else:
+                    line = line[:match.start()] + resolved_ip + line[match.end():]
+
+            # Traslate given CP address & mask into netaddr
+            if ucp_string.startswith('<rw_connection_point:masklen'):
+                resolved_ip = xlate_dict[ucp_str_list[0]]
+                masklen = ucp_str_list[1]
+                if resolved_ip is None:
+                    print("No matching CP found: ", ucp_str_list[0])
+                    exit(2)
+                if int(masklen) <= 0:
+                    print("Invalid mask length: ", masklen)
+                    exit(2)
+                else:
+                    # Generate netaddr
+                    ip_str = resolved_ip + '/' + masklen
+                    #print("ip_str:", ip_str)
+                    ip = netaddr.IPNetwork(ip_str)
+                    
+                    if ucp_string.startswith('<rw_connection_point:masklen_broadcast'):
+                        # Traslate given CP address & mask into broadcast address
+                        addr = ip.broadcast
+                    if ucp_string.startswith('<rw_connection_point:masklen_network'):
+                        # Traslate given CP address & mask into network address
+                        addr = ip.network
+                        
+                    line = line[:match.start()] + str(addr) + line[match.end():]
+    return line
+
+def xlate_cp_to_tuple_list(line, cp_to_tuple_list):
+    for cp_string in cp_to_tuple_list:
+        match = re.search(cp_string, line)
+        if match is not None:
+            # resolve IP address using Connection Point dictionary
+            resolved_ip = xlate_dict.get(match.group(1))
+            if resolved_ip is None:
+                print("No matching CP found: ", match.group(1))
+                exit(2)
+            else:
+                line = line[:match.start()] + match.group(1) + ':'  + resolved_ip + line[match.end():]
+    return line
+
+def xlate_str_list(line, str_list):
+    for replace_tag in str_list:
+        replace_string = replace_tag[1:-1]
+        line = line.replace(replace_tag, xlate_dict[replace_string])
+    return line
+
+    
+def main(argv=sys.argv[1:]):
+    cfg_template = None
+    cfg_file = None
+    xlate_arg = None
+    global xlate_dict
+    try:
+        opts, args = getopt.getopt(argv,"i:o:x:")
+    except getopt.GetoptError:
+        print("Check arguments {}".format(argv))
+        sys.exit(2)
+    for opt, arg in opts:
+        if opt == '-i':
+            cfg_template = arg
+        elif opt == '-o':
+            cfg_file = arg
+        elif opt == '-x':
+            xlate_arg = arg
+
+    # Read the translation tags from the yaml file
+    yml_dir = os.path.dirname(os.path.abspath(getsourcefile(lambda:0)))
+    tags_input_file = os.path.join(yml_dir, 'xlate_tags.yml')
+    with open(tags_input_file, "r") as ti:
+        xlate_tags = yaml.load(ti.read())
+
+    # Need to work on a solution for multiple patterns of the same type in a single line.
+    try:
+        with open(xlate_arg, "r") as ti:
+            xlate_dict = yaml.load(ti.read())
+        try:
+            with open(cfg_template, 'r') as r:
+                try:
+                    with open(cfg_file, 'w') as w:
+                        # Translate
+                        try:
+                            # For each line
+                            for line in r:
+                                if line.startswith("#"):
+                                    # Skip comment lines
+                                    continue
+                                #print("1.Line : ", line)
+                                # For each Connection Point translation to IP
+                                line = xlate_cp_list(line, xlate_tags['xlate_cp_list'])
+                                #print("2.Line : ", line)
+                                
+                                # For each colon(:) separated tag, i.e. 3 inputs in a tag.
+                                line = xlate_multi_colon_list(line, xlate_tags['xlate_multi_colon_list'])
+                                #print("2a.Line : ", line)
+
+                                # For each colon(:) separated tag, i.e. 2 inputs in a tag.
+                                line = xlate_colon_list(line, xlate_tags['xlate_colon_list'])
+                                #print("3.Line : ", line)
+
+                                # For each connection point to tuple replacement
+                                line = xlate_cp_to_tuple_list(line, xlate_tags['xlate_cp_to_tuple_list'])
+                                #print("4.Line : ", line)
+
+                                # For each direct replacement (currently only management IP address for ping/pong)
+                                line = xlate_str_list(line, xlate_tags['xlate_str_list'])
+                                #print("5.Line : ", line)
+
+                                # Finally write the modified line to the new config file
+                                w.write(line)
+                        except Exception as e:
+                            print("Error ({}) on line: {}".format(str(e), line))
+                            exit(2)
+                except Exception as e:
+                    print("Failed to open for write: {}, error({})".format(cfg_file, str(e)))
+                    exit(2)
+        except Exception as e:
+            print("Failed to open for read: {}, error({})".format(cfg_template, str(e)))
+            exit(2)
+        print("Wrote configuration file", cfg_file)
+    except Exception as e:
+        print("Could not translate dictionary, error: ", str(e))
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as e:
+        print(str(e))
diff --git a/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/xlate_tags.yml b/rwcm/plugins/rwconman/rift/tasklets/rwconmantasklet/xlate_tags.yml
new file mode 100644 (file)
index 0000000..412e91e
--- /dev/null
@@ -0,0 +1,58 @@
+# """
+# # 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# @file xlate_tags.yml
+# @author Manish Patel (Manish.Patel@riftio.com)
+# @date 01/14/2016
+# """
+
+# This file contains the tags that need translation.
+# New tags can be added here, subject to the processing limitations of the translation script.
+
+# Add Regular expressions here (connection-points received dynamically from VNFR)
+
+# Translate connection point names (Connection point name is read using RegEx)
+
+xlate_cp_list :
+  - <rw_connection_point_name (.*?)>
+
+# Literal string translations
+xlate_str_list :
+  - <rw_mgmt_ip>
+  - <rw_username>
+  - <rw_password>
+
+# This list contains 2 tags separated by colon (:)
+xlate_colon_list :
+  # Fetch CP from the member_index dictionary (I.e. CP of a particular VNF)
+  - <rw_unique_index:rw_connection_point_name (.*?)>
+  # Generate network address from CP address and mask (mask is expected to be a hard coded number in config)
+  - <rw_connection_point:masklen_network (.*?)>
+  # Generate broadcast address from CP address and mask (mask is expected to be a hard coded number in config)
+  - <rw_connection_point:masklen_broadcast (.*?)>
+
+# This list contains 3 tags separated by colon (:)  
+xlate_multi_colon_list :
+  # Generate network address from CP of a particular VNF (mask is expected to be a hard coded number in config)
+  - <rw_unique_index:rw_connection_point:masklen_network (.*?)>
+  # Generate broadcast address from CP of a particular VNF (mask is expected to be a hard coded number in config)
+  - <rw_unique_index:rw_connection_point:masklen_broadcast (.*?)>
+
+# This translates connection point name and generates tuple with name:resolved IP
+xlate_cp_to_tuple_list :
+  - <rw_connection_point_tuple (.*?)>
+  
diff --git a/rwcm/plugins/rwconman/rwconmantasklet.py b/rwcm/plugins/rwconman/rwconmantasklet.py
new file mode 100755 (executable)
index 0000000..796c4af
--- /dev/null
@@ -0,0 +1,27 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwconmantasklet
+class Tasklet(rift.tasklets.rwconmantasklet.ConfigManagerTasklet):
+    pass
+
+# vim: sw=4
diff --git a/rwcm/plugins/yang/CMakeLists.txt b/rwcm/plugins/yang/CMakeLists.txt
new file mode 100644 (file)
index 0000000..cdb2734
--- /dev/null
@@ -0,0 +1,44 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Manish Patel
+# Creation Date: 10/28/2015
+# 
+
+##
+# Yang targets
+##
+
+rift_generate_python_log_yang(
+    LOG_CATEGORY_NAME rw-conman-log
+    START_EVENT_ID 66000
+    OUT_YANG_FILE_VAR rw_conman_log_file
+    )
+
+rift_add_yang_target(
+  TARGET rw_conman_yang
+  YANG_FILES rw-conman.yang ${rw_conman_log_file}
+  COMPONENT ${PKG_LONG_NAME}
+  LIBRARIES
+    mano_yang_gen
+    mano-types_yang_gen
+    rwconfig_agent_yang_gen
+  DEPENDS
+    mano_yang
+    rwconfig_agent_yang
+    mano-types_yang
+)
+
+
diff --git a/rwcm/plugins/yang/rw-conman.tailf.yang b/rwcm/plugins/yang/rw-conman.tailf.yang
new file mode 100644 (file)
index 0000000..aabbdd5
--- /dev/null
@@ -0,0 +1,38 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-conman-annotation
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-conman-annotation";
+  prefix "rw-conman-ann";
+
+  import tailf-common {
+    prefix tailf;
+  }
+
+  import rw-conman {
+    prefix conman;
+  }
+
+  tailf:annotate "/conman:cm-state" {
+    tailf:callpoint rw_callpoint;
+  }
+  
+}
diff --git a/rwcm/plugins/yang/rw-conman.yang b/rwcm/plugins/yang/rw-conman.yang
new file mode 100644 (file)
index 0000000..bb1555d
--- /dev/null
@@ -0,0 +1,260 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ */
+
+
+
+/**
+ * @file rw-conman.yang
+ * @author Manish Patel
+ * @date 2015/10/27
+ * @brief Service Orchestrator configuration yang
+ */
+
+module rw-conman
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-conman";
+  prefix "rw-conman";
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-cli-ext {
+    prefix "rwcli";
+  }
+
+  import nsr {
+    prefix "nsr";
+  }
+
+  import vnfr {
+    prefix "vnfr";
+  }
+
+  import rw-vlr {
+    prefix "rwvlr";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import ietf-inet-types {
+    prefix "inet";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import mano-base {
+    prefix "manobase";
+  }
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  import rw-config-agent {
+    prefix "rw-config-agent";
+  }
+
+  revision 2015-10-27 {
+    description
+      "Initial revision.";
+  }
+
+  // typedef ro-endpoint-method {
+  //   type enumeration {
+  //     enum netconf;
+  //     enum restconf;
+  //   }
+  // }
+  
+  grouping ro-endpoint {
+    // leaf ro-endpoint-method {
+    //   description "interface between CM & RO, defaults to netconf";
+    //   type ro-endpoint-method;
+    //   default netconf;
+    // }
+    leaf ro-ip-address {
+      type inet:ip-address;
+      description "IP Address";
+      default "127.0.0.1";
+    }
+    leaf ro-port {
+      type inet:port-number;
+      description "Port Number";
+      default 2022;
+    }
+    leaf ro-username {
+      description "RO endpoint username";
+      type string;
+      default "admin";
+    }
+    leaf ro-password {
+      description "RO endpoint password";
+      type string;
+      default "admin";
+    }
+  }
+
+  grouping vnf-cfg-items {
+    leaf configuration-file {
+      description "Location of the confguration file on CM system";
+      type string;
+    }
+    leaf translator-script {
+      description "Script that translates the templates in the configuration-file using VNFR information
+                   Currently, we only use IP address translations.
+                   configuration will use connection point name instead of IP addresses.";
+      type string;
+    }
+  }
+  
+  container cm-config {
+    description "Service Orchestrator specific configuration";
+    rwpb:msg-new "SoConfig";
+    rwcli:new-mode "cm-config";
+
+    container ro-endpoint {
+      description "Resource Orchestrator endpoint ip address";
+      rwpb:msg-new "RoEndpoint";
+      uses ro-endpoint;
+    }
+    
+    //uses vnf-cfg-items;
+
+    list nsr {
+      key "id";
+      leaf id {
+        description "Indicates NSR bringup complete, now initiate configuration of the NSR";
+        type yang:uuid;
+      }
+    }
+  }// cm-config
+  
+  // =================== SHOW ==================
+  typedef record-state {
+    type enumeration {
+      enum init;
+      enum received;
+      enum cfg-delay;
+      enum cfg-process;
+      enum cfg-process-failed;
+      enum cfg-sched;
+      enum connecting;
+      enum failed-connection;
+      enum netconf-connected;
+      enum netconf-ssh-connected;
+      enum restconf-connected;
+      enum cfg-send;
+      enum cfg-failed;
+      enum ready-no-cfg;
+      enum ready;
+    }
+  }
+
+  // TBD: Do we need this typedef? It is currently not used anywhere.
+  typedef cfg-type {
+    type enumeration {
+      enum none;
+      enum scriptconf;
+      enum netconf;
+      enum restconf;
+      enum jujuconf;
+    }
+  }
+
+
+  // This is also used by RO (Resource Orchestrator) to indicate NSR is ready
+  // It will only fill in IDs
+  container cm-state {
+    rwpb:msg-new "CmOpdata";
+    config false;
+    description "CM NS & VNF states";
+
+    leaf states {
+      description "CM various states";
+      type string;
+    }
+    
+    list cm-nsr {
+      description "List of NS Records";
+      key "id";
+      leaf id {
+        type yang:uuid;
+      }
+      leaf name {
+        description "NSR name.";
+        type string;
+      }
+      leaf state {
+        description "State of NSR";
+        type record-state;
+      }
+      leaf state-details {
+        description "Details of the state of NSR, in case of errors";
+        type string;
+      }
+      
+      list cm-vnfr {
+        description "List of VNF Records within NS Record";
+        key "id";
+        leaf id {
+          type yang:uuid;
+        }
+        leaf name {
+          description "VNFR name.";
+          type string;
+        }
+        leaf state {
+          description "Last known state of this VNFR";
+          type record-state;
+        }
+        container mgmt-interface {
+          leaf ip-address {
+            type inet:ip-address;
+          }
+          leaf port {
+            type inet:port-number;
+          }
+        }
+        leaf cfg-type {
+          type string;
+        }
+        leaf cfg-location {
+          type inet:uri;
+        }
+        list connection-point {
+          key "name";
+          leaf name {
+            description "Connection Point name";
+            type string;
+          }
+          leaf ip-address {
+            description "IP address assigned to this connection point";
+            type inet:ip-address;
+          }
+        }
+      } // list VNFR
+    } // list NSR
+  } // cm-state
+  
+} // rw-conman
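
As an illustration of the module above, the following is a hedged Python sketch that pushes the ro-endpoint configuration and reads back cm-state over NETCONF with ncclient. The host, credentials, and 'running' datastore target are assumptions; the container, leaf names, and namespace come from the module itself.

    from ncclient import manager

    RO_ENDPOINT_CFG = """
    <config xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
      <cm-config xmlns="http://riftio.com/ns/riftware-1.0/rw-conman">
        <ro-endpoint>
          <ro-ip-address>127.0.0.1</ro-ip-address>
          <ro-port>2022</ro-port>
          <ro-username>admin</ro-username>
          <ro-password>admin</ro-password>
        </ro-endpoint>
      </cm-config>
    </config>
    """

    # Assumed management endpoint; adjust host/port/credentials for your install.
    with manager.connect(host="127.0.0.1", port=2022, username="admin",
                         password="admin", hostkey_verify=False) as m:
        m.edit_config(target="running", config=RO_ENDPOINT_CFG)
        # Read back the operational cm-state container via a subtree filter
        reply = m.get(filter=("subtree",
                              '<cm-state xmlns="http://riftio.com/ns/riftware-1.0/rw-conman"/>'))
        print(reply)
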
diff --git a/rwcm/test/CMakeLists.txt b/rwcm/test/CMakeLists.txt
new file mode 100644 (file)
index 0000000..ead05af
--- /dev/null
@@ -0,0 +1,39 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Manish Patel
+# Creation Date: 10/28/2015
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(CONMAN_INSTALL "demos/conman")
+
+install(
+  FILES
+    start_cm_system.py
+    README.start_cm
+  DESTINATION ${CONMAN_INSTALL}
+  COMPONENT ${PKG_LONG_NAME})
+  
+# set(NS_NAME ping_pong_nsd)
+# install(
+#   FILES
+#     ${NS_NAME}/configuration_input_params.yml
+#     ${NS_NAME}/ping_vnfd_1_scriptconf_template.cfg
+#     ${NS_NAME}/pong_vnfd_11_scriptconf_template.cfg
+#   DESTINATION ${CONMAN_INSTALL}/${NS_NAME}
+#   COMPONENT ${PKG_LONG_NAME})
+
diff --git a/rwcm/test/README.start_cm b/rwcm/test/README.start_cm
new file mode 100644 (file)
index 0000000..7a8098b
--- /dev/null
@@ -0,0 +1,4 @@
+# The following is an example command line to launch the system in collapsed mode.
+# Please tailor it for expanded mode or any other requirements.
+
+./start_cm_system.py -m ethsim -c --skip-prepare-vm
diff --git a/rwcm/test/cwims_juju_nsd/configuration_input_params.yml b/rwcm/test/cwims_juju_nsd/configuration_input_params.yml
new file mode 100644 (file)
index 0000000..bbbe5bc
--- /dev/null
@@ -0,0 +1,20 @@
+
+# This is the input parameters file for Network Service configuration.
+# The file is formatted as follows:
+
+# configuration_delay : 120           # Number of seconds to wait before applying configuration after NS is up
+# number_of_vnfs_to_be_configured : 1 # Total number of VNFs in this NS to be configured by Service Orchestrator
+# 1 :                                 # Configuration Priority, order in which each VNF will be configured
+#   name : vnfd_name                  # Name of the VNF
+#   member_vnf_index : 11             # member index that makes the VNF unique (in case of multiple instances of the same VNF)
+#   configuration_type : scriptconf   # Type of configuration (Currently supported values : scriptconf, netconf, jujuconf)
+#
+# Repeat VNF block for as many VNFs
+
+configuration_delay : 30
+number_of_vnfs_to_be_configured : 1
+1 :
+  name : cwims_vnfd
+  member_vnf_index : 1
+  configuration_type : jujuconf
+
diff --git a/rwcm/test/cwims_juju_nsd/cwaio_vnfd_1_juju_template.cfg b/rwcm/test/cwims_juju_nsd/cwaio_vnfd_1_juju_template.cfg
new file mode 100644 (file)
index 0000000..d32efe3
--- /dev/null
@@ -0,0 +1,23 @@
+ims-a:
+  deploy:
+    store: local
+    directory: /usr/rift/charms/cw-aio-proxy/trusty/
+    series: trusty
+    to: "lxc:0"
+
+  # Data under 'config' is passed as-is during deployment
+  config:
+      proxied_ip: <rw_mgmt_ip>
+      home_domain: "ims.riftio.local"
+      base_number: "1234567000"
+      number_count: 1000
+
+  units:
+    - unit:
+        # Wait for each command to complete
+        wait: true
+        # Bail on failure
+        bail: true
+        actions:
+          - create-user: { number: "1234567001", password: "secret"}
+          - create-user: { number: "1234567002", password: "secret"}
diff --git a/rwcm/test/ping_pong_nsd/configuration_input_params.yml b/rwcm/test/ping_pong_nsd/configuration_input_params.yml
new file mode 100644 (file)
index 0000000..47c4fc3
--- /dev/null
@@ -0,0 +1,23 @@
+
+# This is the input parameters file for Network Service configuration.
+# The file is formatted as follows:
+
+# configuration_delay : 120           # Number of seconds to wait before applying configuration after NS is up
+# number_of_vnfs_to_be_configured : 1 # Total number of VNFs in this NS to be configured by Service Orchestrator
+# 1 :                                 # Configuration Priority, order in which each VNF will be configured
+#   name : vnfd_name                  # Name of the VNF
+#   member_vnf_index : 11             # member index that makes the VNF unique (in case of multiple instances of the same VNF)
+#   configuration_type : scriptconf   # Type of configuration (Currently supported values : scriptconf, netconf, jujuconf)
+#
+# Repeat VNF block for as many VNFs
+
+configuration_delay : 30
+number_of_vnfs_to_be_configured : 2
+1 :
+  name : pong_vnfd
+  member_vnf_index : 2
+  configuration_type : scriptconf
+2 :
+  name : ping_vnfd
+  member_vnf_index : 1
+  configuration_type : scriptconf
diff --git a/rwcm/test/ping_pong_nsd/ping_vnfd_1_scriptconf_template.cfg b/rwcm/test/ping_pong_nsd/ping_vnfd_1_scriptconf_template.cfg
new file mode 100755 (executable)
index 0000000..e6e9889
--- /dev/null
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+# Rest API config
+ping_mgmt_ip='<rw_mgmt_ip>'
+ping_mgmt_port=18888
+
+# VNF specific configuration
+pong_server_ip='<rw_connection_point_name pong_vnfd/cp0>'
+ping_rate=5
+server_port=5555
+
+# Make rest API calls to configure VNF
+curl -D /dev/stdout \
+    -H "Accept: application/vnd.yang.data+xml" \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d "{\"ip\":\"$pong_server_ip\", \"port\":$server_port}" \
+    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/server
+rc=$?
+if [ $rc -ne 0 ]
+then
+    echo "Failed to set server info for ping!"
+    exit $rc
+fi
+
+curl -D /dev/stdout \
+    -H "Accept: application/vnd.yang.data+xml" \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d "{\"rate\":$ping_rate}" \
+    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/rate
+rc=$?
+if [ $rc -ne 0 ]
+then
+    echo "Failed to set ping rate!"
+    exit $rc
+fi
+
+output=$(curl -D /dev/stdout \
+    -H "Accept: application/vnd.yang.data+xml" \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d "{\"enable\":true}" \
+    http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/adminstatus/state)
+if [[ $output == *"Internal Server Error"* ]]
+then
+    echo $output
+    exit 3
+else
+    echo $output
+fi
+
+
+exit 0
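
The <rw_mgmt_ip> and <rw_connection_point_name ...> tokens above are filled in by the translator script before the configuration is applied. Below is a hedged Python sketch of that substitution step, assuming a simple regex-driven lookup against resolved VNFR data (the resolver mapping is hypothetical):

    import re

    def xlate(template, mgmt_ip, cp_addrs):
        # cp_addrs maps "vnfd-name/cp-name" -> resolved IP address
        out = template.replace("<rw_mgmt_ip>", mgmt_ip)
        return re.sub(r"<rw_connection_point_name (.*?)>",
                      lambda m: cp_addrs[m.group(1)], out)

    cfg = xlate("pong_server_ip='<rw_connection_point_name pong_vnfd/cp0>'",
                "10.0.0.1", {"pong_vnfd/cp0": "11.0.0.2"})
    assert cfg == "pong_server_ip='11.0.0.2'"
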
diff --git a/rwcm/test/ping_pong_nsd/pong_vnfd_11_scriptconf_template.cfg b/rwcm/test/ping_pong_nsd/pong_vnfd_11_scriptconf_template.cfg
new file mode 100755 (executable)
index 0000000..28b01df
--- /dev/null
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Rest API configuration
+pong_mgmt_ip='<rw_mgmt_ip>'
+pong_mgmt_port=18889
+
+# Test
+# username=<rw_username>
+# password=<rw_password>
+
+# VNF specific configuration
+pong_server_ip='<rw_connection_point_name pong_vnfd/cp0>'
+server_port=5555
+
+# Make Rest API calls to configure VNF
+curl -D /dev/stdout \
+    -H "Accept: application/vnd.yang.data+xml" \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d "{\"ip\":\"$pong_server_ip\", \"port\":$server_port}" \
+    http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/server
+rc=$?
+if [ $rc -ne 0 ]
+then
+    echo "Failed to set server(own) info for pong!"
+    exit $rc
+fi
+
+curl -D /dev/stdout \
+    -H "Accept: application/vnd.yang.data+xml" \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d "{\"enable\":true}" \
+    http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/adminstatus/state
+rc=$?
+if [ $rc -ne 0 ]
+then
+    echo "Failed to enable pong service!"
+    exit $rc
+fi
+
+exit 0
diff --git a/rwcm/test/rwso_test.py b/rwcm/test/rwso_test.py
new file mode 100755 (executable)
index 0000000..e0c5011
--- /dev/null
@@ -0,0 +1,349 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import asyncio
+import logging
+import os
+import sys
+import types
+import unittest
+import uuid
+
+import xmlrunner
+
+import gi.repository.CF as cf
+import gi.repository.RwDts as rwdts
+import gi.repository.RwMain as rwmain
+import gi.repository.RwManifestYang as rwmanifest
+import gi.repository.RwConmanYang as conmanY
+import gi.repository.RwLaunchpadYang as launchpadyang
+# rmgryang is referenced by the pool/reservation helpers below; the
+# rw-resource-mgr generated bindings are the presumed source.
+import gi.repository.RwResourceMgrYang as rmgryang
+
+import rift.tasklets
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+class RWSOTestCase(unittest.TestCase):
+    """
+    DTS GI interface unittests
+
+    Note:  Each test uses a list of asyncio.Events for staging through the
+    test.  These are required here because we bring up each coroutine
+    ("tasklet") at the same time and do not implement any retry
+    mechanisms.  For instance, this is used in numerous tests to make sure that
+    a publisher is up and ready before the subscriber sends queries.  Such
+    event lists should not be used in production software.
+    """
+    rwmain = None
+    tinfo = None
+    schema = None
+    id_cnt = 0
+
+    @classmethod
+    def setUpClass(cls):
+        msgbroker_dir = os.environ.get('MESSAGE_BROKER_DIR')
+        router_dir = os.environ.get('ROUTER_DIR')
+        cm_dir = os.environ.get('SO_DIR')
+
+        manifest = rwmanifest.Manifest()
+        manifest.init_phase.settings.rwdtsrouter.single_dtsrouter.enable = True
+
+        cls.rwmain = rwmain.Gi.new(manifest)
+        cls.tinfo = cls.rwmain.get_tasklet_info()
+
+        # Run router in mainq.  Eliminates some ill-diagnosed bootstrap races.
+        os.environ['RWDTS_ROUTER_MAINQ']='1'
+        cls.rwmain.add_tasklet(msgbroker_dir, 'rwmsgbroker-c')
+        cls.rwmain.add_tasklet(router_dir, 'rwdtsrouter-c')
+        cls.rwmain.add_tasklet(cm_dir, 'rwconmantasklet')
+
+        cls.log = rift.tasklets.logger_from_tasklet_info(cls.tinfo)
+        cls.log.setLevel(logging.DEBUG)
+
+        stderr_handler = logging.StreamHandler(stream=sys.stderr)
+        fmt = logging.Formatter(
+                '%(asctime)-23s %(levelname)-5s  (%(name)s@%(process)d:%(filename)s:%(lineno)d) - %(message)s')
+        stderr_handler.setFormatter(fmt)
+        cls.log.addHandler(stderr_handler)
+        cls.schema = conmanY.get_schema()
+
+    def setUp(self):
+        def scheduler_tick(self, *args):
+            self.call_soon(self.stop)
+            self.run_forever()
+
+        self.loop = asyncio.new_event_loop()
+        self.loop.scheduler_tick = types.MethodType(scheduler_tick, self.loop)
+        self.loop.set_debug(True)
+        os.environ["PYTHONASYNCIODEBUG"] = "1"
+        asyncio_logger = logging.getLogger("asyncio")
+        asyncio_logger.setLevel(logging.DEBUG)
+
+        self.asyncio_timer = None
+        self.stop_timer = None
+        self.id_cnt += 1
+
+    @asyncio.coroutine
+    def wait_tasklets(self):
+        yield from asyncio.sleep(1, loop=self.loop)
+
+    def run_until(self, test_done, timeout=30):
+        """
+        Attach the current asyncio event loop to rwsched and then run the
+        scheduler until the test_done function returns True or timeout seconds
+        pass.
+
+        @param test_done  - function which should return True once the test is
+                            complete and the scheduler no longer needs to run.
+        @param timeout    - maximum number of seconds to run the test.
+        """
+        def shutdown(*args):
+            if args:
+                self.log.debug('Shutting down loop due to timeout')
+
+            if self.asyncio_timer is not None:
+                self.tinfo.rwsched_tasklet.CFRunLoopTimerRelease(self.asyncio_timer)
+                self.asyncio_timer = None
+
+            if self.stop_timer is not None:
+                self.tinfo.rwsched_tasklet.CFRunLoopTimerRelease(self.stop_timer)
+                self.stop_timer = None
+
+            self.tinfo.rwsched_instance.CFRunLoopStop()
+
+        def tick(*args):
+            self.loop.call_later(0.1, self.loop.stop)
+            self.loop.run_forever()
+            if test_done():
+                shutdown()
+
+        self.asyncio_timer = self.tinfo.rwsched_tasklet.CFRunLoopTimer(
+            cf.CFAbsoluteTimeGetCurrent(),
+            0.1,
+            tick,
+            None)
+
+        self.stop_timer = self.tinfo.rwsched_tasklet.CFRunLoopTimer(
+            cf.CFAbsoluteTimeGetCurrent() + timeout,
+            0,
+            shutdown,
+            None)
+
+        self.tinfo.rwsched_tasklet.CFRunLoopAddTimer(
+            self.tinfo.rwsched_tasklet.CFRunLoopGetCurrent(),
+            self.stop_timer,
+            self.tinfo.rwsched_instance.CFRunLoopGetMainMode())
+
+        self.tinfo.rwsched_tasklet.CFRunLoopAddTimer(
+            self.tinfo.rwsched_tasklet.CFRunLoopGetCurrent(),
+            self.asyncio_timer,
+            self.tinfo.rwsched_instance.CFRunLoopGetMainMode())
+
+        self.tinfo.rwsched_instance.CFRunLoopRun()
+
+        self.assertTrue(test_done())
+
+    def new_tinfo(self, name):
+        """
+        Create a new tasklet info instance with a unique instance_id per test.
+        It is up to each test to use unique names if more than one tasklet info
+        instance is needed.
+
+        @param name - name of the "tasklet"
+        @return     - new tasklet info instance
+        """
+        ret = self.rwmain.new_tasklet_info(name, RWSOTestCase.id_cnt)
+
+        log = rift.tasklets.logger_from_tasklet_info(ret)
+        log.setLevel(logging.DEBUG)
+
+        stderr_handler = logging.StreamHandler(stream=sys.stderr)
+        fmt = logging.Formatter(
+                '%(asctime)-23s %(levelname)-5s  (%(name)s@%(process)d:%(filename)s:%(lineno)d) - %(message)s')
+        stderr_handler.setFormatter(fmt)
+        log.addHandler(stderr_handler)
+
+        return ret
+
+    def get_cloud_account_msg(self):
+        cloud_account = launchpadyang.CloudAccount()
+        cloud_account.name = "cloudy"
+        cloud_account.account_type = "mock"
+        cloud_account.mock.username = "rainy"
+        return cloud_account
+
+    def get_compute_pool_msg(self, name, pool_type):
+        pool_config = rmgryang.ResourcePools()
+        pool = pool_config.pools.add()
+        pool.name = name
+        pool.resource_type = "compute"
+        if pool_type == "static":
+            # Need to query CAL for resource
+            pass
+        else:
+            pool.max_size = 10
+        return pool_config
+
+    def get_network_pool_msg(self, name, pool_type):
+        pool_config = rmgryang.ResourcePools()
+        pool = pool_config.pools.add()
+        pool.name = name
+        pool.resource_type = "network"
+        if pool_type == "static":
+            # Need to query CAL for resource
+            pass
+        else:
+            pool.max_size = 4
+        return pool_config
+
+
+    def get_network_reserve_msg(self, xpath):
+        event_id = str(uuid.uuid4())
+        msg = rmgryang.VirtualLinkEventData()
+        msg.event_id = event_id
+        msg.request_info.name = "mynet"
+        msg.request_info.subnet = "1.1.1.0/24"
+        return msg, xpath.format(event_id)
+
+    def get_compute_reserve_msg(self,xpath):
+        event_id = str(uuid.uuid4())
+        msg = rmgryang.VDUEventData()
+        msg.event_id = event_id
+        msg.request_info.name = "mynet"
+        msg.request_info.image_id = "This is an image_id"
+        msg.request_info.vm_flavor.vcpu_count = 4
+        msg.request_info.vm_flavor.memory_mb = 8192*2
+        msg.request_info.vm_flavor.storage_gb = 40
+        c1 = msg.request_info.connection_points.add()
+        c1.name = "myport1"
+        c1.virtual_link_id = "This is a network_id"
+        return msg, xpath.format(event_id)
+
+    def test_create_resource_pools(self):
+        self.log.debug("STARTING - test_create_resource_pools")
+        tinfo = self.new_tinfo('poolconfig')
+        dts = rift.tasklets.DTS(tinfo, self.schema, self.loop)
+        pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
+        pool_records_xpath = "D,/rw-resource-mgr:resource-pool-records"
+        account_xpath = "C,/rw-launchpad:cloud-account"
+        compute_xpath = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id='{}']"
+        network_xpath = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data[event-id='{}']"
+
+        @asyncio.coroutine
+        def configure_cloud_account():
+            msg = self.get_cloud_account_msg()
+            self.log.info("Configuring cloud-account: %s",msg)
+            yield from dts.query_create(account_xpath,
+                                        rwdts.XactFlag.ADVISE,
+                                        msg)
+            yield from asyncio.sleep(3, loop=self.loop)
+
+        @asyncio.coroutine
+        def configure_compute_resource_pools():
+            msg = self.get_compute_pool_msg("virtual-compute", "dynamic")
+            self.log.info("Configuring compute-resource-pool: %s",msg)
+            yield from dts.query_create(pool_xpath,
+                                        rwdts.XactFlag.ADVISE,
+                                        msg)
+            yield from asyncio.sleep(3, loop=self.loop)
+
+
+        @asyncio.coroutine
+        def configure_network_resource_pools():
+            msg = self.get_network_pool_msg("virtual-network", "dynamic")
+            self.log.info("Configuring network-resource-pool: %s",msg)
+            yield from dts.query_create(pool_xpath,
+                                        rwdts.XactFlag.ADVISE,
+                                        msg)
+            yield from asyncio.sleep(3, loop=self.loop)
+
+
+        @asyncio.coroutine
+        def verify_resource_pools():
+            self.log.debug("Verifying test_create_resource_pools results")
+            res_iter = yield from dts.query_read(pool_records_xpath)
+            for result in res_iter:
+                response = yield from result
+                records = response.result.records
+                #self.assertEqual(len(records), 2)
+                #names = [i.name for i in records]
+                #self.assertTrue('virtual-compute' in names)
+                #self.assertTrue('virtual-network' in names)
+                for record in records:
+                    self.log.debug("Received Pool Record, Name: %s, Resource Type: %s, Pool Status: %s, Pool Size: %d, Busy Resources: %d",
+                                   record.name,
+                                   record.resource_type,
+                                   record.pool_status,
+                                   record.max_size,
+                                   record.busy_resources)
+        @asyncio.coroutine
+        def reserve_network_resources():
+            msg,xpath = self.get_network_reserve_msg(network_xpath)
+            self.log.debug("Sending create event to network-event xpath %s with msg: %s" % (xpath, msg))
+            yield from dts.query_create(xpath, rwdts.XactFlag.TRACE, msg)
+            yield from asyncio.sleep(3, loop=self.loop)
+            yield from dts.query_delete(xpath, rwdts.XactFlag.TRACE)
+
+        @asyncio.coroutine
+        def reserve_compute_resources():
+            msg,xpath = self.get_compute_reserve_msg(compute_xpath)
+            self.log.debug("Sending create event to compute-event xpath %s with msg: %s" % (xpath, msg))
+            yield from dts.query_create(xpath, rwdts.XactFlag.TRACE, msg)
+            yield from asyncio.sleep(3, loop=self.loop)
+            yield from dts.query_delete(xpath, rwdts.XactFlag.TRACE)
+
+        @asyncio.coroutine
+        def run_test():
+            yield from self.wait_tasklets()
+            yield from configure_cloud_account()
+            yield from configure_compute_resource_pools()
+            yield from configure_network_resource_pools()
+            yield from verify_resource_pools()
+            yield from reserve_network_resources()
+            yield from reserve_compute_resources()
+
+        future = asyncio.ensure_future(run_test(), loop=self.loop)
+        self.run_until(future.done)
+        if future.exception() is not None:
+            self.log.error("Caught exception during test")
+            raise future.exception()
+
+        self.log.debug("DONE - test_create_resource_pools")
+
+
+def main():
+    plugin_dir = os.path.join(os.environ["RIFT_INSTALL"], "usr/lib/rift/plugins")
+
+    if 'MESSAGE_BROKER_DIR' not in os.environ:
+        os.environ['MESSAGE_BROKER_DIR'] = os.path.join(plugin_dir, 'rwmsgbroker-c')
+
+    if 'ROUTER_DIR' not in os.environ:
+        os.environ['ROUTER_DIR'] = os.path.join(plugin_dir, 'rwdtsrouter-c')
+
+    if 'SO_DIR' not in os.environ:
+        os.environ['SO_DIR'] = os.path.join(plugin_dir, 'rwconmantasklet')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    unittest.main(testRunner=runner)
+
+if __name__ == '__main__':
+    main()
+
+# vim: sw=4
diff --git a/rwcm/test/start_cm_system.py b/rwcm/test/start_cm_system.py
new file mode 100755 (executable)
index 0000000..1975a0a
--- /dev/null
@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import logging
+import os
+import sys
+
+import rift.vcs
+import rift.vcs.demo
+import rift.vcs.vms
+
+from rift.vcs.ext import ClassProperty
+
+logger = logging.getLogger(__name__)
+
+
+class ConfigManagerTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents the SO (Service Orchestrator) tasklet.
+    """
+
+    def __init__(self, name='rwcmtasklet', uid=None):
+        """
+        Creates a ConfigManagerTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+        """
+        super(ConfigManagerTasklet, self).__init__(name=name, uid=uid)
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwconmantasklet')
+    plugin_name = ClassProperty('rwconmantasklet')
+
+
+# Construct the system. This system consists of 1 cluster in 1
+# colony. The master cluster houses the CLI and management VMs.
+sysinfo = rift.vcs.SystemInfo(
+        colonies=[
+            rift.vcs.Colony(
+                clusters=[
+                    rift.vcs.Cluster(
+                        name='master',
+                        virtual_machines=[
+                            rift.vcs.VirtualMachine(
+                                name='vm-so',
+                                ip='127.0.0.1',
+                                tasklets=[
+                                    rift.vcs.uAgentTasklet(),
+                                    ],
+                                procs=[
+                                    rift.vcs.CliTasklet(),
+                                    rift.vcs.DtsRouterTasklet(),
+                                    rift.vcs.MsgBrokerTasklet(),
+                                    rift.vcs.RestconfTasklet(),
+                                    ConfigManagerTasklet()
+                                    ],
+                                ),
+                            ]
+                        )
+                    ]
+                )
+            ]
+        )
+
+
+# Define the generic portmap.
+port_map = {}
+
+
+# Define a mapping from the placeholder logical names to the real
+# port names for each of the different modes supported by this demo.
+port_names = {
+    'ethsim': {
+    },
+    'pci': {
+    }
+}
+
+
+# Define the connectivity between logical port names.
+port_groups = {}
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s')
+
+    # Create a parser which includes all generic demo arguments
+    parser = rift.vcs.demo.DemoArgParser()
+
+    args = parser.parse_args(argv)
+
+    # Load demo info and create the Demo object
+    demo = rift.vcs.demo.Demo(sysinfo=sysinfo,
+                              port_map=port_map,
+                              port_names=port_names,
+                              port_groups=port_groups)
+
+    # Create the prepared system from the demo
+    system = rift.vcs.demo.prepared_system_from_demo_and_args(demo, args, netconf_trace_override=True)
+
+    # Start the prepared system
+    system.start()
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except rift.vcs.demo.ReservationError:
+        print("ERROR: unable to retrieve a list of IP addresses from the reservation system")
+        sys.exit(1)
+    except rift.vcs.demo.MissingModeError:
+        print("ERROR: you need to provide a mode to run the script")
+        sys.exit(1)
+    finally:
+        os.system("stty sane")
diff --git a/rwcm/test/tg_vrouter_ts_nsd/configuration_input_params.yml b/rwcm/test/tg_vrouter_ts_nsd/configuration_input_params.yml
new file mode 100644 (file)
index 0000000..b5a126f
--- /dev/null
@@ -0,0 +1,23 @@
+
+# This is the input parameters file for Network Service configuration.
+# The file is formatted as follows:
+
+# configuration_delay : 120           # Number of seconds to wait before applying configuration after NS is up
+# number_of_vnfs_to_be_configured : 1 # Total number of VNFs in this NS to be configured by Service Orchestrator
+# 1 :                                 # Configuration Priority, order in which each VNF will be configured
+#   name : vnfd_name                  # Name of the VNF
+#   member_vnf_index : 11             # member index that makes the VNF unique (in case of multiple instances of the same VNF)
+#   configuration_type : scriptconf   # Type of configuration (Currently supported values : scriptconf, netconf, jujuconf)
+#
+# Repeat VNF block for as many VNFs
+
+configuration_delay : 120
+number_of_vnfs_to_be_configured : 2
+1 :
+  name : trafsink_vnfd
+  member_vnf_index : 3
+  configuration_type : netconf
+2 :
+  name : trafgen_vnfd
+  member_vnf_index : 1
+  configuration_type : netconf
diff --git a/rwcm/test/tg_vrouter_ts_nsd/trafgen_vnfd_1_netconf_template.cfg b/rwcm/test/tg_vrouter_ts_nsd/trafgen_vnfd_1_netconf_template.cfg
new file mode 100644 (file)
index 0000000..02dfc85
--- /dev/null
@@ -0,0 +1,79 @@
+    <vnf-config xmlns="http://riftio.com/ns/riftware-1.0/mano-base">
+      <vnf>
+        <name>trafgen</name>
+        <instance>0</instance>
+        <network-context xmlns="http://riftio.com/ns/riftware-1.0/rw-vnf-base-config">
+          <name>trafgen-lb</name>
+          <interface>
+            <name>N1TenGi-1</name>
+            <bind>
+              <port>trafgen_vnfd/cp0</port>
+            </bind>
+          </interface>
+        </network-context>
+        <port xmlns="http://riftio.com/ns/riftware-1.0/rw-vnf-base-config">
+          <name>trafgen_vnfd/cp0</name>
+          <open/>
+          <application>
+            <rx>rw_trafgen</rx>
+            <tx>rw_trafgen</tx>
+          </application>
+          <receive-q-length>2</receive-q-length>
+          <port-identity>
+          <ip-address><rw_connection_point_name trafgen_vnfd/cp0></ip-address>
+          <port-mode>direct</port-mode>
+          </port-identity>
+          <trafgen xmlns="http://riftio.com/ns/riftware-1.0/rw-trafgen">
+            <transmit-params>
+              <transmit-mode>
+                <range/>
+              </transmit-mode>
+            </transmit-params>
+            <range-template>
+              <destination-mac>
+                <dynamic>
+                  <gateway><rw_connection_point_name vrouter_vnfd/cp0></gateway>
+                </dynamic>
+              </destination-mac>
+              <source-ip>
+                <start><rw_connection_point_name trafgen_vnfd/cp0></start>
+                <minimum><rw_connection_point_name trafgen_vnfd/cp0></minimum>
+                <maximum><rw_connection_point_name trafgen_vnfd/cp0></maximum>
+                <increment>1</increment>
+              </source-ip>
+              <destination-ip>
+                <start><rw_connection_point_name trafsink_vnfd/cp0></start>
+                <minimum><rw_connection_point_name trafsink_vnfd/cp0></minimum>
+                <maximum><rw_connection_point_name trafsink_vnfd/cp0></maximum>
+                <increment>1</increment>
+              </destination-ip>
+              <source-port>
+                <start>10000</start>
+                <minimum>10000</minimum>
+                <maximum>10128</maximum>
+                <increment>1</increment>
+              </source-port>
+              <destination-port>
+                <start>5678</start>
+                <minimum>5678</minimum>
+                <maximum>5678</maximum>
+                <increment>1</increment>
+              </destination-port>
+              <packet-size>
+                <start>512</start>
+                <minimum>512</minimum>
+                <maximum>512</maximum>
+                <increment>1</increment>
+              </packet-size>
+            </range-template>
+          </trafgen>
+        </port>
+      </vnf>
+    </vnf-config>
+    <logging xmlns="http://riftio.com/ns/riftware-1.0/rwlog-mgmt">
+      <sink>
+        <name>syslog</name>
+        <server-address><rw_mgmt_ip></server-address>
+        <port>514</port>
+      </sink>
+    </logging>
diff --git a/rwcm/test/tg_vrouter_ts_nsd/trafsink_vnfd_3_netconf_template.cfg b/rwcm/test/tg_vrouter_ts_nsd/trafsink_vnfd_3_netconf_template.cfg
new file mode 100644 (file)
index 0000000..6402201
--- /dev/null
@@ -0,0 +1,42 @@
+    <vnf-config xmlns="http://riftio.com/ns/riftware-1.0/mano-base">
+      <vnf>
+        <name>trafsink</name>
+        <instance>0</instance>
+        <network-context xmlns="http://riftio.com/ns/riftware-1.0/rw-vnf-base-config">
+          <name>lb-trafsink</name>
+          <interface>
+            <name>N3TenGigi-1</name>
+            <bind>
+              <port>trafsink_vnfd/cp0</port>
+            </bind>
+          </interface>
+        </network-context>
+        <port xmlns="http://riftio.com/ns/riftware-1.0/rw-vnf-base-config">
+          <name>trafsink_vnfd/cp0</name>
+          <open/>
+          <application>
+            <rx>rw_trafgen</rx>
+            <tx>rw_trafgen</tx>
+          </application>
+          <receive-q-length>2</receive-q-length>
+          <port-identity>
+          <ip-address><rw_connection_point_name trafsink_vnfd/cp0></ip-address>
+          <port-mode>direct</port-mode>
+          </port-identity>
+          <trafgen xmlns="http://riftio.com/ns/riftware-1.0/rw-trafgen">
+            <receive-param>
+              <receive-echo>
+                <on/>
+              </receive-echo>
+            </receive-param>
+          </trafgen>
+        </port>
+      </vnf>
+    </vnf-config>
+    <logging xmlns="http://riftio.com/ns/riftware-1.0/rwlog-mgmt">
+      <sink>
+        <name>syslog</name>
+        <server-address><rw_mgmt_ip></server-address>
+        <port>514</port>
+      </sink>
+    </logging>
diff --git a/rwlaunchpad/CMakeLists.txt b/rwlaunchpad/CMakeLists.txt
new file mode 100644 (file)
index 0000000..5a52897
--- /dev/null
@@ -0,0 +1,38 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Joshua Downer
+# Author(s): Austin Cormier
+# Creation Date: 5/12/2015
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(PKG_NAME rwlaunchpad)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+set(subdirs
+  mock
+  plugins
+  ra
+  test
+  )
+
+##
+# Include the subdirs
+##
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwlaunchpad/mock/CMakeLists.txt b/rwlaunchpad/mock/CMakeLists.txt
new file mode 100644 (file)
index 0000000..7695cda
--- /dev/null
@@ -0,0 +1,27 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+cmake_minimum_required(VERSION 2.0)
+
+include(rift_plugin)
+
+set(subdirs
+    plugins
+    )
+
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
+
diff --git a/rwlaunchpad/mock/README b/rwlaunchpad/mock/README
new file mode 100644 (file)
index 0000000..6f66c17
--- /dev/null
@@ -0,0 +1,44 @@
+
+To test the LP mocklet via the command line:
+
+Part 1:  Run the test server infrastructure
+
+1. ssh into a VM, navigate to your workspace rift root and run ./rift-shell
+2. navigate to:
+    modules/core/mc/rwmc/test
+
+3. run:
+    $ python3 ./mission_control.py -m ethsim -c --skip-prepare-vm --mock --skip-ui
+
+The --skip-ui option prevents the server infrastructure from loading Composer
+and the UI (to save load time, especially if you are going to be running
+the server in your own dev environment).
+
+Part 2: Run the mocklet
+
+1. repeat step 1 above
+2. navigate to:
+    modules/core/mc/rwlp_dts_mock
+
+
+3. If the rwlp_dts_mock/node_modules directory does not exist, run:
+
+    $ npm install
+
+4. Start the mocklet after the server (mission_control.py) has completed initialization
+
+To start the mocklet:
+
+    $ node lp_mock_client.js
+
+5. After the mocklet has started, open another terminal window (can be
+anywhere that can access the restconf server on your VM) and run the following:
+
+To seed the descriptors and instance config objects, run:
+
+    $ ./set_data.sh <vm-ip-address>
+
+Now you are ready to test retrieving an ns-instance-opdata object:
+
+    $ ./get_ns_instance_opdata.sh <vm-ip-address>
+
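
For reference, a hedged Python sketch of what get_ns_instance_opdata.sh presumably does. The RESTCONF port (8008) and the /api/operational path are assumptions based on RIFT.ware conventions; the Accept header mirrors the ones used by the script templates in this patch.

    import requests

    def get_ns_instance_opdata(vm_ip, username="admin", password="admin"):
        # Hypothetical RESTCONF URL for the operational ns-instance-opdata tree
        url = "http://{}:8008/api/operational/ns-instance-opdata".format(vm_ip)
        resp = requests.get(url,
                            auth=(username, password),
                            headers={"Accept": "application/vnd.yang.data+json"})
        resp.raise_for_status()
        return resp.json()

    # Example: get_ns_instance_opdata("<vm-ip-address>")
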
diff --git a/rwlaunchpad/mock/data/nfvi-metrics.json b/rwlaunchpad/mock/data/nfvi-metrics.json
new file mode 100644 (file)
index 0000000..8620b5e
--- /dev/null
@@ -0,0 +1,33 @@
+[
+    {
+        "nfvi_metric": {
+                "vm": {
+                    "active_vm": 1,
+                    "inactive_vm": 1
+                },
+                "memory": {
+                    "used": {
+                        "value": 1
+                    },
+                    "total": {
+                        "value": 2
+                    },
+                    "utilization": {
+                        "value": 1
+                    }
+                },
+                "storage" : {
+                    "used": {
+                        "value": 1
+                    },
+                    "total": {
+                        "value": 2
+                    },
+                    "utilization": {
+                        "value": 1
+                    }
+                }
+        }
+    }
+]
+
diff --git a/rwlaunchpad/mock/data/ns-instance-config.json b/rwlaunchpad/mock/data/ns-instance-config.json
new file mode 100644 (file)
index 0000000..29af367
--- /dev/null
@@ -0,0 +1,19 @@
+{
+    "nsr": [
+        {
+            "id": "a636c6de-6dd0-11e5-9e8f-6cb3113b406f",
+            "nsd-ref": "a631e8c6-663a-11e5-b122-6cb3113b406f",
+            "admin-status": "ENABLED"
+        },
+        {
+            "id": "c8c6cc24-6dd0-11e5-9e8f-6cb3113b406f",
+            "nsd-ref": "b631e8c6-663a-11e5-b122-6cb3113b406f",
+            "admin-status": "ENABLED"
+        },
+        {
+            "id": "c8c6cf3a-6dd0-11e5-9e8f-6cb3113b406f",
+            "nsd-ref": "c631e8c6-663a-11e5-b122-6cb3113b406f",
+            "admin-status": "DISABLED"
+        }
+    ]
+}
diff --git a/rwlaunchpad/mock/data/nsd_catalog.json b/rwlaunchpad/mock/data/nsd_catalog.json
new file mode 100644 (file)
index 0000000..0c6c6ec
--- /dev/null
@@ -0,0 +1,44 @@
+{
+    "nsd": [
+        {
+            "id": "a631e8c6-663a-11e5-b122-6cb3113b406f",
+            "name": "Network Service Descriptor 1",
+            "short-name": "NSD1",
+            "vendor": "RIFT.io",
+            "description": "This is a description. It doesn't say much",
+            "version": "0.0.1",
+            "connection-point": [
+                {
+                    "name": "cp-name"
+                }
+            ]
+        },
+        {
+            "id": "b631e8c6-663a-11e5-b122-6cb3113b406f",
+            "name": "Network Service Descriptor 2",
+            "short-name": "NSD2",
+            "vendor": "RIFT.io",
+            "description": "This is a description. It doesn't say much",
+            "version": "0.0.1",
+            "connection-point": [
+                {
+                    "name": "cp-name"
+                }
+            ]
+        },
+        {
+            "id": "c631e8c6-663a-11e5-b122-6cb3113b406f",
+            "name": "Network Service Descriptor 3",
+            "short-name": "NSD3",
+            "vendor": "RIFT.io",
+            "description": "This is a description. It doesn't say much",
+            "version": "0.0.1",
+            "connection-point": [
+                {
+                    "name": "cp-name"
+                }
+            ]
+        }
+    ]
+}
+
diff --git a/rwlaunchpad/mock/data/nsr-templates.json b/rwlaunchpad/mock/data/nsr-templates.json
new file mode 100644 (file)
index 0000000..4c512e5
--- /dev/null
@@ -0,0 +1,57 @@
+[
+    {
+        "create_time": 1445876693,
+        "epa_param": {
+                "ovs_acceleration": {
+                    "vm": 2
+                },
+                "ovs_offload": {
+                    "vm": 2
+                },
+                "ddio": {
+                    "vm": 2
+                },
+                "cat": {
+                    "vm": 2
+                },
+                "cmt": {
+                    "vm": 2
+                }
+        },
+        "monitoring_param": [
+            {
+                "id": "monitoring-param-1",
+                "name": "rate",
+                "description": "Generalized rate monitoring param",
+                "group_tag": "group-a",
+                "min_value": 0,
+                "max_value": 100,
+                "current_value": 0,
+                "widget_type": "GAUGE",
+                "units": "gbps"
+            },
+            {
+                "id": "monitoring-param-2",
+                "name": "size",
+                "description": "Generalized size monitoring param",
+                "group_tag": "group-a",
+                "min_value": 0,
+                "max_value": 100,
+                "current_value": 0,
+                "widget_type": "GAUGE",
+                "units": "gb"
+            },
+            {
+                "id": "monitoring-param-3",
+                "name": "size22",
+                "description": "Generalized size monitoring param",
+                "group_tag": "group-b",
+                "min_value": 0,
+                "max_value": 100,
+                "current_value": 0,
+                "widget_type": "GAUGE",
+                "units": "gb"
+            }
+        ]
+    }
+]
diff --git a/rwlaunchpad/mock/data/ping-pong-ns-instance-config.json b/rwlaunchpad/mock/data/ping-pong-ns-instance-config.json
new file mode 100644 (file)
index 0000000..e7d6bb7
--- /dev/null
@@ -0,0 +1,10 @@
+{
+    "nsr": [
+        {
+            "id": "f5f41f36-78f6-11e5-b9ba-6cb3113b406f",
+            "nsd-ref": "da1dfbcc-626b-11e5-998d-6cb3113b406f",
+            "admin-status": "ENABLED"
+        }
+    ]
+}
+
diff --git a/rwlaunchpad/mock/data/ping-pong-nsd.json b/rwlaunchpad/mock/data/ping-pong-nsd.json
new file mode 100644 (file)
index 0000000..7ad9f6b
--- /dev/null
@@ -0,0 +1,118 @@
+{
+    "nsd": [
+        {
+            "id": "da1dfbcc-626b-11e5-998d-6cb3113b406f",
+            "name": "ping-pong-nsd",
+            "vendor": "RIFT.io",
+            "description": "Toy NS",
+            "version": "1.0",
+            "connection-point": [
+                {
+                    "name": "ping-pong-nsd/cp0",
+                    "type": "VPORT"
+                },
+                {
+                    "name": "ping-pong-nsd/cp1",
+                    "type": "VPORT"
+                }
+            ],
+            "vld": [
+                {
+                    "id": "ba1c03a8-626b-11e5-998d-6cb3113b406f",
+                    "name": "ping-pong-vld",
+                    "short-name": "ping-pong-vld",
+                    "vendor": "RIFT.io",
+                    "description": "Toy VL",
+                    "version": "1.0",
+                    "type": "ELAN",
+                    "vnfd-connection-point-ref": [
+                        {
+                            "member-vnf-index-ref": 0,
+                            "vnfd-id-ref": "ba145e82-626b-11e5-998d-6cb3113b406f",
+                            "vnfd-connection-point-ref": "ping-pong-vnfd/cp0"
+                        }
+                    ]
+                }
+            ],
+            "constituent-vnfd": [
+                {
+                    "member-vnf-index": 0,
+                    "vnfd-id-ref": "ba145e82-626b-11e5-998d-6cb3113b406f"
+                },
+                {
+                    "member-vnf-index": 1,
+                    "vnfd-id-ref": "ba1947da-626b-11e5-998d-6cb3113b406f"
+                }
+            ],
+            "monitoring-param": [
+                 {
+                    "id": "ping-tx-rate-mp",
+                    "name": "Ping Transmit Rate",
+                    "description": "Ping transmit rate",
+                    "group-tag": "group-1",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 0,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                },
+                {
+                    "id": "ping-rc-rate-mp",
+                    "name": "Ping Receive Rate",
+                    "description": "Ping receive rate",
+                    "group-tag": "group-1",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 0,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                },
+                {
+                    "id": "ping-packet-size-mp",
+                    "name": "Ping Packet Size",
+                    "description": "Ping packet size",
+                    "group-tag": "group-2",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                },
+                {
+                    "id": "pong-tx-rate-mp",
+                    "name": "Pong Transmit Rate 2",
+                    "description": "Pong transmit rate",
+                    "group-tag": "group-2",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 0,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                },
+                {
+                    "id": "pong-rc-rate-mp",
+                    "name": "Pong Receive Rate 2",
+                    "description": "Pong eceive rate",
+                    "group-tag": "group-2",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 0,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                },
+                {
+                    "id": "pong-packet-size-mp",
+                    "name": "Pong Packet Size",
+                    "description": "Pong packet size",
+                    "group-tag": "group-2",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "widget-type": "TEXTBOX",
+                    "units": "mb"
+                }
+            ]
+        }
+    ]
+}
+
diff --git a/rwlaunchpad/mock/data/ping-pong-vnfd.json b/rwlaunchpad/mock/data/ping-pong-vnfd.json
new file mode 100644 (file)
index 0000000..c96ee40
--- /dev/null
@@ -0,0 +1,396 @@
+{
+    "vnfd": [
+        {
+            "id": "ba145e82-626b-11e5-998d-6cb3113b406f",
+            "name": "ping-vnfd",
+            "short-name": "ping-vnfd",
+            "vendor": "RIFT.io",
+            "description": "This is an example RIFT.ware VNF",
+            "version": "1.0",
+            "internal-vld": [
+                {
+                    "id" : "ba1478fe-626b-11e5-998d-6cb3113b406f",
+                    "name": "fabric",
+                    "short-name": "fabric",
+                    "description": "Virtual link for internal fabric",
+                    "type": "ELAN"
+                }
+            ],
+            "connection-point": [
+                {
+                    "name": "ping-vnfd/cp0",
+                    "type": "VPORT"
+                },
+                {
+                    "name": "ping-vnfd/cp1",
+                    "type": "VPORT"
+                }
+            ],
+            "vdu": [
+                {
+                    "id": "ba14a504-626b-11e5-998d-6cb3113b406f",
+                    "name": "iovdu",
+                    "count": 2,
+                    "vm-flavor": {
+                        "vcpu-count": 4,
+                        "memory-mb": 1638,
+                        "storage-gb": 16
+                    },
+                    "guest-epa": {
+                        "trusted-execution": true,
+                        "mempage-size": "PREFER_LARGE",
+                        "cpu-pinning-policy": "DEDICATED",
+                        "cpu-thread-pinning-policy": "AVOID",
+                        "numa-node-policy": {
+                            "node-cnt": 2,
+                            "mem-policy": "PREFERRED",
+                            "node": [
+                                {
+                                    "id": 1,
+                                    "vcpu": [ 0, 1 ],
+                                    "memory-mb": 8192
+                                }
+                            ]
+                        }
+                    },
+                    "hypervisor-epa": {
+                            "type": "PREFER_KVM"
+                    },
+                    "host-epa": {
+                        "cpu-model": "PREFER_SANDYBRIDGE",
+                        "cpu-arch": "PREFER_X86_64",
+                        "cpu-vendor": "PREFER_INTEL",
+                        "cpu-socket-count": "PREFER_TWO",
+                        "cpu-feature": [ "PREFER_AES", "PREFER_CAT" ]
+                    },
+                    "image": "rw_openstack.qcow2",
+                    "internal-connection-point": [
+                        {
+                            "id": "ba153744-626b-11e5-998d-6cb3113b406f",
+                            "type": "VPORT"
+                        },
+                        {
+                            "id": "ba15577e-626b-11e5-998d-6cb3113b406f",
+                            "type": "VPORT"
+                        }
+                    ],
+                    "internal-interface": [
+                        {
+                            "name": "eth0",
+                            "vdu-internal-connection-point-ref": "ba153744-626b-11e5-998d-6cb3113b406f",
+                            "virtual-interface": {
+                                "type": "VIRTIO"
+                            }
+                        },
+                        {
+                            "name": "eth1",
+                            "vdu-internal-connection-point-ref": "ba15577e-626b-11e5-998d-6cb3113b406f",
+                            "virtual-interface": {
+                                "type": "VIRTIO"
+                            }
+                        }
+                    ],
+                    "external-interface": [
+                        {
+                            "name": "eth0",
+                            "vnfd-connection-point-ref": "ping-vnfd/cp0",
+                            "virtual-interface": {
+                                "type": "VIRTIO"
+                            }
+                        },
+                        {
+                            "name": "eth1",
+                            "vnfd-connection-point-ref": "ping-vnfd/cp1",
+                            "virtual-interface": {
+                                "type": "VIRTIO"
+                            }
+                        }
+                    ]
+                }
+            ],
+            "monitoring-param": [
+                {
+                    "id": "ping-tx-rate-mp",
+                    "name": "Ping Transmit Rate",
+                    "description": "Ping transmit rate",
+                    "group-tag": "group-1",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                },
+                {
+                    "id": "ping-rc-rate-mp",
+                    "name": "Ping Receive Rate",
+                    "description": "Ping receive rate",
+                    "group-tag": "group-1",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                },
+                {
+                    "id": "ping-packet-size-mp",
+                    "name": "Ping Packet Size",
+                    "description": "Ping packet size",
+                    "group-tag": "group-2",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                }
+            ],
+            "rw-vnfd:control-param": [
+                {
+                    "id": "ping-transmit-rate-cp1",
+                    "name": "Transmit Rate",
+                    "description": "Ping transmit rate",
+                    "group-tag": "group-3",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "step-value": 1,
+                    "units": "gbps",
+                    "widget-type": "GAUGE",
+                    "url": "https://%s/api/operations/set-control-param",
+                    "operation": "POST",
+                    "payload": "{\"set-control-param\":{\"id\":\"%s\",\"obj-code\":\"VNFR\",\"control-id\":\"ping-transmit-rate-cp1\",\"value\":10} }"
+                },
+                {
+                    "id": "ping-packet-size-cp1",
+                    "name": "Ping Packet Size",
+                    "description": "Packet size",
+                    "group-tag": "group-4",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "step-value": 1,
+                    "units": "gbps",
+                    "widget-type": "GAUGE",
+                    "url": "https://%s/api/operations/set-control-param",
+                    "operation": "POST",
+                    "payload": "{\"set-control-param\":{\"id\":\"%s\",\"obj-code\":\"VNFR\",\"control-id\":\"ping-packet-size-cp1\",\"value\":10 } }"
+                }
+            ],
+            "rw-vnfd:action-param" : [
+                {
+                    "id": "start-vnfr",
+                    "name": "Start PING",
+                    "description": "Start the PING VNFR",
+                    "group-tag": "start-vnfr",
+                    "url": "https://%s/api/operations/start-vnfr",
+                    "operation": "POST",
+                    "payload": "{\"start-vnfr\": { \"id\": \"%s\" }  }"
+                },
+                {
+                    "id": "stop-vnfr",
+                    "name": "Stop PING",
+                    "description": "Stop the PING VNFR",
+                    "group-tag": "stop-vnfr",
+                    "url": "https://%s/api/operations/stop-vnfr",
+                    "operation": "POST",
+                    "payload": "{\"stop-vnfr\": { \"id\": \"%s\" }  }"
+                }
+            ]
+        },
+        {
+            "id": "ba1947da-626b-11e5-998d-6cb3113b406f",
+            "name": "pong-vnfd",
+            "short-name": "pong-vnfd",
+            "vendor": "RIFT.io",
+            "description": "This is an example RIFT.ware VNF",
+            "version": "1.0",
+            "internal-vld": [
+                {
+                    "id" : "ba1478fe-626b-11e5-998d-6cb3113b406f",
+                    "name": "fabric",
+                    "short-name": "fabric",
+                    "description": "Virtual link for internal fabric",
+                    "type": "ELAN"
+                }
+            ],
+            "connection-point": [
+                {
+                    "name": "pong-vnfd/cp0",
+                    "type": "VPORT"
+                },
+                {
+                    "name": "pong-vnfd/cp1",
+                    "type": "VPORT"
+                }
+            ],
+            "vdu": [
+                {
+                    "id": "ba14a504-626b-11e5-998d-6cb3113b406f",
+                    "name": "iovdu",
+                    "count": 2,
+                    "vm-flavor": {
+                        "vcpu-count": 4,
+                        "memory-mb": 1638,
+                        "storage-gb": 16
+                    },
+                    "guest-epa": {
+                        "trusted-execution": true,
+                        "mempage-size": "PREFER_LARGE",
+                        "cpu-pinning-policy": "DEDICATED",
+                        "cpu-thread-pinning-policy": "AVOID",
+                        "numa-node-policy": {
+                            "node-cnt": 2,
+                            "mem-policy": "PREFERRED",
+                            "node": [
+                                {
+                                    "id": 1,
+                                    "vcpu": [ 0, 1 ],
+                                    "memory-mb": 8192
+                                }
+                            ]
+                        }
+                    },
+                    "hypervisor-epa": {
+                            "type": "PREFER_KVM"
+                    },
+                    "host-epa": {
+                        "cpu-model": "PREFER_SANDYBRIDGE",
+                        "cpu-arch": "PREFER_X86_64",
+                        "cpu-vendor": "PREFER_INTEL",
+                        "cpu-socket-count": "PREFER_TWO",
+                        "cpu-feature": [ "PREFER_AES", "PREFER_CAT" ]
+                    },
+                    "image": "rw_openstack.qcow2",
+                    "internal-connection-point": [
+                        {
+                            "id": "ba153744-626b-11e5-998d-6cb3113b406f",
+                            "type": "VPORT"
+                        },
+                        {
+                            "id": "ba15577e-626b-11e5-998d-6cb3113b406f",
+                            "type": "VPORT"
+                        }
+                    ],
+                    "internal-interface": [
+                        {
+                            "name": "eth0",
+                            "vdu-internal-connection-point-ref": "ba153744-626b-11e5-998d-6cb3113b406f",
+                            "virtual-interface": {
+                                "type": "VIRTIO"
+                            }
+                        },
+                        {
+                            "name": "eth1",
+                            "vdu-internal-connection-point-ref": "ba15577e-626b-11e5-998d-6cb3113b406f",
+                            "virtual-interface": {
+                                "type": "VIRTIO"
+                            }
+                        }
+                    ],
+                    "external-interface": [
+                        {
+                            "name": "eth0",
+                            "vnfd-connection-point-ref": "pong-vnfd/cp0",
+                            "virtual-interface": {
+                                "type": "VIRTIO"
+                            }
+                        },
+                        {
+                            "name": "eth1",
+                            "vnfd-connection-point-ref": "pong-vnfd/cp1",
+                            "virtual-interface": {
+                                "type": "VIRTIO"
+                            }
+                        }
+                    ]
+                }
+            ],
+            "monitoring-param": [
+                {
+                    "id": "pong-tx-rate-mp",
+                    "name": "Pong Transmit Rate",
+                    "description": "Pong transmit rate",
+                    "group-tag": "group-1",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                },
+                {
+                    "id": "pong-rx-rate-mp",
+                    "name": "Pong Receive Rate",
+                    "description": "Pong receive rate",
+                    "group-tag": "group-1",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "widget-type": "GAUGE",
+                    "units": "gbps"
+                },
+                {
+                    "id": "pong-packet-size-mp",
+                    "name": "Pong Packet Size",
+                    "description": "Pong packet size",
+                    "group-tag": "group-2",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 10,
+                    "widget-type": "TEXTBOX",
+                    "units": "mb"
+                }
+            ],
+            "rw-vnfd:control-param" : [
+                {
+                    "id": "pong-receive-rate-cp1",
+                    "name": "Pong Receive Rate",
+                    "description": "Pong receive rate",
+                    "group-tag": "group-3",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 0,
+                    "step-value": 1,
+                    "units": "gbps",
+                    "widget-type": "GAUGE",
+                    "url": "https://%s/api/operations/set-control-param",
+                    "operation": "POST",
+                    "payload": "{\"set-control-param\":{\"id\":\"%s\",\"obj-code\":\"VNFR\",\"control-id\":\"pong-receive-rate-cp1\",\"value\":10} }"
+                },
+                {
+                    "id": "pong-packet-size-cp1",
+                    "name": "Pong Packet Size",
+                    "description": "Packet size",
+                    "group-tag": "group-4",
+                    "min-value": 0,
+                    "max-value": 100,
+                    "current-value": 0,
+                    "step-value": 1,
+                    "units": "gbps",
+                    "widget-type": "GAUGE",
+                    "url": "https://%s/api/operations/set-control-param",
+                    "operation": "POST",
+                    "payload": "{\"set-control-param\":{\"id\":\"%s\",\"obj-code\":\"VNFR\",\"control-id\":\"pong-packet-size-cp1\",\"value\":10 } }"
+                }
+            ],
+            "rw-vnfd:action-param" : [
+                {
+                    "id": "start-vnfr",
+                    "name": "Start PONG",
+                    "description": "Start the PONG VNFR",
+                    "group-tag": "start-vnfr",
+                    "url": "https://%s/api/operations/start-vnfr",
+                    "operation": "POST",
+                    "payload": "{\"start-vnfr\": { \"id\": \"%s\" }  }"
+                },
+                {
+                    "id": "stop-vnfr",
+                    "name": "Stop PONG",
+                    "description": "Stop the PONG VNFR",
+                    "group-tag": "stop-vnfr",
+                    "url": "https://%s/api/operations/stop-vnfr",
+                    "operation": "POST",
+                    "payload": "{\"stop-vnfr\": { \"id\": \"%s\" }  }"
+                }
+            ]
+        }
+    ]
+}
diff --git a/rwlaunchpad/mock/data/simmp-rules.json b/rwlaunchpad/mock/data/simmp-rules.json
new file mode 100644 (file)
index 0000000..d92f835
--- /dev/null
@@ -0,0 +1,11 @@
+{
+    "description": "Rules for Simulating monitoring params",
+    "mp-mapper": {
+        "ping-tx-rate-mp": "tx_rc_rate",
+        "ping-rc-rate-mp": "tx_rc_rate",
+        "pong-tx-rate-mp": "tx_rc_rate",
+        "pong-rx-rate-mp": "tx_rc_rate",
+        "ping-packet-size-mp": "packet_size",
+        "pong-packet-size-mp": "packet_size"
+    }
+}
diff --git a/rwlaunchpad/mock/data/vld_catalog.json b/rwlaunchpad/mock/data/vld_catalog.json
new file mode 100644 (file)
index 0000000..0de0e29
--- /dev/null
@@ -0,0 +1,16 @@
+{
+    "vld": [
+        {
+            "id": "a631e8c6-663a-11e5-b122-6cb3113b406f",
+            "name": "vld-one"
+        },
+        {
+            "id": "b631e8c6-663a-11e5-b122-6cb3113b406f",
+            "name": "vld-two"
+        },
+        {
+            "id": "c631e8c6-663a-11e5-b122-6cb3113b406f",
+            "name": "vld-three"
+        }
+    ]
+}
diff --git a/rwlaunchpad/mock/data/vnfd_catalog.json b/rwlaunchpad/mock/data/vnfd_catalog.json
new file mode 100644 (file)
index 0000000..1951980
--- /dev/null
@@ -0,0 +1,47 @@
+{
+    "vnfd": [
+        {
+            "id": "a200a0a0-663a-11e5-b122-6cb3113b406f",
+            "name": "Virtual Network Function Descriptor 1",
+            "short-name": "VNFD1",
+            "vendor": "RIFT.io",
+            "description": "This is a description. It doesn't say much",
+            "version": "0.0.1",
+            "internal-vld": [
+                {
+                    "id": "68981800-7201-11e5-9fc4-bf5ad0442ce5",
+                    "name": "Zathrus",
+                    "short-name": "zathrus",
+                    "description": "Virtual link for zathrus",
+                    "type": "ELAN",
+                    "root-bandwidth": 42,
+                    "leaf-bandwidth": 42,
+                    "internal-connection-point-ref": []
+                }
+            ]
+        },
+        {
+            "id": "b200a0a0-663a-11e5-b122-6cb3113b406f",
+            "name": "vnfd-two",
+            "short-name": "VNFD2",
+            "vendor": "RIFT.io",
+            "description": "This is a description. It doesn't say much",
+            "version": "0.0.1",
+            "internal-vld": []
+        },
+        {
+            "id": "c200a0a0-663a-11e5-b122-6cb3113b406f",
+            "name": "vnfd-three",
+            "short-name": "VNFD03",
+            "vendor": "RIFT.io",
+            "description": "This is a description. It doesn't say much",
+            "version": "0.0.1",
+            "internal-vld": []
+        }
+    ]
+}
diff --git a/rwlaunchpad/mock/data/vnfr-templates.json b/rwlaunchpad/mock/data/vnfr-templates.json
new file mode 100644 (file)
index 0000000..a93dafb
--- /dev/null
@@ -0,0 +1,54 @@
+[
+    {
+        "action_param": [
+            {
+                "id": "actionparam01",
+                "name": "Start Me Up",
+                "description": "This is a description. It doesn't say much",
+                "group_tag": "control-group1",
+                "url": "http://localhost:8091/vnfr/1/start"
+            },
+            {
+                "id": "actionparam02",
+                "name": "Stop me",
+                "description": "This is a description. It doesn't say much",
+                "group_tag": "control-group1",
+                "url": "http://localhost:8091/vnfr/1/stop",
+                "operation": "POST"
+            }
+        ],
+        "control_param": [
+            {
+                "id": "controlparam01",
+                "name": "Control Param 1",
+                "description": "This is a description. It doesn't say much",
+                "group_tag": "control-group1",
+                "min_value": 0,
+                "max_value": 100,
+                "current_value": 0,
+                "step_value": 1,
+                "units": "smoots",
+                "widget_type": "HISTOGRAM",
+                "url": "http://localhost:8091/vnfr/1/control-1",
+                "operation": "POST",
+                "payload": "{ \"test\": \"sample value\" }"
+            },
+            {
+                "id": "controlparam02",
+                "name": "Control Param 2",
+                "description": "This is a description. It doesn't say much",
+                "group_tag": "control-group1",
+                "min_value": 0,
+                "max_value": 100,
+                "current_value": 0,
+                "step_value": 1,
+                "units": "smoots",
+                "widget_type": "GAUGE",
+                "url": "http://localhost:8091/vnfr/1/control-2",
+                "operation": "POST",
+                "payload": "{ \"test\": \"sample value\" }"
+            }
+        ]
+    }
+]
+
diff --git a/rwlaunchpad/mock/data_model.js b/rwlaunchpad/mock/data_model.js
new file mode 100644 (file)
index 0000000..ef56c68
--- /dev/null
@@ -0,0 +1,569 @@
+/*
+ *  This module provides the data model layer for the Launchpad Mocklet
+ */
+
+var util = require('util');
+var uuid = require('node-uuid');
+var _ = require('lodash');
+
+// Our modules
+var simmp_module = require('./simmp.js');
+
+// Data packages
+// TODO: Make these parameters to pass to the data model
+// instead of hardcoding them as requires here
+var simmp_rules = require('./data/simmp-rules.json');
+var nsr_templates = require('./data/nsr-templates.json');
+var vnfr_templates = require('./data/vnfr-templates.json');
+
+/*
+ * Generic exception thrown on data model errors
+ */
+function DataModelException(message) {
+    // Call sites pass printf-style format arguments, so run them
+    // through util.format to build the final message
+    this.message = util.format.apply(util, arguments);
+    this.name = "DataModelException";
+}
+
+/*
+ * Exception for unimplemented code paths.
+ * This exception is temporary until all needed features are implemented in this mocklet
+ */
+function NotImplementedException(message) {
+    // Like DataModelException, accept printf-style arguments from call sites
+    this.message = "You have fallen off the edge of the world: " +
+        util.format.apply(util, arguments);
+    this.name = 'NotImplementedException';
+}
+
+
+/*
+ * Class to handle simulating events over time for monitoring params
+ */
+var MonitoringParam = function(values, time_function) {
+    this.values = values;
+    this.timeFunc = time_function;
+};
+
+MonitoringParam.prototype.timeStep = function(elapsed_seconds) {
+    this.values.current_value = this.timeFunc(this.values.current_value,
+            elapsed_seconds);
+    return this.values.current_value;
+};
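+
+// Usage sketch (illustrative only, not wired up here). The values object and
+// ramp function are hypothetical stand-ins for what the simmp module produces:
+//
+//   var mp = new MonitoringParam({ current_value: 10 },
+//           function(value, elapsed_seconds) { return value + elapsed_seconds; });
+//   mp.timeStep(2);   // mp.values.current_value is now 12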
+
+/*
+ * DataModel constructor
+ *
+ * Arguments
+ *   restconf_host - Host name and port. eg: 'localhost:8008'
+ */
+var DataModel = function (restconf_host) {
+    this.restconf_host = restconf_host ? restconf_host : "localhost:8008";
+
+    this.simmp = new simmp_module.SimMp(simmp_rules);
+    if (!this.simmp) {
+        throw "simmp failed to initialize";
+    }
+    // Time data for event simulation (monitoring params)
+    this.start_time = Date.now();
+    this.previous_time = this.start_time;
+
+    // Store descriptors
+    this.descriptors = { nsd: {}, vnfd: {}, vld: {} };
+
+    // Store instance config data. Currently only NS Yang implements config data
+    this.config_records = { nsr: {}, vnfr: {}, vlr: {} };
+
+    // Stores Virtual Network Function instance records
+    this.vnfr_records = { };
+
+    // Stores Network Service instance operational records
+    this.ns_opdata_records = { };
+
+    // Manage which mock data to use next
+    this.vnfr_template_index = 0;
+    this.nsr_template_index = 0;
+
+    // Operational (running) state for opdata records
+    // 'on', 'off'
+    // TBD: do restarting
+    this.opstate = { nsr: {}, vnfr: {} };
+
+    // Store MonitoringParam objects
+    this.monitoring_params = {nsr: {}, vnfr: {} };
+}
+
+
+/*
+ * creates a descriptor name from the record name
+ */
+DataModel.prototype.rec2desc = function (record_type) {
+    if (record_type.charAt(record_type.length-1) == 'r') {
+        return record_type.slice(0, -1)+'d';
+    } else if (["ns","vnf","vl"].indexOf(record_type) != -1) {
+        return record_type + 'd';
+    } else {
+        throw new DataModelException('"%s" is not a supported record type', record_type);
+    }
+};
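+
+// For example (illustrative): rec2desc('nsr') and rec2desc('ns') both yield
+// 'nsd', while an unsupported type such as 'foo' throws a DataModelException.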
+
+DataModel.prototype.setDescriptor = function(descriptor_type, descriptor) {
+    if (!this.descriptors.hasOwnProperty(descriptor_type)) {
+        throw new DataModelException('"%s" is not a supported descriptor type', descriptor_type);
+    }
+
+    this.descriptors[descriptor_type][descriptor.id] = descriptor;
+};
+
+DataModel.prototype.setConfigRecord = function(record_type, record) {
+    if (!this.config_records.hasOwnProperty(record_type)) {
+        throw new DataModelException('"%s" is not a supported record type', record_type);
+    }
+
+    this.config_records[record_type][record.id] = record;
+};
+
+DataModel.prototype.findConfigRecord = function(record_type, record_id) {
+    if (this.config_records.hasOwnProperty(record_type)) {
+        return this.config_records[record_type][record_id];
+    } else {
+        return null;
+    }
+};
+
+/*
+ * Update the current value of a control param on a VNFR or NS opdata record
+ */
+DataModel.prototype.updateControlParam = function(record_type, record_id,
+        control_id, value) {
+    var record;
+    if (record_type == 'vnfr') {
+        record = this.vnfr_records[record_id];
+    } else {
+        record = this.ns_opdata_records[record_id];
+    }
+    // find the control param
+    if ('control_param' in record) {
+        for (var i=0; i < record.control_param.length; i++) {
+            if (control_id == record.control_param[i].id) {
+                // Make sure value is within min and max values
+                if (value >= record.control_param[i].min_value &&
+                    value <= record.control_param[i].max_value) {
+
+                    record.control_param[i].current_value = value;
+                    return 'SUCCESS';
+                } else {
+                    var errmsg = 'value "'+value+'" out of range. '+
+                        'Needs to be within '+ record.control_param[i].min_value +
+                        ' and ' + record.control_param[i].max_value;
+                    throw new DataModelException(errmsg);
+                }
+            }
+        }
+    } else {
+        var errmsg = 'Record type "' + record_type + '" with id "'+
+            record_id + '" does not have any control params';
+        throw new DataModelException(errmsg);
+    }
+};
+
+/*
+ * NS functions
+ *
+ * General comments on NS instance config/opdata:
+ *  For each ns-instance-config, the descriptor needs to be added first
+ */
+
+// TODO: Consolidate the template handling functions
+DataModel.prototype.nextNsrTemplate = function() {
+    var nsr_template = _.clone(nsr_templates[this.nsr_template_index], true);
+    this.nsr_template_index += 1;
+    if (this.nsr_template_index >= nsr_templates.length) {
+        this.nsr_template_index = 0;
+    }
+    return nsr_template;
+};
+
+DataModel.prototype.getNsdConnectionPoints = function(nsd_id) {
+    var nsd =  this.descriptors['nsd'][nsd_id];
+    var nsd = this.descriptors['nsd'][nsd_id];
+        throw new DataModelException("NSD ID '%s' does not exist", nsd_id);
+    }
+    // console.log("\n\nnsd = %s", JSON.stringify(nsd));
+    return nsd['connection_point'];
+};
+
+
+DataModel.prototype.createNsrControlParams = function(ns_instance_config_id) {
+    // TODO: find all VNFDs associated with this NS instance
+    // then either call this.createVnfrControlParams if you want to talk
+    // VNFR specific control params or we can generalize 'createVnfrControlParams'
+    // to pass in 'record_id' instead of vnfr_id.
+    //
+    var control_params = [];
+
+    return control_params;
+};
+
+/*
+ * Sets an ns-instance-config object record and creates an
+ * ns-instance-opdata record.
+ *
+ * If the NS instance opdata record matching the id of the ns-instance-config
+ * already exists, then remove the ns-instance-opdata record and reconstruct.
+ */
+DataModel.prototype.setNsInstanceConfig = function(ns_instance_config) {
+    // we are updating an existing ns-instance record set
+    // There is an issue where subsequent 'PUT' actions do not transfer
+    // the whole data set to the mocklet, so we need to retrieve the existing
+    // ns-instance-config to get the nsd-ref
+
+    // TODO: Consider creating a 'set_or_update' method for ns-instance-config
+    var ns_config = this.findConfigRecord('nsr', ns_instance_config.id);
+    if (ns_config) {
+        ns_config.admin_status = ns_instance_config.admin_status;
+    } else {
+        this.setConfigRecord('nsr', ns_instance_config);
+        ns_config = ns_instance_config;
+    }
+    if (ns_config.id in this.ns_opdata_records) {
+        delete this.ns_opdata_records[ns_config.id];
+    }
+    // if ns-instance-config is 'ENABLED', then create an ns-instance-opdata
+    if (ns_config.admin_status == 'ENABLED') {
+        var ns_opdata = this.generateNsInstanceOpdata(ns_config);
+        // set the ns instance opdata. Doesn't matter if it already exists
+        this.ns_opdata_records[ns_opdata.ns_instance_config_ref] = ns_opdata;
+    }
+};
+
+DataModel.prototype.generateMonitoringParams = function(descriptor_type, descriptor_id) {
+    console.log('Called generateMonitoringParams');
+    if (!(descriptor_type in this.descriptors)) {
+        throw new DataModelException('descriptor type "%s" not found', descriptor_type);
+    }
+    var descriptor = this.descriptors[descriptor_type][descriptor_id];
+    var a_simmp = this.simmp;
+    if (descriptor) {
+        if ('monitoring_param' in descriptor) {
+            return descriptor['monitoring_param'].map(function(obj) {
+                var simFunc = a_simmp.createSimMonitorFunc(obj);
+                return new MonitoringParam(_.clone(obj, true), simFunc);
+            });
+        } else {
+            console.log('Descriptor(type=%s) with (id=%s) does not have ' +
+               'monitoring params', descriptor_type, descriptor_id);
+            return [];
+        }
+    } else {
+        throw new DataModelException("Cannot find descriptor %s with id '%s'",
+                descriptor_type, descriptor_id);
+    }
+};
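+
+// Note (assumption; simmp.js is not shown in this file): createSimMonitorFunc()
+// is expected to pick a simulation function by looking up the param id in the
+// mp-mapper table from simmp-rules.json, e.g. "ping-tx-rate-mp" -> "tx_rc_rate".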
+
+DataModel.prototype.updateMonitoringParams = function(instance_type, instance_id) {
+    var sim_mp = this.monitoring_params[instance_type][instance_id];
+    if (sim_mp) {
+        var time_now = Date.now();
+        var elapsed_seconds = (time_now - this.previous_time) / 1000;
+        var monitoring_params = sim_mp.map(function(obj) {
+            obj.timeStep(elapsed_seconds);
+            return obj.values;
+        });
+        this.previous_time = time_now;
+        return monitoring_params;
+    } else {
+        // TODO: Figure out how we want to handle this case
+        return [];
+    }
+};
+
+/*
+ * Creates an ns-instance-opdata object, but does not add it to the data
+ * store.
+ */
+DataModel.prototype.generateNsInstanceOpdata = function (ns_config) {
+    var nsr_template = this.nextNsrTemplate();
+
+    // HACK: We need to get control and action param from the nsr
+    // or have a function that synchronizes the next array element in
+    // the templates
+    var vnfr_template = this.nextVnfrTemplate();
+
+    var nsd_id = ns_config.nsd_ref;
+    var connection_points = this.getNsdConnectionPoints(ns_config.nsd_ref);
+    var sim_mp = this.generateMonitoringParams('nsd', nsd_id);
+    // save for using in update
+    this.monitoring_params['nsr'][ns_config.id] = sim_mp;
+    var monitoring_params = sim_mp.map(function(obj) {
+        // not time stepping when we create them
+        return obj.values;
+    });
+
+    return {
+        ns_instance_config_ref: ns_config.id,
+        'connection_point' : _.clone(connection_points, true),
+        epa_param: _.clone(nsr_template['epa_param'], true),
+        // NOTE: Remarked out until nfvi metrics figured out
+        //nfvi_metric: _.clone(nsr_template['nfvi_metric'], true),
+        monitoring_param: monitoring_params,
+        //monitoring_param: _.clone(nsr_template['monitoring_param'], true),
+        create_time: nsr_template['create_time'],
+        action_param: vnfr_template['action_param'],
+        // TODO: control_param: this.createNsrControlParams(ns_config.id);
+        control_param: vnfr_template['control_param']
+    };
+};
+
+DataModel.prototype.getNsInstanceOpdata = function() {
+    var opdata_records = [];
+    var config_records = this.config_records['nsr'];
+    for (var config_record_id in config_records) {
+        if (config_records[config_record_id]['admin_status'] == 'ENABLED') {
+            console.log('Is ENABLED: ns-instance-config record with id %s', config_record_id);
+
+            var ns_op_rec = this.ns_opdata_records[config_record_id];
+            if (ns_op_rec) {
+                ns_op_rec.monitoring_param = this.updateMonitoringParams(
+                        'nsr', config_record_id);
+                opdata_records.push(ns_op_rec);
+            } else {
+                console.log('NO RECORD FOUND for ns config id: %s', config_record_id);
+            }
+        } else {
+            console.log('Either no admin status record or not enabled');
+        }
+    }
+    return opdata_records;
+};
+
+
+/* =============
+ * VNF functions
+ * =============
+ */
+
+/*
+ * Gets the next VNFR template from the array of VNFR templates and 
+ * increments the VNFR template counter. Wraps back to the first VNFR
+ * template when the last one is used.
+ */
+DataModel.prototype.nextVnfrTemplate = function() {
+    var vnfr_template = _.clone(vnfr_templates[this.vnfr_template_index], true);
+    this.vnfr_template_index += 1;
+    if (this.vnfr_template_index >= vnfr_templates.length) {
+        this.vnfr_template_index = 0;
+    }
+    return vnfr_template;
+}
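+
+// For example (illustrative): with two templates loaded, successive calls
+// return deep clones of template 0, template 1, template 0, ... so records
+// never share or mutate the same template object.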
+
+/*
+ * Arguments
+ *  vnfd - VNF Descriptor object
+ *  vnfr_id - VNFR unique identifier
+ *  (the RESTConf host and port are taken from this.restconf_host)
+ */
+DataModel.prototype.createVnfrActionParams = function(vnfd, vnfr_id) {
+    // Canned start/stop actions for now
+    // TBD: read action params from the VNFD and create them here
+    var action_param = [
+        {
+            id: uuid.v1(),
+            name: "Start Me",
+            description: "Start this VNFR",
+            group_tag: "start-vnfr",
+            url: "https://"+this.restconf_host+"/api/operations/start-vnfr",
+            operation: "POST",
+            payload: '{"start-vnfr": { "id": "'+vnfr_id+'"}}'
+        },
+        {
+            id: uuid.v1(),
+            name: "Stop Me",
+            description: "Stop this VNFR",
+            group_tag: "stop-vnfr",
+            url: "https://"+this.restconf_host+"/api/operations/stop-vnfr",
+            operation: "POST",
+            payload: '{"stop-vnfr": { "id": "'+vnfr_id+'"}}'
+        }
+    ];
+    return action_param;
+};
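+
+// For example (illustrative): with the default restconf_host "localhost:8008"
+// and vnfr_id "1234", the generated "Start Me" action posts to
+// https://localhost:8008/api/operations/start-vnfr with payload
+// {"start-vnfr": { "id": "1234"}}.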
+
+DataModel.prototype.createVnfrControlParams = function(vnfd, vnfr_id,
+        vnfr_template) {
+    console.log("Called DataModel.prototype.createVnfrControlParams");
+    if (vnfr_template) {
+        console.log("returning clone of vnfr_template['control_param']");
+        return _.clone(vnfr_template['control_param'], true);
+    } else {
+        if (vnfd.control_param) {
+            console.log("VNFD's control-param="+JSON.stringify(vnfd.control_param));
+            var a_restconf_host = this.restconf_host;
+            var control_params = vnfd.control_param.map(function(obj) {
+                var cp = _.clone(obj, true);
+                cp.url = util.format(cp.url, a_restconf_host);
+                console.log("\ncontrol-param payload before:"+ cp.payload);
+                cp.payload = util.format(cp.payload, vnfr_id);
+                console.log("\ncontrol-param payload after:"+ cp.payload+"\n");
+                return cp;
+            });
+            return control_params;
+        } else {
+            return [];
+        }
+    }
+}
+
+/*
+ * Creates a new VNFR based on the VNFD given in the argument.
+ * This method is intended to have no side effects; otherwise this code
+ * could simply live in this.addVnfData.
+ */
+DataModel.prototype.createVnfr = function(vnfd) {
+    //var vnfr_template = this.nextVnfrTemplate();
+    var vnfr_id = uuid.v1();
+
+    return {
+        id: vnfr_id,
+        // Hack: Copy the VNFD values but append '-Record' to end
+        name: vnfd.name + ' Record',
+        short_name: vnfd.short_name + '_REC',
+        vendor: vnfd.vendor,
+        description: vnfd.description,
+        version: vnfd.version,
+        vnfd_ref: vnfd.id,
+        internal_vlr: [],
+        // Even though this is in the Yang, it doesn't exist in the
+        // instantiated model:
+        // 'internal_connection_point_ref': [],
+        action_param: this.createVnfrActionParams(vnfd, vnfr_id),
+        //control_param: _.clone(vnfr_template['control_param'], true)
+        control_param: this.createVnfrControlParams(vnfd, vnfr_id)
+    };
+};
+
+
+/*
+ * Creates and adds a new VNFD and matching VNFR record to our data store
+ *
+ * TODO: Might need to be updated so we create a VNFR when a start VNFR is called
+ *
+ */
+DataModel.prototype.addVnfData = function(vnfd) {
+    // if the vnfd does not already exist:
+    if (this.descriptors['vnfd'][vnfd.id] == null) {
+        console.log("adding new vnfd with id %s", vnfd.id);
+        this.setDescriptor('vnfd', vnfd);
+        // create a vnfr record, but without monitoring-param
+        var vnfr = this.createVnfr(vnfd);
+
+        var sim_mp = this.generateMonitoringParams('vnfd', vnfd.id);
+        // save for using in update
+        this.monitoring_params['vnfr'][vnfr.id] = sim_mp;
+        vnfr.monitoring_param = sim_mp.map(function(obj) {
+            // not time stepping when we create them
+            return obj.values;
+        });
+        this.vnfr_records[vnfr.id] = vnfr;
+    } else {
+        // do nothing
+    }
+};
+
+
+DataModel.prototype.getVnfrs = function () {
+    var records = [];
+    for (var vnfr_id in this.vnfr_records) {
+        // When admin-status is implemented, then return only those 'ENABLED'
+        var vnfr_record = this.vnfr_records[vnfr_id];
+        vnfr_record.monitoring_param = this.updateMonitoringParams(
+                'vnfr', vnfr_id);
+        records.push(vnfr_record);
+    }
+    return records;
+}
+
+
+// Move the following to a new VnfrManager class
+
+DataModel.prototype.startVnfr = function(vnfr_id) {
+    console.log('Calling DataModel.startVnfr with id "%s"', vnfr_id);
+
+    console.log('Here are the VNFR ids we have:');
+    for (var key in this.vnfr_records) {
+        console.log('id: "%s"', key);
+    }
+    //console.log('vnfr_records = %s', JSON.stringify(this.vnfr_records));
+
+    if (!(vnfr_id in this.vnfr_records)) {
+        var errmsg = 'Cannot find vnfr record with id "'+vnfr_id+'"';
+        console.error('\n\n'+errmsg+'\n\n');
+        throw new DataModelException(errmsg);
+    }
+    // Just add/set it
+    this.opstate.vnfr[vnfr_id] = 'ON';
+    return this.vnfr_records[vnfr_id];
+}
+
+DataModel.prototype.stopVnfr = function(vnfr_id) {
+    console.log('Calling DataModel.stopVnfr with id "%s"', vnfr_id);
+    if (!(vnfr_id in this.vnfr_records)) {
+        var errmsg = 'Cannot find vnfr record with id "'+vnfr_id+'"';
+        console.error(errmsg);
+        throw new DataModelException(errmsg);
+    }
+    // Just add/set it
+    this.opstate.vnfr[vnfr_id] = 'OFF';
+    return this.vnfr_records[vnfr_id];
+}
+
+DataModel.prototype.vnfrRunningState = function(vnfr_id) {
+    if (!(vnfr_id in this.vnfr_records)) {
+        throw new DataModelException(
+                'DataModel.vnfrRunningState: Cannot find VNFR with id "%s"', vnfr_id);
+    }
+    if (vnfr_id in this.opstate.vnfr) {
+        return this.opstate.vnfr[vnfr_id];
+    } else {
+        // Assume we are 'ON'
+        return 'ON';
+    }
+}
+
+
+/* ==========================
+ * Debug and helper functions
+ * ==========================
+ */
+
+DataModel.prototype.prettyPrint = function (out) {
+    if (out == undefined) {
+        out = console.log;
+    }
+    out('Descriptors:');
+    for (descriptor_type in this.descriptors) {
+        out("Descriptor type: %s", descriptor_type);
+        for (descriptor_id in this.descriptors[descriptor_type]) {
+            out("id=%s data=%s", descriptor_id,
+                    JSON.stringify(this.descriptors[descriptor_type][descriptor_id]));
+        };
+    };
+
+    out('\nConfigRecords:');
+    for (record_type in this.config_records) {
+        out("Record type: %s", record_type);
+        for (record_id in this.config_records[record_type]) {
+            out("id=%s data=%s", record_id,
+                    JSON.stringify(this.config_records[record_type][record_id]));
+        };
+    };
+};
+
+
+module.exports = {
+    DataModelException: DataModelException,
+    NotImplementedException: NotImplementedException,
+    MonitoringParam: MonitoringParam,
+    DataModel: DataModel
+};
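+
+// Minimal consumer sketch (illustrative; some_vnfd is a hypothetical parsed
+// VNFD object like the entries in data/vnfd_catalog.json):
+//
+//   var dm = require('./data_model.js');
+//   var model = new dm.DataModel('localhost:8008');
+//   model.addVnfData(some_vnfd);
+//   console.log(model.getVnfrs());  // VNFRs with simulated monitoring params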
+
diff --git a/rwlaunchpad/mock/get_data.sh b/rwlaunchpad/mock/get_data.sh
new file mode 100755 (executable)
index 0000000..508275c
--- /dev/null
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+#
+# This is a convenience script to get descriptors from the RESTConf server
+#
+# Provide the RESTConf hostname as the argument or default to localhost
+
+if [ $# -eq 0 ] ; then
+    HOST=localhost
+else
+    HOST=$1
+fi
+
+echo "Getting descriptor data from $IP"
+
+for descriptor in nsd vnfd vld
+do
+
+    printf "retrieving $descriptor:\n\n"
+
+    curl --user admin:admin \
+        -H "Content-Type: application/vnd.yang.data+json" \
+        -H "accept: application/vnd.yang.data+json" \
+        http://$HOST:8008/api/running/$descriptor-catalog/
+
+done
+
+rectype='ns'
+
+curl --user admin:admin \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -H "accept: application/vnd.yang.data+json" \
+    http://$HOST:8008/api/running/$rectype-instance-config/
+
+curl --user admin:admin \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -H "accept: application/vnd.yang.data+json" \
+    http://$HOST:8008/api/operational/$rectype-instance-opdata/
+
+
+
diff --git a/rwlaunchpad/mock/get_ns_instance_opdata.sh b/rwlaunchpad/mock/get_ns_instance_opdata.sh
new file mode 100755 (executable)
index 0000000..582fec1
--- /dev/null
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+#
+# Provide the RESTConf hostname as the argument or default to localhost
+
+if [ $# -eq 0 ] ; then
+    HOST=localhost
+else
+    HOST=$1
+fi
+
+echo "Getting NS instance opdata from $IP"
+
+curl --user admin:admin \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -H "accept: application/vnd.yang.data+json" \
+    http://$HOST:8008/api/operational/ns-instance-opdata/
+
diff --git a/rwlaunchpad/mock/lp_mock_client.js b/rwlaunchpad/mock/lp_mock_client.js
new file mode 100644 (file)
index 0000000..6de0842
--- /dev/null
@@ -0,0 +1,317 @@
+AUTOBAHN_DEBUG = true;
+var autobahn = require('autobahn');
+var uuid = require('node-uuid');
+var _ = require('lodash');
+
+// Our modules
+var dm = require('./data_model.js');
+
+
+var DUMP_RESULTS = false;
+
+// TODO: make the url be configurable via command line arg
+var connection = new autobahn.Connection({
+    url: 'ws://localhost:8090/ws',
+    realm: 'dts_mock'
+});
+
+// Instance of our data model/data store
+var dataModel = new dm.DataModel();
+
+var descriptor_module = (function () {
+
+    var my = {};
+
+    /*
+     * This function sets descriptors in the dataModel
+     */
+    function on_config_descriptor_catalog(args) {
+        try {
+            var xpath = args[0];
+            var msg = args[1];
+
+            console.log("\n\n*** Got on_config_descriptor_catalog:\n    (xpath: %s)(msg: %j)", xpath, msg);
+
+            var descriptor_type = xpath.match(new RegExp(/(nsd|vnfd|vld)-catalog/))[1];
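+            // e.g. (illustrative) an xpath ending in "nsd-catalog" yields
+            // descriptor_type "nsd"; vnfd and vld catalogs match likewise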
+
+            if (descriptor_type in msg) {
+                msg[descriptor_type].forEach(function(entry) {
+                    console.log('Assigning descriptor "%s" with id %s',
+                        descriptor_type, entry.id);
+                    if (descriptor_type == 'vnfd') {
+                        console.log('-- Adding VNFR data');
+                        dataModel.addVnfData(entry);
+                    } else {
+                        // Simply assign
+                        dataModel.setDescriptor(descriptor_type, entry);
+                    }
+                });
+            }
+        } catch(e) {
+            console.error("Caught exception: %s\n\n%s", e, e.stack);
+        }
+    }
+
+    my.register = function (session) {
+        console.log('Registering for descriptor handling');
+        session.subscribe('dts.config.nsd-catalog', on_config_descriptor_catalog);
+        session.subscribe('dts.config.vnfd-catalog', on_config_descriptor_catalog);
+        session.subscribe('dts.config.vld-catalog', on_config_descriptor_catalog);
+    };
+
+    return my;
+}());
+
+
+var instance_module = (function () {
+    var my = {};
+
+    function on_config_config(args) {
+        try {
+            var xpath = args[0];
+            var msg = args[1];
+
+            console.log("\n\n*** Got on_config_config:\n    (xpath: %s)(msg: %j)", xpath, msg);
+
+            var record_type = xpath.match(new RegExp(/(ns|vnf|vl)-instance-config/))[1];
+            record_type += 'r';
+
+            console.log('record_type = %s', record_type);
+
+            if (record_type in msg) {
+                msg[record_type].forEach(function(entry) {
+                    console.log('Assigning record (%s) id=%s, descriptor: id=%s',
+                       record_type, entry.id, entry.nsd_ref);
+                    if (record_type == 'nsr') {
+                        dataModel.setNsInstanceConfig(entry);
+                    } else {
+                        // vnfd, vld, which don't have instance_config records yet
+                        dataModel.setConfigRecord(record_type, entry);
+                    }
+                });
+            }
+
+        } catch (e) {
+            console.error("Caught exception: %s\n\n%s", e, e.stack);
+        }
+    }
+
+    /*
+     * Get all nsr opdata records:
+     *   xpath: D,/nsr:ns-instance-opdata/nsr:nsr
+     *   msg: {"nsr":[{"ns_instance_config_ref":""}]}
+     *
+     * Get Ping Pong nsr opdata record:
+     *   xpath: D,/nsr:ns-instance-opdata/nsr:nsr[nsr:ns-instance-config-ref='f5f41f36-78f6-11e5-b9ba-6cb3113b406f']
+     *   msg: {"nsr":[{"ns_instance_config_ref":"f5f41f36-78f6-11e5-b9ba-6cb3113b406f"}]}
+     *
+     * Get monitoring param for nsr instance opdata record:
+     *   xpath: D,/nsr:ns-instance-opdata/nsr:nsr[nsr:ns-instance-config-ref='f5f41f36-78f6-11e5-b9ba-6cb3113b406f']
+     *   msg: {
+     *          "nsr":[{
+     *              "monitoring_param":[{"id":""}],
+     *              "ns_instance_config_ref":"f5f41f36-78f6-11e5-b9ba-6cb3113b406f"
+     *          }]}
+     *
+     * Note that the xpath arg is identical in getting the entire NSR and getting sub-elements in the NSR
+     * The message tells what values to get
+     */
+    function on_get_opdata(args) {
+        try {
+            var xpath = args[0];
+            var msg = args[1];
+            //console.log("\n\n*** Got on_get_opdata:\n   (xpath: %s)(msg: %j)", xpath, msg);
+            console.log("*** Got on_get_opdata:\n   (xpath: %s)(msg: %j)", xpath, msg);
+
+            var record_type = xpath.match(new RegExp(/(ns|vnf|vl)-instance-opdata/))[1];
+            record_type += 'r';
+
+            var gi_type_map = {
+                "nsr": "RwNsrYang.YangData_Nsr_NsInstanceOpdata",
+                "vnfr": "VnfrYang.YangData_Vnfr_VnfInstanceOpdata_Vnfr",
+                "vlr": "VlrYang.YangData_Vlr_VlInstanceOpdata_Vlr"
+            };
+
+            if (record_type == 'nsr') {
+                //console.log("###################\n   data model:\n\n");
+                //dataModel.prettyPrint();
+                var response = {
+                    'nsr': dataModel.getNsInstanceOpdata()
+                };
+                var respond_xpath = 'D,/nsr:ns-instance-opdata';
+            } else {
+                throw new dm.NotImplementedException(
+                        "record_type '%s' is not yet supported.", record_type);
+            }
+
+            var result = new autobahn.Result([
+                'RwNsrYang.YangData_Nsr_NsInstanceOpdata',
+                response
+            ], {"xpath": respond_xpath});
+
+            if (DUMP_RESULTS)
+                console.log("result=\n%s", JSON.stringify(result) );
+
+            return result;
+        } catch(e) {
+            console.error("Caught exception: %s\n\n%s", e, e.stack);
+        }
+    }
+
+    function on_get_vnfr_catalog(args) {
+        try {
+            var xpath = args[0];
+            var msg = args[1];
+            console.log("*** Got on_vnfr_catalog:\n   (xpath: %s)(msg: %j)", xpath, msg);
+
+            var response = {
+                'vnfr': dataModel.getVnfrs()
+            };
+            var respond_xpath = 'D,/vnfr:vnfr-catalog';
+
+            var result = new autobahn.Result([
+                'RwVnfrYang.YangData_Vnfr_VnfrCatalog',
+                response
+            ], {"xpath": respond_xpath});
+
+            if (DUMP_RESULTS)
+                console.log("result=\n%s", JSON.stringify(result) );
+
+            return result;
+        } catch(e) {
+            console.error("Caught exception: %s\n\n%s", e, e.stack);
+        }
+    }
+
+    my.register = function (session) {
+        console.log('Registering for record handling');
+        session.register('dts.data.ns-instance-opdata', on_get_opdata);
+        session.register('dts.data.vnfr-catalog', on_get_vnfr_catalog);
+        session.subscribe('dts.config.ns-instance-config', on_config_config);
+    };
+
+    return my;
+}());
+
+
+var action_module = (function() {
+    var my = {};
+
+    /*
+     * Set the specified VNFR operating state
+     *
+     * (xpath: I,/lpmocklet:start-vnfr)
+     * (msg: {"id":"f26b90b0-8184-11e5-bc47-2b429643382b"})
+     */
+    function on_set_opstate(args) {
+        try {
+            var xpath = args[0];
+            var msg = args[1];
+
+            console.log("\n\n*** Got on_set_opstate:\n    (xpath: %s)(msg: %j)",
+                xpath, msg);
+            var action_match = xpath.match(new RegExp(/lpmocklet:(\w+)-(\w+)/));
+            var action = action_match[1];
+            var obj_type = action_match[2];
+
+            var record_id = msg['id'];
+            console.log('action="%s", obj_type="%s", record_id="%s"',
+                    action, obj_type, record_id);
+
+            if (obj_type == 'vnfr') {
+                if (action == 'start') {
+                    dataModel.startVnfr(record_id);
+                }
+                else if (action == 'stop') {
+                    dataModel.stopVnfr(record_id);
+                }
+                else {
+                    console.error('Unsupported opstate action "%s"', action);
+                }
+            } else {
+                console.error('Unsupported opstate action object: "%s"',
+                        obj_type);
+            }
+
+            console.log('\n\nBuilding response....');
+
+            var response = {
+                id: uuid.v1(),
+                object_type: obj_type,
+                action: action,
+                status: 'SUCCESS' 
+            };
+            var respond_xpath = 'D,/lpmocklet:lpmocklet-action-status';
+            var result = new autobahn.Result([
+                    'LpmockletYang.YangData_Lpmocklet_LpmockletActionStatus',
+                    response
+                    ], {"xpath": respond_xpath});
+
+            console.log('Done running on_set_opstate');
+            return result;
+
+        } catch (e) {
+            console.error("Caught exception: %s\n\n%s", e, e.stack);
+        }
+    }
+
+    function on_set_control_param(args) {
+        try {
+            var xpath = args[0];
+            var msg = args[1];
+
+            console.log("\n\n*** Got on_set_control_param:\n    (xpath: %s)(msg: %j)",
+                xpath, msg);
+
+            // We can ignore xpath. We expect: "I,/lpmocklet:set-control-param"
+// msg: {"set":{"id":"f8d63b30-84b3-11e5-891c-61c6a71edd3c","obj_code":"VNFR","control_id":"ping-packet-size-1","value":10}}
+
+            var response_class = 'LpmockletYang.YangData_Lpmocklet_LpmockletActionStatus';
+            var status = dataModel.updateControlParam(
+                    msg.obj_code.toLowerCase(),
+                    msg.id,
+                    msg.control_id,
+                    msg.value);
+
+            var response = {
+                id: uuid.v1(),
+                object_type: msg.obj_code,
+                action: msg.control_id,
+                status: status
+            };
+
+            var respond_xpath = 'D,/lpmocklet:lpmocklet-action-status';
+            var result = new autobahn.Result([
+                    'LpmockletYang.YangData_Lpmocklet_LpmockletActionStatus',
+                    response
+                    ], {"xpath": respond_xpath});
+
+            console.log('Done running on_set_control_param');
+            return result;
+        } catch (e) {
+            console.error("Caught exception: %s\n\n%s", e, e.stack);
+        }
+    }
+
+    my.register = function(session) {
+        console.log('Registering for action handling');
+        session.register('dts.rpc.start-vnfr', on_set_opstate);
+        session.register('dts.rpc.stop-vnfr', on_set_opstate);
+        session.register('dts.rpc.set-control-param', on_set_control_param);
+    };
+
+    return my;
+
+}());
+
+
+connection.onopen = function (session) {
+    console.log('Connection to wamp server established!');
+    descriptor_module.register(session);
+    instance_module.register(session);
+    action_module.register(session);
+}
+
+console.log('Opening autobahn connection');
+connection.open();
+
diff --git a/rwlaunchpad/mock/package.json b/rwlaunchpad/mock/package.json
new file mode 100644 (file)
index 0000000..51e5d89
--- /dev/null
@@ -0,0 +1,18 @@
+{
+  "name": "rw.lp.dts.mock",
+  "version": "1.0.0",
+  "description": "This is the Node.js launchpad DTS mock client.",
+  "main": "lp_mock_client.js",
+  "scripts": {
+    "start": "node lp_mock_client"
+  },
+  "author": "JohnBaldwin",
+  "license": "Apache-2.0",
+  "dependencies": {
+    "autobahn": "~0.9.6",
+    "lodash": "~3.10.1",
+    "node-uuid": "~1.4.3"
+  },
+  "devDependencies": {
+    "mocha": "~2.3.3"
+  }
+}
diff --git a/rwlaunchpad/mock/plugins/CMakeLists.txt b/rwlaunchpad/mock/plugins/CMakeLists.txt
new file mode 100644 (file)
index 0000000..a10246d
--- /dev/null
@@ -0,0 +1,25 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+cmake_minimum_required(VERSION 2.0)
+
+set(subdirs
+    yang
+    )
+
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
+
diff --git a/rwlaunchpad/mock/plugins/yang/CMakeLists.txt b/rwlaunchpad/mock/plugins/yang/CMakeLists.txt
new file mode 100644 (file)
index 0000000..2d8f2d9
--- /dev/null
@@ -0,0 +1,32 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+include(rift_yang)
+include(rift_plugin)
+
+rift_add_yang_target(
+    TARGET
+        lpmocklet_yang
+    YANG_FILES
+        lpmocklet.yang
+    COMPONENT
+        ${PKG_LONG_NAME}
+    LIBRARIES
+        mano-types_yang_gen
+)
+
diff --git a/rwlaunchpad/mock/plugins/yang/lpmocklet.tailf.yang b/rwlaunchpad/mock/plugins/yang/lpmocklet.tailf.yang
new file mode 100644 (file)
index 0000000..0579add
--- /dev/null
@@ -0,0 +1,50 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module lpmocklet-annotation
+{
+    namespace "http://riftio.com/ns/riftware-1.0/lpmocklet-annotation";
+    prefix "lpmocklet-ann";
+
+    import tailf-common {
+        prefix tailf;
+    }
+
+    import lpmocklet {
+        prefix lpmocklet;
+    }
+
+    tailf:annotate "/lpmocklet:lpmocklet-action-status" {
+        tailf:callpoint rw_callpoint;
+    }
+
+    tailf:annotate "/lpmocklet:set-control-param" {
+        tailf:actionpoint rw_actionpoint;
+    }
+
+    tailf:annotate "/lpmocklet:start-vnfr" {
+        tailf:actionpoint rw_actionpoint;
+    }
+
+    tailf:annotate "/lpmocklet:stop-vnfr" {
+        tailf:actionpoint rw_actionpoint;
+    }
+}
+
diff --git a/rwlaunchpad/mock/plugins/yang/lpmocklet.yang b/rwlaunchpad/mock/plugins/yang/lpmocklet.yang
new file mode 100644 (file)
index 0000000..819ee40
--- /dev/null
@@ -0,0 +1,111 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module lpmocklet
+{
+    namespace "http://riftio.com/ns/riftware-1.0/lpmocklet";
+    prefix "lpmocklet";
+
+    import rw-pb-ext {
+        prefix "rwpb";
+    }
+
+    import ietf-inet-types {
+        prefix "inet";
+    }
+
+    import ietf-yang-types {
+        prefix "yang";
+    }
+
+    import mano-types {
+        prefix "manotypes";
+    }
+
+    // Used for LP Mocklet RPC action-param and control-param responses
+    container lpmocklet-action-status {
+        config false;
+        leaf id {
+            type yang:uuid;
+        }
+        // TODO: Make this consistent with 'set-control-param' 'obj-code'
+        leaf object_type {
+            type string;
+        }
+        leaf action {
+            type string;
+        }
+        leaf status {
+            type string;
+        }
+    }
+
+    rpc set-control-param {
+        input {
+            leaf id {
+                description "object id";
+                type yang:uuid;
+                mandatory true;
+            }
+            leaf obj-code {
+                description "Type of object: NS, VNF";
+                type string;
+                mandatory true;
+            }
+            leaf control-id {
+                type string;
+                mandatory true;
+            }
+            // The new value to assign
+            leaf value {
+                type uint64;
+                mandatory true;
+            }
+        }
+    }
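+
+    // Example invocation over RESTConf (hypothetical values):
+    //   POST http://<host>:8008/api/operations/set-control-param
+    //   { "input": { "id": "<uuid>", "obj-code": "NS",
+    //                "control-id": "rate", "value": 100 } }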
+
+    rpc start-vnfr {
+        input {
+            rwpb:msg-new "StartVnfrInput";
+            leaf id {
+                type yang:uuid;
+                mandatory true;
+            }
+        }
+        output {
+            rwpb:msg-new "StartVnfrOutput";
+            leaf status {
+                description "status of the start request";
+                type string;
+            }
+        }
+    }
+
+    rpc stop-vnfr {
+        input {
+            rwpb:msg-new "StopVnfr";
+            leaf id {
+                type yang:uuid;
+                mandatory true;
+            }
+        }
+    }
+}
+
diff --git a/rwlaunchpad/mock/set_data.sh b/rwlaunchpad/mock/set_data.sh
new file mode 100755 (executable)
index 0000000..4a39c0a
--- /dev/null
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+#
+# This script posts descriptor data (NSD, VNFD, VLD) to the RESTConf server
+#
+# Provide the RESTConf hostname as the argument or default to localhost
+#
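+# Example (hypothetical host):
+#     ./set_data.sh lp-host.example.com
+#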
+
+if [ $# -eq 0 ] ; then
+    HOST=localhost
+else
+    HOST=$1
+fi
+
+echo "Posting descriptor data to $HOST"
+
+
+#for descriptor in nsd vnfd vld
+
+for descriptor in nsd vnfd
+do
+    echo "Assigning data to descriptor \"$descriptor\""
+
+    curl --user admin:admin \
+        -H "Content-Type: application/vnd.yang.data+json" \
+        -X POST \
+        -d @data/${descriptor}_catalog.json \
+        http://$HOST:8008/api/running/$descriptor-catalog/ -v
+
+done
+
+for rectype in ns
+do
+    echo "Assigning data to instance config \"$rectype\""
+
+    curl --user admin:admin \
+        -H "Content-Type: application/vnd.yang.data+json" \
+        -X POST \
+        -d @data/${rectype}-instance-config.json \
+        http://$HOST:8008/api/running/$rectype-instance-config/ -v
+
+done
+
diff --git a/rwlaunchpad/mock/set_ping_pong.sh b/rwlaunchpad/mock/set_ping_pong.sh
new file mode 100755 (executable)
index 0000000..11126bd
--- /dev/null
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+#
+# This script posts descriptor data (NSD, VNFD, VLD) to the RESTConf server
+#
+# Provide the RESTConf hostname as the argument or default to localhost
+#
+
+if [ $# -eq 0 ] ; then
+    HOST=localhost
+else
+    HOST=$1
+fi
+
+echo "Posting descriptor data to $HOST"
+
+for rectype in vnfd nsd
+do
+    echo "Assigning data to instance config \"$rectype\""
+
+    curl --user admin:admin \
+        -H "Content-Type: application/vnd.yang.data+json" \
+        -X POST \
+        -d @data/ping-pong-${rectype}.json \
+        http://$HOST:8008/api/running/$rectype-catalog/ -v
+
+    # Add sleep here if vnfd is not ready on server
+done
+
+curl --user admin:admin \
+    -H "Content-Type: application/vnd.yang.data+json" \
+    -X POST \
+    -d @data/ping-pong-ns-instance-config.json \
+    http://$HOST:8008/api/running/ns-instance-config/ -v
+
diff --git a/rwlaunchpad/mock/simmp.js b/rwlaunchpad/mock/simmp.js
new file mode 100644 (file)
index 0000000..9d0628f
--- /dev/null
@@ -0,0 +1,87 @@
+
+var _ = require('lodash');
+
+/*
+ * Args:
+ * rules - object with the monitoring param to simulator function mapping
+ *         see data/simmp.json
+ */
+var SimMp = function(rules) {
+    this.rules = _.clone(rules, true);
+};
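+
+// Usage sketch (hypothetical rules; same shape as data/simmp.json):
+//
+//     var simmp = new SimMp({'mp-mapper': {'tx-rate-pp1': 'tx_rc_rate'}});
+//     var step = simmp.createSimMonitorFunc({id: 'tx-rate-pp1',
+//                                            min_value: 0, max_value: 100});
+//     var next = step(50, 1); // stays within +/- (max_value-min_value)/10 of 50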
+
+//SimMp.prototype.map_rule = function(mp_id) {
+//    return this.rules['mp-mapper'][mp_id];
+//}
+
+// Use the monitoring param id for now
+SimMp.prototype.createSimMonitorFunc = function(mp) {
+
+    // Define our core simulation function here
+    //
+    // min, max inclusive
+    var rand_func = function(min, max) {
+        return Math.floor(Math.random() * (max-min+1)) + min;
+    }
+
+    var funcs = {
+        // transmit and receive rate
+        tx_rc_rate: function(value, elapsed_seconds) {
+            // Ignore elapsed time for first implementation of transmit and
+            // receive rate simulation.
+            // This is a quick-and-dirty implementation that makes the
+            // monitoring params change, stay within bounds, and not swing
+            // wildly.
+            var min_val = mp.min_value;
+            var max_val = mp.max_value;
+            // Set an outer bound on the maximum change from the current value.
+            // Tweak bin_count to set how much the value can swing from the
+            // last value
+            var bin_count = 10;
+            // Set the range for the new value as a function of the
+            // difference between the max and min values
+            var max_delta = (max_val - min_val) / bin_count;
+            console.log('Setting max_delta = %s', max_delta);
+            var new_val = rand_func(
+                    Math.max(min_val, value-max_delta),
+                    Math.min(max_val, value+max_delta));
+            //console.log("Generated value: %s", new_val);
+            return new_val;
+        },
+        packet_size: function(value, elapsed_seconds) {
+            // Stub method just returns value unchanged
+            // TODO: Figure out how we want to vary packet sizes
+            return value;
+        },
+        accumulate: function(value, elapsed_seconds) {
+            // NOT TESTED. Basic idea; will want to add variability in
+            // how fast we accumulate.
+            var accumulate_rate = 0.1;
+            var new_value = value + (elapsed_seconds * accumulate_rate);
+            return new_value;
+        }
+        // add growth function
+    };
+
+    // Declare our monitoring param id to sim function mapping here
+    // TODO: Move out to a yaml/json file and make this function belong to
+    // a 'Class'
+    //var mapper = {
+    //    'tx-rate-pp1': funcs['tx_rc_rate'],
+    //    'rc-rate-pp1': funcs['tx_rc_rate'] 
+    //};
+
+    var sim_func_name = this.rules['mp-mapper'][mp.id];
+    if (sim_func_name) {
+        return funcs[sim_func_name];
+    } else {
+        console.log('No time step sim function found for monitoring param with id "%s", using constant value', mp.id); 
+        return function(value, elapsed_seconds) {
+            return value;
+        }
+    }
+}
+
+module.exports = {
+    SimMp: SimMp
+};
diff --git a/rwlaunchpad/mock/test/test_simmp.js b/rwlaunchpad/mock/test/test_simmp.js
new file mode 100644 (file)
index 0000000..08833cf
--- /dev/null
@@ -0,0 +1,28 @@
+var assert = require('assert');
+
+var simmp_module = require('../simmp.js');
+
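+// To run (assumes mocha is installed), from rwlaunchpad/mock:
+//     mocha test/test_simmp.js
+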
+// This is an example test on SimMp. It is not a very good test, but shows
+// how to write a basic test in mocha
+describe('SimMp', function() {
+    describe('#createSimMonitorFunc()', function () {
+        it('should return tx_rc_rate', function () {
+            var mp = {
+                id: 'tx-rate-pp1',
+                min_value: 0,
+                max_value: 100,
+                current_value: 0
+            };
+            var simmp = new simmp_module.SimMp({
+                "mp-mapper": { "tx-rate-pp1": "tx_rc_rate" }
+            });
+            assert(simmp != null, 'Could not instantiate simmp');
+            var func = simmp.createSimMonitorFunc(mp);
+            var value = func(0);
+            assert(value >= mp.min_value, 'value less than min value');
+            assert(value <= mp.max_value, 'value greater than max value');
+
+        });
+    });
+});
+
diff --git a/rwlaunchpad/plugins/CMakeLists.txt b/rwlaunchpad/plugins/CMakeLists.txt
new file mode 100644 (file)
index 0000000..dfc3ce0
--- /dev/null
@@ -0,0 +1,41 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Joshua Downer
+# Author(s): Austin Cormier
+# Creation Date: 5/12/2015
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(subdirs
+  cli
+  rwimagemgr
+  rwlaunchpadtasklet
+  rwautoscaler
+  rwmonitor
+  rwmonparam
+  rwnsm
+  rwresmgr
+  rwvnfm
+  rwvns
+  vala
+  yang
+  )
+
+##
+# Include the subdirs
+##
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwlaunchpad/plugins/cli/CMakeLists.txt b/rwlaunchpad/plugins/cli/CMakeLists.txt
new file mode 100644 (file)
index 0000000..0819297
--- /dev/null
@@ -0,0 +1,30 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 6/11/2016
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+##
+# Install the schema listing file
+##
+install(
+  FILES
+  cli_launchpad_schema_listing.txt
+  DESTINATION usr/data/manifest
+  COMPONENT ${PKG_LONG_NAME}
+)
diff --git a/rwlaunchpad/plugins/cli/cli_launchpad_schema_listing.txt b/rwlaunchpad/plugins/cli/cli_launchpad_schema_listing.txt
new file mode 100644 (file)
index 0000000..c64cff6
--- /dev/null
@@ -0,0 +1,55 @@
+ietf-inet-types
+ietf-l2-topology
+ietf-netconf-notifications
+ietf-network
+ietf-network-topology
+ietf-restconf-monitoring
+ietf-yang-types
+mano-types
+nsd
+nsr
+rw-base
+rwcal
+rw-cli-ext
+rw-cloud
+rw-config-agent
+rw-conman
+rw-debug
+rw-dts
+rw-dtsperf
+rw-dtsperfmgr
+rw-launchpad
+rw-image-mgmt
+rw-log
+rwlog-mgmt
+rw-manifest
+rw-memlog
+rw-mgmtagt
+rw-mgmt-schema
+rwmsg-data
+rw-netconf
+rw-restconf
+rw-notify-ext
+rw-nsd
+rw-nsm
+rw-nsr
+rw-pb-ext
+rw-resource-mgr
+rw-restportforward
+rwsdn
+rw-sdn
+rwshell-mgmt
+rw-sorch
+rw-topology
+rw-vcs
+rwvcs-types
+rw-vld
+rw-vlr
+rw-vnfd
+rw-vnfr
+rw-yang-types
+vld
+vlr
+vnfd
+vnffgd
+vnfr
diff --git a/rwlaunchpad/plugins/rwautoscaler/CMakeLists.txt b/rwlaunchpad/plugins/rwautoscaler/CMakeLists.txt
new file mode 100644 (file)
index 0000000..533588e
--- /dev/null
@@ -0,0 +1,41 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Varun Prasad
+# Creation Date: 2016/07/01
+# 
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwautoscaler)
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/${TASKLET_NAME}/__init__.py
+    rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
+    rift/tasklets/${TASKLET_NAME}/engine.py
+    rift/tasklets/${TASKLET_NAME}/scaling_operation.py
+    rift/tasklets/${TASKLET_NAME}/subscribers.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
diff --git a/rwlaunchpad/plugins/rwautoscaler/Makefile b/rwlaunchpad/plugins/rwautoscaler/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/__init__.py b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/__init__.py
new file mode 100644 (file)
index 0000000..3bdb192
--- /dev/null
@@ -0,0 +1 @@
+from .rwautoscaler import AutoScalerTasklet
diff --git a/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/engine.py b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/engine.py
new file mode 100644 (file)
index 0000000..d71aefc
--- /dev/null
@@ -0,0 +1,422 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import abc
+import asyncio
+import time
+
+import numpy
+
+from . import scaling_operation
+from . import subscribers as monp_subscriber
+from gi.repository import RwDts as rwdts
+import rift.mano.dts as subscriber
+
+
+class TimeSeries:
+    """Convenience class to hold the data for the sliding window size.
+    """
+    def __init__(self, threshold_time):
+        """
+        Args:
+            threshold_time (int): window size in secs
+        """
+
+        # 0 -> contains a list of all timestamps
+        # 1 -> contains a list of all values.
+        self._series = numpy.empty(shape=(2, 1), dtype='int64')
+        self.threshold_time = threshold_time
+
+    def add_value(self, timestamp, value):
+        timestamp = int(timestamp)
+
+        self._series = numpy.append(
+                self._series,
+                [[timestamp], [value]],
+                axis=1)
+
+        # Drop off stale value
+        # 0 -> timestamp
+        # 1 -> values
+        # Get all indexes that are outside the window, and drop them
+        window_values = self._series[0] >= (timestamp - self.threshold_time)
+        self._series = self._series[:, window_values]
+
+    def average(self):
+        return numpy.average(self._series[1])
+
+    def is_window_full(self):
+        """Verify if there is sufficient data for the current window.
+        """
+        if len(self._series[0]) <= 2:
+            return False
+
+        start_time = self._series[0][0]
+        end_time = self._series[0][-1]
+
+        if (end_time - start_time) >= self.threshold_time:
+            return True
+
+        return False
+
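+# Usage sketch (hypothetical timestamps/values; shows stale samples dropping
+# out of the window):
+#
+#     ts = TimeSeries(threshold_time=10)   # 10-second window
+#     ts.add_value(1000, 4)
+#     ts.add_value(1005, 8)
+#     ts.add_value(1011, 6)                # the sample at t=1000 is dropped
+#     ts.average()                         # mean of samples still in window
+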
+
+class ScalingCriteria:
+    class Delegate:
+        """Delegate: callbacks triggered by ScalingCriteris
+        """
+        @abc.abstractmethod
+        def threshold_out_breached(self, criteria_name, avg_value):
+            """Called when the value has crossed the scale-out-threshold
+
+            Args:
+                criteria_name (str): Criteria name
+                avg_value (float): The average value of the window.
+
+            """
+            pass
+
+        @abc.abstractmethod
+        def threshold_in_breached(self, criteria_name, avg_value):
+            """Called when the value has drops below the scale-in-threshold
+
+            Args:
+                criteria_name (str): Criteria name
+                avg_value (float): The average value of the window.
+
+            """
+
+            pass
+
+    def __init__(
+            self,
+            log,
+            dts,
+            loop,
+            nsr_id,
+            monp_id,
+            scaling_criteria,
+            window_size,
+            sampling_period=1,
+            delegate=None):
+        """
+        Args:
+            log : Log
+            dts : DTS handle
+            loop : Event loop
+            nsr_id (str): NSR ID
+            monp_id (str): Monitoring parameter ID
+            scaling_criteria : Yang data model
+            window_size (int): Window size in secs
+            sampling_period (int): Sampling period in secs
+            delegate : ScalingCriteria.Delegate
+        """
+        self.log = log
+        self.dts = dts
+        self.loop = loop
+        self.sampling_period = sampling_period
+        self.window_size = window_size
+        self.delegate = delegate
+        self.nsr_id, self.monp_id = nsr_id, monp_id
+
+        self._scaling_criteria = scaling_criteria
+        self._timeseries = TimeSeries(self.window_size)
+        # Flag when set, triggers scale-in request.
+        self._scl_in_limit_enabled = False
+
+        self.nsr_monp_sub = monp_subscriber.NsrMonParamSubscriber(
+                self.log,
+                self.dts,
+                self.loop,
+                self.nsr_id,
+                self.monp_id,
+                callback=self.add_value)
+
+    @property
+    def name(self):
+        return self._scaling_criteria.name
+
+    @property
+    def scale_in(self):
+        return self._scaling_criteria.scale_in_threshold
+
+    @property
+    def scale_out(self):
+        return self._scaling_criteria.scale_out_threshold
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self.nsr_monp_sub.register()
+
+    def deregister(self):
+        self.nsr_monp_sub.deregister()
+
+    def trigger_action(self, timestamp, avg):
+        """Triggers the scale out/in
+
+        Args:
+            timestamp : Time in unix epoch
+            avg : Average of all the values in the window.
+
+        """
+        if self._timeseries.average() >= self.scale_out:
+            # Enable the scale in limit, only when a scale-out has happened.
+            self._scl_in_limit_enabled = True
+            self.delegate.threshold_out_breached(self.name, avg)
+
+        elif self._timeseries.average() < self.scale_in and self._scl_in_limit_enabled:
+            self._scl_in_limit_enabled = False
+            self.delegate.threshold_in_breached(self.name, avg)
+
+
+    def add_value(self, monp, action):
+        """Callback from NsrMonParamSubscriber
+
+        Args:
+            monp : Yang model
+            action : rwdts.QueryAction
+        """
+        if action == rwdts.QueryAction.DELETE:
+            return
+
+        value = monp.value_integer
+        timestamp = time.time()
+
+        self._timeseries.add_value(timestamp, value)
+
+        if not self._timeseries.is_window_full():
+            return
+
+        if not self.delegate:
+            return
+
+        self.trigger_action(timestamp, self._timeseries.average())
+
+
+class ScalingPolicy(ScalingCriteria.Delegate):
+    class Delegate:
+        @abc.abstractmethod
+        def scale_in(self, scaling_group_name, nsr_id):
+            """Delegate called when all the criteria for scaling-in are met.
+
+            Args:
+                scaling_group_name (str): Scaling group name
+                nsr_id (str): NSR ID
+
+            """
+            pass
+
+        @abc.abstractmethod
+        def scale_out(self, scaling_group_name, nsr_id):
+            """Delegate called when all the criteria for scaling-out are met.
+
+            Args:
+                scaling_group_name (str): Scaling group name
+                nsr_id (str): NSR ID
+            """
+            pass
+
+    def __init__(
+            self,
+            log,
+            dts,
+            loop,
+            nsr_id,
+            nsd_id,
+            scaling_group_name,
+            scaling_policy,
+            store,
+            delegate=None):
+        """
+
+        Args:
+            log : Log
+            dts : DTS handle
+            loop : Event loop
+            nsr_id (str): NSR id
+            nsd_id (str): NSD id
+            scaling_group_name (str): Scaling group ref
+            scaling_policy : Yang model
+            store (SubscriberStore): Subscriber store instance
+            delegate (None, optional): ScalingPolicy.Delegate
+        """
+        self.loop = loop
+        self.log = log
+        self.dts = dts
+        self.nsd_id = nsd_id
+        self.nsr_id = nsr_id
+        self.scaling_group_name = scaling_group_name
+
+        self._scaling_policy = scaling_policy
+        self.delegate = delegate
+        self.store = store
+
+        self.monp_sub = monp_subscriber.NsrMonParamSubscriber(
+                                self.log,
+                                self.dts,
+                                self.loop,
+                                self.nsr_id,
+                                callback=self.handle_nsr_monp)
+
+        self.criteria_store = {}
+
+        # Timestamp at which the scale-in/scale-out request was generated.
+        self._last_triggered_time = None
+        self.scale_in_status = {cri.name: False for cri in self.scaling_criteria}
+        self.scale_out_status = {cri.name: False for cri in self.scaling_criteria}
+
+    def get_nsd_monp_cfg(self, nsr_monp):
+        """Get the NSD's mon-param config.
+        """
+        nsd = self.store.get_nsd(self.nsd_id)
+        for monp in nsd.monitoring_param:
+            if monp.id == nsr_monp.nsd_mon_param_ref:
+                return monp
+
+    def handle_nsr_monp(self, monp, action):
+        """Callback for NSR mon-param handler.
+
+        Args:
+            monp : Yang Model
+            action : rwdts.QueryAction
+        
+        """
+        def handle_create():
+            if monp.id in self.criteria_store:
+                return
+
+            nsd_monp = self.get_nsd_monp_cfg(monp)
+            for cri in self.scaling_criteria:
+                if cri.ns_monitoring_param_ref != nsd_monp.id:
+                    continue
+
+                # Create a criteria object as soon as the first monitoring data
+                # is published.
+                criteria = ScalingCriteria(
+                        self.log,
+                        self.dts,
+                        self.loop,
+                        self.nsr_id,
+                        monp.id,
+                        cri,
+                        self.threshold_time,  # window size
+                        delegate=self)
+
+                self.criteria_store[monp.id] = criteria
+
+                @asyncio.coroutine
+                def task():
+                    yield from criteria.register()
+
+                self.loop.create_task(task())
+
+        def handle_delete():
+            if monp.id in self.criteria_store:
+                self.criteria_store[monp.id].deregister()
+                del self.criteria_store[monp.id]
+
+        if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+            handle_create()
+        elif action == rwdts.QueryAction.DELETE:
+            handle_delete()
+
+
+    @property
+    def scaling_criteria(self):
+        return self._scaling_policy.scaling_criteria
+
+    @property
+    def scale_in_op(self):
+        optype = self._scaling_policy.scale_in_operation_type
+        return scaling_operation.get_operation(optype)
+
+    @property
+    def scale_out_op(self):
+        optype = self._scaling_policy.scale_out_operation_type
+        return scaling_operation.get_operation(optype)
+
+    @property
+    def name(self):
+        return self._scaling_policy.name
+
+    @property
+    def threshold_time(self):
+        return self._scaling_policy.threshold_time
+
+    @property
+    def cooldown_time(self):
+        return self._scaling_policy.cooldown_time
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self.monp_sub.register()
+
+    def deregister(self):
+        self.monp_sub.deregister()
+
+    def _is_in_cooldown(self):
+        """Verify if the current policy is in cooldown.
+        """
+        if not self._last_triggered_time:
+            return False
+
+        if (time.time() - self._last_triggered_time) >= self.cooldown_time:
+            return False
+
+        return True
+
+    def threshold_in_breached(self, criteria_name, value):
+        """Delegate callback when scale-in threshold is breached
+
+        Args:
+            criteria_name : Criteria name
+            value : Average value
+        """
+        if self._is_in_cooldown():
+            return
+
+        self.scale_in_status[criteria_name] = True
+
+        statuses = self.scale_in_status.values()
+        is_breached = self.scale_in_op(statuses)
+
+        if is_breached and self.delegate:
+            self._last_triggered_time = time.time()
+            # Reset all statuses
+            self.scale_in_status = {cri.name: False for cri in self.scaling_criteria}
+            self.delegate.scale_in(self.scaling_group_name, self.nsr_id)
+
+    def threshold_out_breached(self, criteria_name, value):
+        """Delegate callback when scale-out threshold is breached.
+        Args:
+            criteria_name : Criteria name
+            value : Average value
+        """
+        if self._is_in_cooldown():
+            return
+
+        self.scale_out_status[criteria_name] = True
+
+        statuses = self.scale_out_status.values()
+        is_breached = self.scale_out_op(statuses)
+
+        if is_breached and self.delegate:
+            self._last_triggered_time = time.time()
+            # Reset all statuses
+            self.scale_out_status = {cri.name: False for cri in self.scaling_criteria}
+            self.delegate.scale_out(self.scaling_group_name, self.nsr_id)
diff --git a/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/rwautoscaler.py b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/rwautoscaler.py
new file mode 100644 (file)
index 0000000..affa579
--- /dev/null
@@ -0,0 +1,230 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file rwautoscaler.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 01-Jul-2016
+
+"""
+import asyncio
+import collections
+
+from . import engine
+from . import subscribers as monp_subscriber
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('NsrYang', '1.0')
+
+from gi.repository import (
+        RwDts as rwdts,
+        NsrYang,
+        RwLaunchpadYang,
+        ProtobufC)
+import rift.mano.cloud
+import rift.mano.dts as subscriber
+import rift.tasklets
+
+
+
+class AutoScalerTasklet(rift.tasklets.Tasklet, engine.ScalingPolicy.Delegate):
+    """The main task of this Tasklet is to listen for NSR changes and once the
+    NSR is configured, ScalingPolicy is created.
+    """
+    def __init__(self, *args, **kwargs):
+
+        try:
+            super().__init__(*args, **kwargs)
+            self.store = None
+            self.monparam_store = None
+
+            self.nsr_sub = None
+            self.nsr_monp_subscribers = {}
+            self.instance_id_store = collections.defaultdict(list)
+
+        except Exception as e:
+            self.log.exception(e)
+
+    def start(self):
+        super().start()
+
+        self.log.debug("Registering with dts")
+
+        self.dts = rift.tasklets.DTS(
+                self.tasklet_info,
+                RwLaunchpadYang.get_schema(),
+                self.loop,
+                self.on_dts_state_change
+                )
+
+        self.store = subscriber.SubscriberStore.from_tasklet(self)
+        self.nsr_sub = subscriber.NsrCatalogSubscriber(self.log, self.dts, self.loop, self.handle_nsr)
+
+        self.log.debug("Created DTS Api GI Object: %s", self.dts)
+
+    def stop(self):
+        try:
+            self.dts.deinit()
+        except Exception as e:
+            self.log.exception(e)
+
+    @asyncio.coroutine
+    def init(self):
+        self.log.debug("creating vnfr subscriber")
+        yield from self.store.register()
+        yield from self.nsr_sub.register()
+
+    @asyncio.coroutine
+    def run(self):
+        pass
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Handle DTS state change
+
+        Take action according to current DTS state to transition application
+        into the corresponding application state
+
+        Arguments
+            state - current dts state
+
+        """
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self.dts.handle.set_state(next_state)
+
+    def scale_in(self, scaling_group_name, nsr_id):
+        """Delegate callback
+
+        Args:
+            scaling_group_name (str): Scaling group name to be scaled in
+            nsr_id (str): NSR id
+
+        """
+        self.log.info("Sending a scaling-in request for {} in NSR: {}".format(
+                scaling_group_name,
+                nsr_id))
+
+        @asyncio.coroutine
+        def _scale_in():
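+            # Instance ids are recorded by scale_out(); pop the most recent
+            # one so scale-in tears down the newest instance first.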
+            instance_id = self.instance_id_store[(scaling_group_name, nsr_id)].pop()
+
+            # Trigger an rpc
+            rpc_ip = NsrYang.YangInput_Nsr_ExecScaleIn.from_dict({
+                'nsr_id_ref': nsr_id,
+                'instance_id': instance_id,
+                'scaling_group_name_ref': scaling_group_name})
+
+            rpc_out = yield from self.dts.query_rpc(
+                        "/nsr:exec-scale-in",
+                        0,
+                        rpc_ip)
+
+        self.loop.create_task(_scale_in())
+
+    def scale_out(self, scaling_group_name, nsr_id):
+        """Delegate callback for scale out requests
+
+        Args:
+            scaling_group_name (str): Scaling group name
+            nsr_id (str): NSR ID
+        """
+        self.log.info("Sending a scaling-out request for {} in NSR: {}".format(
+                scaling_group_name,
+                nsr_id))
+
+        @asyncio.coroutine
+        def _scale_out():
+            # Trigger an rpc
+            rpc_ip = NsrYang.YangInput_Nsr_ExecScaleOut.from_dict({
+                'nsr_id_ref': nsr_id,
+                'scaling_group_name_ref': scaling_group_name})
+
+            itr = yield from self.dts.query_rpc("/nsr:exec-scale-out", 0, rpc_ip)
+
+            key = (scaling_group_name, nsr_id)
+            for res in itr:
+                result = yield from res
+                rpc_out = result.result
+                self.instance_id_store[key].append(rpc_out.instance_id)
+
+                self.log.info("Created new scaling group {} with instance id {}".format(
+                        scaling_group_name,
+                        rpc_out.instance_id))
+
+        self.loop.create_task(_scale_out())
+
+
+    def handle_nsr(self, nsr, action):
+        """Callback for NSR opdata changes. Creates a publisher for every
+        NS that moves to config state.
+
+        Args:
+            nsr (RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr): Ns Opdata
+            action (rwdts.QueryAction): Action type of the change.
+        """
+        def nsr_create():
+            if nsr.config_status == "configured" and nsr.ns_instance_config_ref not in self.nsr_monp_subscribers:
+                nsr_id = nsr.ns_instance_config_ref
+                self.nsr_monp_subscribers[nsr_id] = []
+                nsd = self.store.get_nsd(nsr.nsd_ref)
+                @asyncio.coroutine
+                def task():
+                    for scaling_group in nsd.scaling_group_descriptor:
+                        for policy_cfg in scaling_group.scaling_policy:
+                            policy = engine.ScalingPolicy(
+                                self.log, self.dts, self.loop,
+                                nsr.ns_instance_config_ref,
+                                nsr.nsd_ref,
+                                scaling_group.name,
+                                policy_cfg,
+                                self.store,
+                                delegate=self)
+                            self.nsr_monp_subscribers[nsr_id].append(policy)
+                            yield from policy.register()
+
+                self.loop.create_task(task())
+
+
+        def nsr_delete():
+            if nsr.ns_instance_config_ref in self.nsr_monp_subscribers:
+                policies = self.nsr_monp_subscribers[nsr.ns_instance_config_ref]
+                for policy in policies:
+                    policy.deregister()
+                del self.nsr_monp_subscribers[nsr.ns_instance_config_ref]
+
+        if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+            nsr_create()
+        elif action == rwdts.QueryAction.DELETE:
+            nsr_delete()
diff --git a/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/scaling_operation.py b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/scaling_operation.py
new file mode 100644 (file)
index 0000000..c5ffb3c
--- /dev/null
@@ -0,0 +1,41 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import abc
+
+def get_operation(operation):
+
+    op_map = {"AND": AndScalingOperation(),
+              "OR": OrScalingOperation()}
+
+    return op_map[operation]
+
+
+class ScalingOperation:
+    @abc.abstractmethod
+    def __call__(self, statuses):
+        pass
+
+
+class AndScalingOperation():
+    def __call__(self, statuses):
+        return all(statuses)
+
+
+class OrScalingOperation():
+    def __call__(self, statuses):
+        return any(statuses)
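+
+
+# Usage sketch (hypothetical statuses):
+#
+#     op = get_operation("AND")
+#     op([True, True, False])             # -> False: all criteria must breach
+#     get_operation("OR")([True, False])  # -> True: any single breach suffices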
diff --git a/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/subscribers.py b/rwlaunchpad/plugins/rwautoscaler/rift/tasklets/rwautoscaler/subscribers.py
new file mode 100644 (file)
index 0000000..04185b6
--- /dev/null
@@ -0,0 +1,40 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import rift.mano.dts as mano_dts
+
+
+class NsrMonParamSubscriber(mano_dts.AbstractOpdataSubscriber):
+    """Registers for NSR monitoring parameter changes.
+    
+    Attributes:
+        monp_id (str): Monitoring Param ID
+        nsr_id (str): NSR ID
+    """
+    def __init__(self, log, dts, loop, nsr_id, monp_id=None, callback=None):
+        super().__init__(log, dts, loop, callback)
+        self.nsr_id = nsr_id
+        self.monp_id = monp_id
+
+    def get_xpath(self):
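+        # For example (hypothetical ids), nsr_id='ab12' and monp_id='1' give:
+        #   D,/nsr:ns-instance-opdata/nsr:nsr[nsr:ns-instance-config-ref='ab12']
+        #       /nsr:monitoring-param[nsr:id='1']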
+        return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
+            "[nsr:ns-instance-config-ref='{}']".format(self.nsr_id) +
+            "/nsr:monitoring-param" +
+            ("[nsr:id='{}']".format(self.monp_id) if self.monp_id else ""))
+
+
+
diff --git a/rwlaunchpad/plugins/rwautoscaler/rwautoscaler.py b/rwlaunchpad/plugins/rwautoscaler/rwautoscaler.py
new file mode 100644 (file)
index 0000000..7fc24ad
--- /dev/null
@@ -0,0 +1,25 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwautoscaler
+
+class Tasklet(rift.tasklets.rwautoscaler.AutoScalerTasklet):
+    pass
diff --git a/rwlaunchpad/plugins/rwautoscaler/test/utest_autoscaler_dts.py b/rwlaunchpad/plugins/rwautoscaler/test/utest_autoscaler_dts.py
new file mode 100644 (file)
index 0000000..78342ce
--- /dev/null
@@ -0,0 +1,350 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+import asyncio
+import os
+import sys
+import unittest
+import random
+
+import xmlrunner
+import unittest.mock as mock
+
+import rift.test.dts
+import rift.tasklets.rwautoscaler.engine as engine
+import gi
+gi.require_version('RwDtsYang', '1.0')
+from gi.repository import (
+        RwNsrYang,
+        NsrYang,
+        NsdYang,
+        RwLaunchpadYang as launchpadyang,
+        RwVnfrYang,
+        RwVnfdYang,
+        RwNsdYang
+        )
+
+
+ScalingCriteria = NsdYang.YangData_Nsd_NsdCatalog_Nsd_ScalingGroupDescriptor_ScalingPolicy_ScalingCriteria
+ScalingPolicy = NsdYang.YangData_Nsd_NsdCatalog_Nsd_ScalingGroupDescriptor_ScalingPolicy
+
+
+class MockDelegate(engine.ScalingCriteria.Delegate):
+    def __init__(self):
+        self.scale_in_called = 0
+        self.scale_out_called = 0
+
+    def scale_in(self, name, val):
+        print ("=============================================")
+        print ("Scaling IN")
+        print ("=============================================")
+        self.scale_in_called += 1
+
+    def scale_out(self, name, val):
+        print ("=============================================")
+        print ("Scaling OUT")
+        print ("=============================================")
+        self.scale_out_called += 1
+
+
+class MockStore():
+    def __init__(self, aggregation_type="AVERAGE", legacy=False):
+        self.aggregation_type = aggregation_type
+        self.legacy = legacy
+        self.threshold_time = 3
+
+    def __call__(self):
+        store = mock.MagicMock()
+
+        mock_vnfd =  RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict({
+            'id': "1",
+            'monitoring_param': [
+                {'description': 'no of ping requests',
+                 'group_tag': 'Group1',
+                 'http_endpoint_ref': 'api/v1/ping/stats',
+                 'id': '1',
+                 'json_query_method': 'NAMEKEY',
+                 'name': 'ping-request-tx-count',
+                 'units': 'packets',
+                 'value_type': 'INT',
+                 'widget_type': 'COUNTER'},
+                {'description': 'no of ping responses',
+                 'group_tag': 'Group1',
+                 'http_endpoint_ref': 'api/v1/ping/stats',
+                 'id': '2',
+                 'json_query_method': 'NAMEKEY',
+                 'name': 'ping-response-rx-count',
+                 'units': 'packets',
+                 'value_type': 'INT',
+                 'widget_type': 'COUNTER'}],
+            })
+
+        store.get_vnfd = mock.MagicMock(return_value=mock_vnfd)
+
+        mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({
+            'id': '1',
+            'vnfd_ref': '1',
+            })
+        store.get_vnfr = mock.MagicMock(return_value=mock_vnfr)
+
+        mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict({
+            'ns_instance_config_ref': "1",
+            'name_ref': "Foo",
+            'nsd_ref': '1',
+            'config_status': 'configured',
+            'constituent_vnfr_ref': [{'vnfr_id': mock_vnfr.id}],
+            })
+
+        store.get_nsr = mock.MagicMock(return_value=mock_nsr)
+        store.nsr = [mock_nsr]
+
+        monp_cfg = [{'aggregation_type': self.aggregation_type,
+                 'id': '1',
+                 'name': 'ping-request-tx-count',
+                 'value_type': 'INT',
+                 'vnfd_monitoring_param': [
+                    {'vnfd_id_ref': '1',
+                     'vnfd_monitoring_param_ref': '1'},
+                    {'vnfd_id_ref': '1',
+                     'vnfd_monitoring_param_ref': '2'}]
+                },
+                {'aggregation_type': self.aggregation_type,
+                 'id': '2',
+                 'name': 'ping-request-tx-count',
+                 'value_type': 'INT',
+                 'vnfd_monitoring_param': [
+                    {'vnfd_id_ref': '1',
+                     'vnfd_monitoring_param_ref': '1'},
+                    {'vnfd_id_ref': '1',
+                     'vnfd_monitoring_param_ref': '2'}]
+                }]
+
+        scale_in_val = 100
+        scale_out_val = 200
+
+        mock_nsd = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd.from_dict({
+            'id': '1',
+            'monitoring_param': (monp_cfg if not self.legacy else []),
+            'constituent_vnfd': [{'member_vnf_index': 1,
+                 'start_by_default': True,
+                 'vnfd_id_ref': '1'},
+                {'member_vnf_index': 2,
+                 'start_by_default': True,
+                 'vnfd_id_ref': '1'}],
+            'scaling_group_descriptor': [{
+                    "name": "http",
+                    "vnfd_member": [{
+                        'member_vnf_index_ref': 1,
+                    }],
+                    "scaling_policy": [{
+                        "scaling_type": "automatic",
+                        "enabled": True,
+                        "threshold_time": self.threshold_time,
+                        "cooldown_time": 60,
+                        "scale_out_operation_type": "AND",
+                        "scale_in_operation_type": "AND",
+                        "scaling_criteria": [{
+                            "name": "1",
+                            "scale_in_threshold": scale_in_val,
+                            "scale_out_threshold": scale_out_val,
+                            "ns_monitoring_param_ref": "1"
+                        },
+                        {
+                            "name": "2",
+                            "scale_in_threshold": scale_in_val,
+                            "scale_out_threshold": scale_out_val,
+                            "ns_monitoring_param_ref": "2"
+                        }]
+                    }]
+                }]
+            })
+
+        store.get_nsd = mock.MagicMock(return_value=mock_nsd)
+
+        return store
+
+
+class AutoscalarDtsTestCase(rift.test.dts.AbstractDTSTest):
+    @classmethod
+    def configure_schema(cls):
+        return launchpadyang.get_schema()
+
+    @classmethod
+    def configure_timeout(cls):
+        return 240
+
+    def configure_test(self, loop, test_id):
+        self.log.debug("STARTING - %s", test_id)
+        self.tinfo = self.new_tinfo(str(test_id))
+        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+
+        self.tinfo_sub = self.new_tinfo(str(test_id) + "_sub")
+        self.dts_sub = rift.tasklets.DTS(self.tinfo_sub, self.schema, self.loop)
+
+        self.mock_store = MockStore()
+
+    def tearDown(self):
+        super().tearDown()
+
+    @asyncio.coroutine
+    def _populate_mock_values(self, criterias, nsr_id, floor, ceil):
+        # Mock publish
+        # Verify Scale in AND operator
+        NsMonParam = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_MonitoringParam
+
+        publisher = rift.test.dts.DescriptorPublisher(self.log, self.dts, self.loop)
+
+        for criteria in criterias:
+            monp_id = criteria.ns_monitoring_param_ref
+            w_xpath = "D,/nsr:ns-instance-opdata/nsr:nsr"
+            w_xpath = w_xpath + "[nsr:ns-instance-config-ref='{}']/nsr:monitoring-param".format(nsr_id)
+            xpath =  w_xpath + "[nsr:id ='{}']".format(monp_id)
+
+            for i in range(self.mock_store.threshold_time + 1):
+                value = random.randint(floor, ceil)
+
+                monp = NsMonParam.from_dict({
+                        'id': monp_id,
+                        'value_integer': value,
+                        'nsd_mon_param_ref': monp_id})
+
+                yield from publisher.publish(w_xpath, xpath, monp)
+                yield from asyncio.sleep(1)
+
+    @rift.test.dts.async_test
+    def test_scale_in(self):
+        store = self.mock_store()
+
+        # CFG
+        floor, ceil = 0, 100
+        nsr_id = store.get_nsr().ns_instance_config_ref
+        policy_cfg = store.get_nsd().scaling_group_descriptor[0].scaling_policy[0]
+        scaling_name = store.get_nsd().scaling_group_descriptor[0].name
+
+
+        def make_policy():
+            policy = engine.ScalingPolicy(
+                    self.log, self.dts, self.loop,
+                    store.get_nsr().ns_instance_config_ref, store.get_nsd().id,
+                    scaling_name, policy_cfg, store, delegate=mock_delegate)
+
+            return policy
+
+        @asyncio.coroutine
+        def scale_out(policy):
+            yield from self._populate_mock_values(policy.scaling_criteria, nsr_id, 200, 300)
+            # HACK TO RESET THE COOLDOWN TIME
+            policy._last_triggered_time = 0
+
+        # Test 1: Scale in shouldn't be called, unless a scale-out happens
+        mock_delegate = MockDelegate()
+        policy = make_policy()
+        yield from policy.register()
+        yield from self._populate_mock_values(policy.scaling_criteria, nsr_id, floor, ceil)
+        assert mock_delegate.scale_in_called == 0
+
+        # Test 2: AND operation 
+        yield from scale_out(policy)
+        yield from self._populate_mock_values(policy.scaling_criteria, nsr_id, floor, ceil)
+        assert mock_delegate.scale_in_called == 1
+
+        # Test 3: AND operation failure
+        mock_delegate = MockDelegate()
+        policy = make_policy()
+        yield from policy.register()
+        yield from scale_out(policy)
+        yield from self._populate_mock_values([policy.scaling_criteria[0]], nsr_id, floor, ceil)
+        assert mock_delegate.scale_in_called == 0
+
+
+        # Test 4: OR operation
+        mock_delegate = MockDelegate()
+        policy = make_policy()
+        policy_cfg.scale_in_operation_type = "OR"
+        yield from policy.register()
+        yield from scale_out(policy)
+        yield from self._populate_mock_values([policy.scaling_criteria[0]], nsr_id, floor, ceil)
+        assert mock_delegate.scale_in_called == 1
+
+    @rift.test.dts.async_test
+    def _test_scale_out(self):
+        """ Tests scale out
+
+        Asserts:
+            1. Scale out
+            2. Scale out doesn't happen during cooldown
+            3. AND operation 
+            4. OR operation.
+        """
+        store = self.mock_store()
+
+        # CFG
+        floor, ceil = 200, 300
+        nsr_id = store.get_nsr().ns_instance_config_ref
+        policy_cfg = store.get_nsd().scaling_group_descriptor[0].scaling_policy[0]
+        scaling_name = store.get_nsd().scaling_group_descriptor[0].name
+
+
+        def make_policy():
+            policy = engine.ScalingPolicy(
+                    self.log, self.dts, self.loop,
+                    store.get_nsr().ns_instance_config_ref, store.get_nsd().id,
+                    scaling_name, policy_cfg, store, delegate=mock_delegate)
+
+            return policy
+
+        # Test 1: Scale out should be called only when both the criteria are
+        # exceeding.
+        mock_delegate = MockDelegate()
+        policy = make_policy()
+        yield from policy.register()
+        yield from self._populate_mock_values(policy.scaling_criteria, nsr_id, floor, ceil)
+        assert mock_delegate.scale_out_called == 1
+
+        # Test 2: Assert if Scale out doesn't happen when only one exceeds
+        mock_delegate = MockDelegate()
+        policy = make_policy()
+        yield from policy.register()
+        yield from self._populate_mock_values([policy.scaling_criteria[0]], nsr_id, floor, ceil)
+        assert mock_delegate.scale_out_called == 0
+
+        # Test 3: OR operation
+        mock_delegate = MockDelegate()
+        policy_cfg.scale_out_operation_type = "OR"
+        policy = make_policy()
+        yield from policy.register()
+        yield from  self._populate_mock_values([policy.scaling_criteria[0]], nsr_id, floor, ceil)
+        assert mock_delegate.scale_out_called == 1
+
+
+def main():
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unittest_args = parser.parse_known_args()
+    if args.no_runner:
+        runner = None
+
+
+    unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/plugins/rwimagemgr/CMakeLists.txt b/rwlaunchpad/plugins/rwimagemgr/CMakeLists.txt
new file mode 100644 (file)
index 0000000..58b3429
--- /dev/null
@@ -0,0 +1,94 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 2016/06/23
+# 
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwimagemgrtasklet)
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/rwimagemgr/__init__.py
+    rift/tasklets/rwimagemgr/glance_client.py
+    rift/tasklets/rwimagemgr/glance_proxy_server.py
+    rift/tasklets/rwimagemgr/tasklet.py
+    rift/tasklets/rwimagemgr/upload.py
+    rift/tasklets/rwimagemgr/lib/__init__.py
+    rift/tasklets/rwimagemgr/lib/quickproxy/__init__.py
+    rift/tasklets/rwimagemgr/lib/quickproxy/proxy.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
+
+rift_python_install_tree(
+  FILES
+    rift/imagemgr/__init__.py
+    rift/imagemgr/client.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
+
+install(
+    PROGRAMS
+        bin/glance_start_wrapper
+    DESTINATION
+        usr/bin
+    COMPONENT ${PKG_LONG_NAME}
+    )
+
+if($ENV{RIFT_PLATFORM} MATCHES "fc20")
+  install(
+      FILES
+          etc/fc20/glance-api.conf
+          etc/fc20/glance-registry.conf
+          etc/fc20/glance-scrubber.conf
+          etc/fc20/glance-cache.conf
+          etc/fc20/policy.json
+          etc/fc20/schema-image.json
+          etc/fc20/glance-api-dist-paste.ini
+      DESTINATION
+          etc/glance
+      COMPONENT ${PKG_LONG_NAME}
+      )
+elseif($ENV{RIFT_PLATFORM} MATCHES "ub16")
+  install(
+      FILES
+          etc/ub16/glance-api.conf
+          etc/ub16/glance-api-paste.ini
+          etc/ub16/glance-registry.conf
+          etc/ub16/glance-registry-paste.ini
+          etc/ub16/glance-cache.conf
+          etc/ub16/glance-manage.conf
+          etc/ub16/policy.json
+          etc/ub16/schema-image.json
+      DESTINATION
+          etc/glance
+      COMPONENT ${PKG_LONG_NAME}
+      )
+else()
+    message(FATAL_ERROR "Unknown platform $ENV{RIFT_PLATFORM}")
+endif()
+
+rift_add_subdirs(test)
diff --git a/rwlaunchpad/plugins/rwimagemgr/Makefile b/rwlaunchpad/plugins/rwimagemgr/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,40 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
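+#
+# Example: $(call find_upward,"Makefile.top") expands to the full path of
+# the nearest Makefile.top at or above the current directory, or to the
+# empty string if no such file exists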
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/rwimagemgr/bin/glance_start_wrapper b/rwlaunchpad/plugins/rwimagemgr/bin/glance_start_wrapper
new file mode 100755 (executable)
index 0000000..3294aee
--- /dev/null
@@ -0,0 +1,135 @@
+#!/bin/bash
+#
+# A single executable which starts necessary glance server components
+#
+# Create a workspace-specific glance config directory and
+# wrap the glance-api and glance-registry procs.
+#
+#
+# USAGE: ./glance_start_wrapper <glance_conf_dir>
+#
+#
+
+if [ $# -ne 1 ]; then
+    echo "error: specifiy the glance conf dir"
+    exit 1
+fi
+
+src_conf_dir="$1"
+if [ ! -d "${src_conf_dir}" ]; then
+    echo "error: glance conf dir does not exist"
+    exit 1
+fi
+
+if [ -z ${RIFT_INSTALL+x} ]; then
+    echo "error: RIFT_INSTALL is not set"
+    exit 1
+fi
+
+if [ -z "${RIFT_VAR_ROOT}" ]; then
+    if [ -n "${RIFT_INSTALL}" ]; then
+        RIFT_VAR_ROOT="${RIFT_INSTALL}/var"
+    else
+        RIFT_VAR_ROOT="$(mktemp -d)"
+        echo "warning: RIFT_VAR_ROOT or RIFT_INSTALL not provided, using temporary directory"
+    fi
+fi
+
+dest_conf_dir="${RIFT_VAR_ROOT}/glance/conf"
+echo "destination glance conf directory: ${dest_conf_dir}"
+
+if [ -e "${dest_conf_dir}" ]; then
+    echo "removing ${dest_conf_dir}"
+    #rm -rf "${dest_conf_dir}"
+fi
+
+mkdir -p "${dest_conf_dir}"
+
+for conf_file in ${src_conf_dir}/*; do
+    cp "${conf_file}" ${dest_conf_dir}/
+    dest_file="${dest_conf_dir}/$(basename ${conf_file})"
+    sed -i "s|{RIFT_VAR_ROOT}|${RIFT_VAR_ROOT}|g" "${dest_file}"
+    sed -i "s|{RIFT_INSTALL}|${RIFT_INSTALL}|g" "${dest_file}"
+done
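+# For example, a template line such as
+#   log_file={RIFT_VAR_ROOT}/log/glance/api.log
+# is rewritten with this workspace's actual ${RIFT_VAR_ROOT} value.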
+
+mkdir -p ${RIFT_VAR_ROOT}/log/glance
+
+registry_pid=0
+api_pid=0
+killing=false
+
+function kill_children(){
+    if ${killing}; then
+        return
+    fi
+    killing=true
+
+    if [ ${registry_pid} -ne 0 ]; then
+        kill ${registry_pid} 2>/dev/null
+    fi
+
+    if [ ${api_pid} -ne 0 ]; then
+        kill ${api_pid} 2>/dev/null
+    fi
+
+    sleep 2
+
+    if [ ${registry_pid} -ne 0 ]; then
+        echo "KILL registry pid: ${registry_pid}"
+        kill -9 ${registry_pid} 2>/dev/null
+    fi
+
+    if [ ${api_pid} -ne 0 ]; then
+        echo "KILL api pid: ${api_pid}"
+        kill -9 ${api_pid} 2>/dev/null
+    fi
+
+    exit 1
+}
+
+
+function kill_group(){
+    # Kill any remaining children
+    kill_children
+
+    # Kill myself
+    kill -9 0
+}
+
+trap "kill_children" SIGHUP SIGINT SIGTERM SIGTRAP EXIT
+trap "kill_group" SIGCHLD
+
+glance-registry --config-dir ${dest_conf_dir} --config-file ${dest_conf_dir}/glance-registry.conf >/dev/null 2>&1 &
+registry_pid="$!"
+# "$?" after launching a background job is always 0, so probe the process instead
+if ! kill -0 ${registry_pid} 2>/dev/null; then
+    echo "ERROR: Glance registry startup failed!" >&2
+    exit 1
+fi
+
+glance-api --config-dir ${dest_conf_dir} --config-file ${dest_conf_dir}/glance-api.conf >/dev/null 2>&1 &
+api_pid="$!"
+if ! kill -0 ${api_pid} 2>/dev/null; then
+    echo "ERROR: Glance api startup failed!" >&2
+    exit 1
+fi
+
+sleep 5
+
+manage_cfg=""
+if [ -e "${dest_conf_dir}/glance-manage.conf" ]; then
+    manage_cfg="--config-file ${dest_conf_dir}/glance-manage.conf"
+fi
+
+# run db_sync in the foreground so that its exit status can be checked
+glance-manage --config-dir ${dest_conf_dir} ${manage_cfg} db_sync >/dev/null 2>&1
+if [ $? -ne 0 ]; then
+    echo "ERROR: glance-manage db_sync failed" >&2
+    exit 1
+fi
+
+while true; do
+    sleep 1
+done
diff --git a/rwlaunchpad/plugins/rwimagemgr/bin/upload_image.py b/rwlaunchpad/plugins/rwimagemgr/bin/upload_image.py
new file mode 100755 (executable)
index 0000000..3870c50
--- /dev/null
@@ -0,0 +1,107 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import asyncio
+import logging
+import sys
+
+from rift.tasklets.rwimagemgr import tasklet, glance_client
+from rift.mano.cloud import accounts
+
+import gi
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwLog', '1.0')
+from gi.repository import (
+        RwCloudYang,
+        RwLog,
+        )
+
+openstack_info = {
+        'username': 'pluto',
+        'password': 'mypasswd',
+        'project_name': 'demo',
+        'auth_url': 'http://10.66.4.18:5000/v3',
+        'mgmt_network': 'private'
+        }
+
+
+def create_account(log):
+    account_msg = RwCloudYang.CloudAccount.from_dict(dict(
+        name="openstack",
+        account_type="openstack",
+        openstack=dict(
+            key=openstack_info["username"],
+            secret=openstack_info["password"],
+            tenant=openstack_info["project_name"],
+            auth_url=openstack_info["auth_url"]
+            )
+        )
+    )
+
+    account = accounts.CloudAccount(
+            log,
+            RwLog.Ctx.new(__file__),
+            account_msg
+            )
+
+    return account
+
+
+def parse_args(argv=sys.argv[1:]):
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--image-name", required=True)
+    parser.add_argument("--image-checksum", required=True)
+
+    return parser.parse_args(argv)
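+
+# Sample invocation (argument values are illustrative):
+#   ./upload_image.py --image-name fedora-x86_64.qcow2 \
+#                     --image-checksum 8a40c862b5735975d82605c1dd395796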
+
+
+def main():
+    args = parse_args()
+    logging.basicConfig(level=logging.DEBUG)
+    log = logging.getLogger("upload_image.py")
+    loop = asyncio.get_event_loop()
+    cloud_account = create_account(log)
+    client = glance_client.OpenstackGlanceClient.from_token(
+            log, "127.0.0.1", 9292, "test"
+            )
+    task_creator = tasklet.GlanceClientUploadTaskCreator(
+            log, loop, {"openstack": cloud_account}, client,
+            )
+
+    tasks = loop.run_until_complete(
+            task_creator.create_tasks(
+                ["openstack"],
+                args.image_name,
+                args.image_checksum
+                )
+            )
+
+    log.debug("Created tasks: %s", tasks)
+
+    log.debug("uploading images")
+    loop.run_until_complete(asyncio.wait([t.start() for t in tasks], loop=loop))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-api-dist-paste.ini b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-api-dist-paste.ini
new file mode 100644 (file)
index 0000000..4f8f659
--- /dev/null
@@ -0,0 +1,77 @@
+# Use this pipeline for no auth or image caching - DEFAULT
+[pipeline:glance-api]
+pipeline = versionnegotiation unauthenticated-context rootapp
+
+# Use this pipeline for image caching and no auth
+[pipeline:glance-api-caching]
+pipeline = versionnegotiation unauthenticated-context cache rootapp
+
+# Use this pipeline for caching w/ management interface but no auth
+[pipeline:glance-api-cachemanagement]
+pipeline = versionnegotiation unauthenticated-context cache cachemanage rootapp
+
+# Use this pipeline for keystone auth
+[pipeline:glance-api-keystone]
+pipeline = versionnegotiation authtoken context rootapp
+
+# Use this pipeline for keystone auth with image caching
+[pipeline:glance-api-keystone+caching]
+pipeline = versionnegotiation authtoken context cache rootapp
+
+# Use this pipeline for keystone auth with caching and cache management
+[pipeline:glance-api-keystone+cachemanagement]
+pipeline = versionnegotiation authtoken context cache cachemanage rootapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user.
+[pipeline:glance-api-trusted-auth]
+pipeline = versionnegotiation context rootapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user and uses cache management
+[pipeline:glance-api-trusted-auth+cachemanagement]
+pipeline = versionnegotiation context cache cachemanage rootapp
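+
+# The pipeline actually served is selected by the "flavor" option in the
+# [paste_deploy] section of glance-api.conf: an empty flavor uses the
+# default [pipeline:glance-api] above, flavor=keystone selects
+# [pipeline:glance-api-keystone], and so on.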
+
+[composite:rootapp]
+paste.composite_factory = glance.api:root_app_factory
+/: apiversions
+/v1: apiv1app
+/v2: apiv2app
+
+[app:apiversions]
+paste.app_factory = glance.api.versions:create_resource
+
+[app:apiv1app]
+paste.app_factory = glance.api.v1.router:API.factory
+
+[app:apiv2app]
+paste.app_factory = glance.api.v2.router:API.factory
+
+[filter:versionnegotiation]
+paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
+
+[filter:cache]
+paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory
+
+[filter:cachemanage]
+paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory
+
+[filter:context]
+paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
+
+[filter:unauthenticated-context]
+paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
+
+[filter:authtoken]
+paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
+delay_auth_decision = true
+
+[filter:gzip]
+paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-api.conf b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-api.conf
new file mode 100644 (file)
index 0000000..4f11820
--- /dev/null
@@ -0,0 +1,449 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+verbose=True
+
+# Show debugging output in logs (sets DEBUG log level output)
+debug=True
+
+# Which backend scheme should Glance use by default when one is not
+# specified in a request to add a new image to Glance? Known schemes are
+# determined by the known_stores option below.
+# Default: 'file'
+default_store = file
+
+# List of which store classes and store class locations are
+# currently known to glance at startup.
+#known_stores = glance.store.filesystem.Store,
+#               glance.store.http.Store,
+#               glance.store.rbd.Store,
+#               glance.store.s3.Store,
+#               glance.store.swift.Store,
+#               glance.store.sheepdog.Store,
+#               glance.store.cinder.Store,
+
+
+# Maximum image size (in bytes) that may be uploaded through the
+# Glance API server. Defaults to 1 TB.
+# WARNING: this value should only be increased after careful consideration
+# and must be set to a value under 8 EB (9223372036854775808).
+#image_size_cap = 1099511627776
+
+# Address to bind the API server
+bind_host = 0.0.0.0
+
+# Port to bind the API server to
+bind_port = 9292
+
+# Log to this file. Make sure you do not set the same log
+# file for both the API and registry servers!
+log_file={RIFT_VAR_ROOT}/log/glance/api.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# API to use for accessing data. Default value points to sqlalchemy
+# package, it is also possible to use: glance.db.registry.api
+data_api = glance.db.sqlalchemy.api
+
+# SQLAlchemy connection string for the reference implementation
+# registry server. Any valid SQLAlchemy connection string is fine.
+# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
+#sql_connection=mysql://glance:glance@localhost/glance
+sql_connection=sqlite:///{RIFT_VAR_ROOT}/glance/glance-api.db
+
+# Period in seconds after which SQLAlchemy should reestablish its connection
+# to the database.
+#
+# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
+# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
+# notice this, you can lower this value to ensure that SQLAlchemy reconnects
+# before MySQL can drop the connection.
+sql_idle_timeout = 3600
+
+# Number of Glance API worker processes to start.
+# On machines with more than one CPU increasing this value
+# may improve performance (especially if using SSL with
+# compression turned on). It is typically recommended to set
+# this value to the number of CPUs present on your machine.
+workers = 1
+
+# Role used to identify an authenticated user as administrator
+#admin_role = admin
+
+# Allow unauthenticated users to access the API with read-only
+# privileges. This only applies when using ContextMiddleware.
+allow_anonymous_access = True
+
+# Allow access to version 1 of glance api
+enable_v1_api = True
+
+# Allow access to version 2 of glance api
+enable_v2_api = True
+
+# Return the URL that references where the data is stored on
+# the backend storage system.  For example, if using the
+# file system store a URL of 'file:///path/to/image' will
+# be returned to the user in the 'direct_url' meta-data field.
+# The default value is false.
+#show_image_direct_url = False
+
+# Send headers containing user and tenant information when making requests to
+# the v1 glance registry. This allows the registry to function as if a user is
+# authenticated without the need to authenticate a user itself using the
+# auth_token middleware.
+# The default value is false.
+#send_identity_headers = False
+
+# Supported values for the 'container_format' image attribute
+container_formats=ami,ari,aki,bare,ovf
+
+# Supported values for the 'disk_format' image attribute
+disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso
+
+# Directory to use for lock files. Default to a temp directory
+# (string value). This setting needs to be the same for both
+# glance-scrubber and glance-api.
+#lock_path=<None>
+#
+# Property Protections config file
+# This file contains the rules for property protections and the roles
+# associated with it.
+# If this config value is not specified, by default, property protections
+# won't be enforced.
+# If a value is specified and the file is not found, then an
+# HTTPInternalServerError will be thrown.
+#property_protection_file =
+
+# Set a system wide quota for every user.  This value is the total number
+# of bytes that a user can use across all storage systems.  A value of
+# 0 means unlimited.
+#user_storage_quota = 0
+
+# ================= Syslog Options ============================
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+#use_syslog = False
+
+# Facility to use. If unset defaults to LOG_USER.
+#syslog_log_facility = LOG_LOCAL0
+
+# ================= SSL Options ===============================
+
+# Certificate file to use when starting API server securely
+#cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+#key_file = /path/to/keyfile
+
+# CA certificate file to use to verify connecting clients
+#ca_file = /path/to/cafile
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
+
+# ============ Registry Options ===============================
+
+# Address to find the registry server
+registry_host = 0.0.0.0
+
+# Port the registry server is listening on
+registry_port = 9191
+
+# What protocol to use when connecting to the registry server?
+# Set to https for secure HTTP communication
+registry_client_protocol = http
+
+# The path to the key file to use in SSL connections to the
+# registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file
+#registry_client_key_file = /path/to/key/file
+
+# The path to the cert file to use in SSL connections to the
+# registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file
+#registry_client_cert_file = /path/to/cert/file
+
+# The path to the certifying authority cert file to use in SSL connections
+# to the registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file
+#registry_client_ca_file = /path/to/ca/file
+
+# When using SSL in connections to the registry server, do not require
+# validation via a certifying authority. This is the registry's equivalent of
+# specifying --insecure on the command line using glanceclient for the API
+# Default: False
+#registry_client_insecure = False
+
+# The period of time, in seconds, that the API server will wait for a registry
+# request to complete. A value of '0' implies no timeout.
+# Default: 600
+#registry_client_timeout = 600
+
+# Whether to automatically create the database tables.
+# Default: False
+db_auto_create = True
+
+# Enable DEBUG log messages from sqlalchemy which prints every database
+# query and response.
+# Default: False
+sqlalchemy_debug = True
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when images are created, updated or deleted.
+# There are four methods of sending notifications: logging (via the
+# log_file directive), rabbit (via a rabbitmq queue), qpid (via a Qpid
+# message queue), or noop (no notifications sent, the default)
+notifier_strategy=noop
+
+# Configuration options if sending notifications via rabbitmq (these are
+# the defaults)
+rabbit_host = localhost
+rabbit_port = 5672
+rabbit_use_ssl = false
+rabbit_userid = guest
+rabbit_password = guest
+rabbit_virtual_host = /
+rabbit_notification_exchange = glance
+rabbit_notification_topic = notifications
+rabbit_durable_queues = False
+
+# Configuration options if sending notifications via Qpid (these are
+# the defaults)
+qpid_notification_exchange = glance
+qpid_notification_topic = notifications
+qpid_hostname = localhost
+qpid_port = 5672
+qpid_username =
+qpid_password =
+qpid_sasl_mechanisms =
+qpid_reconnect_timeout = 0
+qpid_reconnect_limit = 0
+qpid_reconnect_interval_min = 0
+qpid_reconnect_interval_max = 0
+qpid_reconnect_interval = 0
+#qpid_heartbeat=60
+# Set to 'ssl' to enable SSL
+qpid_protocol = tcp
+qpid_tcp_nodelay = True
+
+# ============ Filesystem Store Options ========================
+
+# Directory that the Filesystem backend store
+# writes image data to
+filesystem_store_datadir={RIFT_VAR_ROOT}/glance/images/
+
+# A path to a JSON file that contains metadata describing the storage
+# system.  When show_multiple_locations is True the information in this
+# file will be returned with any location that is contained in this
+# store.
+#filesystem_store_metadata_file = None
+
+# ============ Swift Store Options =============================
+
+# Version of the authentication service to use
+# Valid versions are '2' for keystone and '1' for swauth and rackspace
+swift_store_auth_version = 2
+
+# Address where the Swift authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme is specified, default to 'https://'
+# For swauth, use something like '127.0.0.1:8080/v1.0/'
+swift_store_auth_address = 127.0.0.1:5000/v2.0/
+
+# User to authenticate against the Swift authentication service
+# If you use Swift authentication service, set it to 'account':'user'
+# where 'account' is a Swift storage account and 'user'
+# is a user in that account
+swift_store_user = jdoe:jdoe
+
+# Auth key for the user authenticating against the
+# Swift authentication service
+swift_store_key = a86850deb2742ec3cb41518e26aa2d89
+
+# Container within the account that the account should use
+# for storing images in Swift
+swift_store_container = glance
+
+# Do we create the container if it does not exist?
+swift_store_create_container_on_put = False
+
+# What size, in MB, should Glance start chunking image files
+# and do a large object manifest in Swift? By default, this is
+# the maximum object size in Swift, which is 5GB
+swift_store_large_object_size = 5120
+
+# When doing a large object manifest, what size, in MB, should
+# Glance write chunks to Swift? This amount of data is written
+# to a temporary disk buffer during the process of chunking
+# the image file, and the default is 200MB
+swift_store_large_object_chunk_size = 200
+
+# Whether to use ServiceNET to communicate with the Swift storage servers.
+# (If you aren't RACKSPACE, leave this False!)
+#
+# To use ServiceNET for authentication, prefix hostname of
+# `swift_store_auth_address` with 'snet-'.
+# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
+swift_enable_snet = False
+
+# If set to True enables multi-tenant storage mode which causes Glance images
+# to be stored in tenant specific Swift accounts.
+#swift_store_multi_tenant = False
+
+# A list of swift ACL strings that will be applied as both read and
+# write ACLs to the containers created by Glance in multi-tenant
+# mode. This grants the specified tenants/users read and write access
+# to all newly created image objects. The standard swift ACL string
+# formats are allowed, including:
+# <tenant_id>:<username>
+# <tenant_name>:<username>
+# *:<username>
+# Multiple ACLs can be combined using a comma separated list, for
+# example: swift_store_admin_tenants = service:glance,*:admin
+#swift_store_admin_tenants =
+
+# The region of the swift endpoint to be used for single tenant. This setting
+# is only necessary if the tenant has multiple swift endpoints.
+#swift_store_region =
+
+# If set to False, disables SSL layer compression of https swift requests.
+# Setting to 'False' may improve performance for images which are already
+# in a compressed format, eg qcow2. If set to True, enables SSL layer
+# compression (provided it is supported by the target swift proxy).
+#swift_store_ssl_compression = True
+
+# ============ S3 Store Options =============================
+
+# Address where the S3 authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme is specified, default to 'http://'
+s3_store_host = 127.0.0.1:8080/v1.0/
+
+# User to authenticate against the S3 authentication service
+s3_store_access_key = <20-char AWS access key>
+
+# Auth key for the user authenticating against the
+# S3 authentication service
+s3_store_secret_key = <40-char AWS secret key>
+
+# Container within the account that the account should use
+# for storing images in S3. Note that S3 has a flat namespace,
+# so you need a unique bucket name for your glance images. An
+# easy way to do this is append your AWS access key to "glance".
+# S3 buckets in AWS *must* be lowercased, so remember to lowercase
+# your AWS access key if you use it in your bucket name below!
+s3_store_bucket = <lowercased 20-char aws access key>glance
+
+# Do we create the bucket if it does not exist?
+s3_store_create_bucket_on_put = False
+
+# When sending images to S3, the data will first be written to a
+# temporary buffer on disk. By default the platform's temporary directory
+# will be used. If required, an alternative directory can be specified here.
+#s3_store_object_buffer_dir = /path/to/dir
+
+# When forming a bucket url, boto will either set the bucket name as the
+# subdomain or as the first token of the path. Amazon's S3 service will
+# accept it as the subdomain, but Swift's S3 middleware requires it be
+# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'.
+#s3_store_bucket_url_format = subdomain
+
+# ============ RBD Store Options =============================
+
+# Ceph configuration file path
+# If using cephx authentication, this file should
+# include a reference to the right keyring
+# in a client.<USER> section
+rbd_store_ceph_conf = /etc/ceph/ceph.conf
+
+# RADOS user to authenticate as (only applicable if using cephx)
+rbd_store_user = glance
+
+# RADOS pool in which images are stored
+rbd_store_pool = images
+
+# Images will be chunked into objects of this size (in megabytes).
+# For best performance, this should be a power of two
+rbd_store_chunk_size = 8
+
+# ============ Sheepdog Store Options =============================
+
+sheepdog_store_address = localhost
+
+sheepdog_store_port = 7000
+
+# Images will be chunked into objects of this size (in megabytes).
+# For best performance, this should be a power of two
+sheepdog_store_chunk_size = 64
+
+# ============ Cinder Store Options ===============================
+
+# Info to match when looking for cinder in the service catalog
+# Format is : separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v1/%(project_id)s (string value)
+#cinder_endpoint_template = <None>
+
+# Region name of this node (string value)
+#os_region_name = <None>
+
+# Location of the CA certificates file to use for cinder client requests
+# (string value)
+#cinder_ca_certificates_file = <None>
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Allow to perform insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = False
+
+# ============ Delayed Delete Options =============================
+
+# Turn on/off delayed delete
+delayed_delete = False
+
+# Delayed delete time in seconds
+scrub_time = 43200
+
+# Directory that the scrubber will use to remind itself of what to delete
+# Make sure this is also set in glance-scrubber.conf
+scrubber_datadir={RIFT_VAR_ROOT}/glance/scrubber
+
+# =============== Image Cache Options =============================
+
+# Base directory that the Image Cache uses
+image_cache_dir={RIFT_VAR_ROOT}/glance/image-cache/
+
+[keystone_authtoken]
+#auth_host=127.0.0.1
+#auth_port=35357
+#auth_protocol=http
+#admin_tenant_name=%SERVICE_TENANT_NAME%
+#admin_user=%SERVICE_USER%
+#admin_password=%SERVICE_PASSWORD%
+
+[paste_deploy]
+# Name of the paste configuration file that defines the available pipelines
+config_file={RIFT_INSTALL}/etc/glance/glance-api-dist-paste.ini
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-api-keystone], you would configure the flavor below
+# as 'keystone'.
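+# For example, "flavor=keystone+cachemanagement" selects the
+# glance-api-keystone+cachemanagement pipeline; an empty value uses the
+# unauthenticated default pipeline.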
+flavor=
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-cache.conf b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-cache.conf
new file mode 100644 (file)
index 0000000..904eb7f
--- /dev/null
@@ -0,0 +1,168 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+verbose=True
+
+# Show debugging output in logs (sets DEBUG log level output)
+debug=False
+
+log_file={RIFT_VAR_ROOT}/log/glance/image-cache.log
+
+# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
+#use_syslog = False
+
+# Directory that the Image Cache writes data to
+image_cache_dir={RIFT_VAR_ROOT}/glance/image-cache/
+
+# Number of seconds after which we should consider an incomplete image to be
+# stalled and eligible for reaping
+image_cache_stall_time = 86400
+
+# image_cache_invalid_entry_grace_period - seconds
+#
+# If an exception is raised as we're writing to the cache, the cache-entry is
+# deemed invalid and moved to <image_cache_datadir>/invalid so that it can be
+# inspected for debugging purposes.
+#
+# This is the number of seconds to leave these invalid images around before
+# they are eligible to be reaped.
+image_cache_invalid_entry_grace_period = 3600
+
+# Max cache size in bytes
+image_cache_max_size = 10737418240
+
+# Address to find the registry server
+registry_host = 127.0.0.1
+
+# Port the registry server is listening on
+registry_port = 9191
+
+# Auth settings if using Keystone
+# auth_url = http://127.0.0.1:5000/v2.0/
+# admin_tenant_name = %SERVICE_TENANT_NAME%
+# admin_user = %SERVICE_USER%
+# admin_password = %SERVICE_PASSWORD%
+
+# List of which store classes and store class locations are
+# currently known to glance at startup.
+# known_stores = glance.store.filesystem.Store,
+#                glance.store.http.Store,
+#                glance.store.rbd.Store,
+#                glance.store.s3.Store,
+#                glance.store.swift.Store,
+#                glance.store.sheepdog.Store,
+#                glance.store.cinder.Store,
+
+# ============ Filesystem Store Options ========================
+
+# Directory that the Filesystem backend store
+# writes image data to
+filesystem_store_datadir = {RIFT_VAR_ROOT}/glance/images/
+
+# ============ Swift Store Options =============================
+
+# Version of the authentication service to use
+# Valid versions are '2' for keystone and '1' for swauth and rackspace
+swift_store_auth_version = 2
+
+# Address where the Swift authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme is specified, default to 'https://'
+# For swauth, use something like '127.0.0.1:8080/v1.0/'
+swift_store_auth_address = 127.0.0.1:5000/v2.0/
+
+# User to authenticate against the Swift authentication service
+# If you use Swift authentication service, set it to 'account':'user'
+# where 'account' is a Swift storage account and 'user'
+# is a user in that account
+swift_store_user = jdoe:jdoe
+
+# Auth key for the user authenticating against the
+# Swift authentication service
+swift_store_key = a86850deb2742ec3cb41518e26aa2d89
+
+# Container within the account that the account should use
+# for storing images in Swift
+swift_store_container = glance
+
+# Do we create the container if it does not exist?
+swift_store_create_container_on_put = False
+
+# What size, in MB, should Glance start chunking image files
+# and do a large object manifest in Swift? By default, this is
+# the maximum object size in Swift, which is 5GB
+swift_store_large_object_size = 5120
+
+# When doing a large object manifest, what size, in MB, should
+# Glance write chunks to Swift? This amount of data is written
+# to a temporary disk buffer during the process of chunking
+# the image file, and the default is 200MB
+swift_store_large_object_chunk_size = 200
+
+# Whether to use ServiceNET to communicate with the Swift storage servers.
+# (If you aren't RACKSPACE, leave this False!)
+#
+# To use ServiceNET for authentication, prefix hostname of
+# `swift_store_auth_address` with 'snet-'.
+# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
+swift_enable_snet = False
+
+# ============ S3 Store Options =============================
+
+# Address where the S3 authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme is specified, default to 'http://'
+s3_store_host = 127.0.0.1:8080/v1.0/
+
+# User to authenticate against the S3 authentication service
+s3_store_access_key = <20-char AWS access key>
+
+# Auth key for the user authenticating against the
+# S3 authentication service
+s3_store_secret_key = <40-char AWS secret key>
+
+# Container within the account that the account should use
+# for storing images in S3. Note that S3 has a flat namespace,
+# so you need a unique bucket name for your glance images. An
+# easy way to do this is append your AWS access key to "glance".
+# S3 buckets in AWS *must* be lowercased, so remember to lowercase
+# your AWS access key if you use it in your bucket name below!
+s3_store_bucket = <lowercased 20-char aws access key>glance
+
+# Do we create the bucket if it does not exist?
+s3_store_create_bucket_on_put = False
+
+# When sending images to S3, the data will first be written to a
+# temporary buffer on disk. By default the platform's temporary directory
+# will be used. If required, an alternative directory can be specified here.
+# s3_store_object_buffer_dir = /path/to/dir
+
+# ============ Cinder Store Options ===========================
+
+# Info to match when looking for cinder in the service catalog
+# Format is : separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v1/%(project_id)s (string value)
+#cinder_endpoint_template = <None>
+
+# Region name of this node (string value)
+#os_region_name = <None>
+
+# Location of the CA certificates file to use for cinder client requests
+# (string value)
+#cinder_ca_certificates_file = <None>
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Allow to perform insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = False
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+# metadata_encryption_key = <16, 24 or 32 char registry metadata key>
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-registry.conf b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-registry.conf
new file mode 100644 (file)
index 0000000..2529d1c
--- /dev/null
@@ -0,0 +1,100 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+verbose=True
+
+# Show debugging output in logs (sets DEBUG log level output)
+debug=True
+
+# Address to bind the registry server
+bind_host = 0.0.0.0
+
+# Port to bind the registry server to
+bind_port = 9191
+
+# Log to this file. Make sure you do not set the same log
+# file for both the API and registry servers!
+log_file={RIFT_VAR_ROOT}/log/glance/glance-registry.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# API to use for accessing data. Default value points to sqlalchemy
+# package.
+data_api = glance.db.sqlalchemy.api
+
+# SQLAlchemy connection string for the reference implementation
+# registry server. Any valid SQLAlchemy connection string is fine.
+# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
+#sql_connection=mysql://glance:glance@localhost/glance
+sql_connection=sqlite:///{RIFT_VAR_ROOT}/glance/glance-registry.db
+
+# Period in seconds after which SQLAlchemy should reestablish its connection
+# to the database.
+#
+# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
+# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
+# notice this, you can lower this value to ensure that SQLAlchemy reconnects
+# before MySQL can drop the connection.
+sql_idle_timeout = 3600
+
+# Limit the api to return `api_limit_max` items in a call to a container. If
+# a larger `limit` query param is provided, it will be reduced to this value.
+api_limit_max = 1000
+
+# If a `limit` query param is not provided in an api request, it will
+# default to `limit_param_default`
+limit_param_default = 25
+
+# Role used to identify an authenticated user as administrator
+#admin_role = admin
+
+# Whether to automatically create the database tables.
+# Default: False
+db_auto_create = True
+
+# Enable DEBUG log messages from sqlalchemy which prints every database
+# query and response.
+# Default: False
+sqlalchemy_debug = True
+
+# ================= Syslog Options ============================
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+#use_syslog = False
+
+# Facility to use. If unset defaults to LOG_USER.
+#syslog_log_facility = LOG_LOCAL1
+
+# ================= SSL Options ===============================
+
+# Certificate file to use when starting registry server securely
+#cert_file = /path/to/certfile
+
+# Private key file to use when starting registry server securely
+#key_file = /path/to/keyfile
+
+# CA certificate file to use to verify connecting clients
+#ca_file = /path/to/cafile
+
+[keystone_authtoken]
+#auth_host=127.0.0.1
+#auth_port=35357
+#auth_protocol=http
+#admin_tenant_name=%SERVICE_TENANT_NAME%
+#admin_user=%SERVICE_USER%
+#admin_password=%SERVICE_PASSWORD%
+
+[paste_deploy]
+# Name of the paste configuration file that defines the available pipelines
+config_file=/usr/share/glance/glance-registry-dist-paste.ini
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-registry-keystone], you would configure the flavor below
+# as 'keystone'.
+flavor=
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-scrubber.conf b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/glance-scrubber.conf
new file mode 100644 (file)
index 0000000..70b693b
--- /dev/null
@@ -0,0 +1,53 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+verbose=True
+
+# Show debugging output in logs (sets DEBUG log level output)
+debug=False
+
+# Log to this file. Make sure you do not set the same log
+# file for both the API and registry servers!
+log_file={RIFT_VAR_ROOT}/log/glance/scrubber.log
+
+# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
+#use_syslog = False
+
+# Should we run our own loop or rely on cron/scheduler to run us
+daemon = False
+
+# Loop time between checking for new items to schedule for delete
+wakeup_time = 300
+
+# Directory that the scrubber will use to remind itself of what to delete
+# Make sure this is also set in glance-api.conf
+scrubber_datadir={RIFT_VAR_ROOT}/glance/scrubber
+
+# Only one server in your deployment should be designated the cleanup host
+cleanup_scrubber = False
+
+# pending_delete items older than this time are candidates for cleanup
+cleanup_scrubber_time = 86400
+
+# Address to find the registry server for cleanups
+registry_host = 0.0.0.0
+
+# Port the registry server is listening on
+registry_port = 9191
+
+# Auth settings if using Keystone
+# auth_url = http://127.0.0.1:5000/v2.0/
+# admin_tenant_name = %SERVICE_TENANT_NAME%
+# admin_user = %SERVICE_USER%
+# admin_password = %SERVICE_PASSWORD%
+
+# Directory to use for lock files. Default to a temp directory
+# (string value). This setting needs to be the same for both
+# glance-scrubber and glance-api.
+#lock_path=<None>
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/fc20/policy.json b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/policy.json
new file mode 100644 (file)
index 0000000..248b27e
--- /dev/null
@@ -0,0 +1,5 @@
+{
+    "context_is_admin":  "role:admin",
+    "default": "",
+    "manage_image_cache": "role:admin"
+}
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/fc20/schema-image.json b/rwlaunchpad/plugins/rwimagemgr/etc/fc20/schema-image.json
new file mode 100644 (file)
index 0000000..5aafd6b
--- /dev/null
@@ -0,0 +1,28 @@
+{
+    "kernel_id": {
+        "type": "string",
+        "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
+        "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image."
+    },
+    "ramdisk_id": {
+        "type": "string",
+        "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
+        "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image."
+    },
+    "instance_uuid": {
+        "type": "string",
+        "description": "ID of instance used to create this image."
+    },
+    "architecture": {
+        "description": "Operating system architecture as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
+        "type": "string"
+    },
+    "os_distro": {
+        "description": "Common name of operating system distribution as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
+        "type": "string"
+    },
+    "os_version": {
+        "description": "Operating system version as specified by the distributor",
+        "type": "string"
+    }
+}
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-api-paste.ini b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-api-paste.ini
new file mode 100644 (file)
index 0000000..9efd19f
--- /dev/null
@@ -0,0 +1,87 @@
+# Use this pipeline for no auth or image caching - DEFAULT
+[pipeline:glance-api]
+pipeline = cors healthcheck versionnegotiation osprofiler unauthenticated-context rootapp
+
+# Use this pipeline for image caching and no auth
+[pipeline:glance-api-caching]
+pipeline = cors healthcheck versionnegotiation osprofiler unauthenticated-context cache rootapp
+
+# Use this pipeline for caching w/ management interface but no auth
+[pipeline:glance-api-cachemanagement]
+pipeline = cors healthcheck versionnegotiation osprofiler unauthenticated-context cache cachemanage rootapp
+
+# Use this pipeline for keystone auth
+[pipeline:glance-api-keystone]
+pipeline = cors healthcheck versionnegotiation osprofiler authtoken context rootapp
+
+# Use this pipeline for keystone auth with image caching
+[pipeline:glance-api-keystone+caching]
+pipeline = cors healthcheck versionnegotiation osprofiler authtoken context cache rootapp
+
+# Use this pipeline for keystone auth with caching and cache management
+[pipeline:glance-api-keystone+cachemanagement]
+pipeline = cors healthcheck versionnegotiation osprofiler authtoken context cache cachemanage rootapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user.
+[pipeline:glance-api-trusted-auth]
+pipeline = cors healthcheck versionnegotiation osprofiler context rootapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user and uses cache management
+[pipeline:glance-api-trusted-auth+cachemanagement]
+pipeline = cors healthcheck versionnegotiation osprofiler context cache cachemanage rootapp
+
+[composite:rootapp]
+paste.composite_factory = glance.api:root_app_factory
+/: apiversions
+/v1: apiv1app
+/v2: apiv2app
+
+[app:apiversions]
+paste.app_factory = glance.api.versions:create_resource
+
+[app:apiv1app]
+paste.app_factory = glance.api.v1.router:API.factory
+
+[app:apiv2app]
+paste.app_factory = glance.api.v2.router:API.factory
+
+[filter:healthcheck]
+paste.filter_factory = oslo_middleware:Healthcheck.factory
+backends = disable_by_file
+disable_by_file_path = /etc/glance/healthcheck_disable
+
+[filter:versionnegotiation]
+paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
+
+[filter:cache]
+paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory
+
+[filter:cachemanage]
+paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory
+
+[filter:context]
+paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
+
+[filter:unauthenticated-context]
+paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+delay_auth_decision = true
+
+[filter:gzip]
+paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY  #DEPRECATED
+enabled = yes  #DEPRECATED
+
+[filter:cors]
+paste.filter_factory =  oslo_middleware.cors:filter_factory
+oslo_config_project = glance
+oslo_config_program = glance-api
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-api.conf b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-api.conf
new file mode 100644 (file)
index 0000000..65e2e8d
--- /dev/null
@@ -0,0 +1,1783 @@
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+[DEFAULT]
+
+#
+# From glance.api
+#
+
+# When true, this option sets the owner of an image to be the tenant.
+# Otherwise, the owner of the  image will be the authenticated user
+# issuing the request. (boolean value)
+#owner_is_tenant = true
+
+# Role used to identify an authenticated user as administrator.
+# (string value)
+#admin_role = admin
+
+# Allow unauthenticated users to access the API with read-only
+# privileges. This only applies when using ContextMiddleware. (boolean
+# value)
+allow_anonymous_access = True
+
+# Limits request ID length. (integer value)
+#max_request_id_length = 64
+
+# Public url to use for versions endpoint. The default is None, which
+# will use the request's host_url attribute to populate the URL base.
+# If Glance is operating behind a proxy, you will want to change this
+# to represent the proxy's URL. (string value)
+#public_endpoint = <None>
+
+# Whether to allow users to specify image properties beyond what the
+# image schema provides (boolean value)
+#allow_additional_image_properties = true
+
+# Maximum number of image members per image. Negative values evaluate
+# to unlimited. (integer value)
+#image_member_quota = 128
+
+# Maximum number of properties allowed on an image. Negative values
+# evaluate to unlimited. (integer value)
+#image_property_quota = 128
+
+# Maximum number of tags allowed on an image. Negative values evaluate
+# to unlimited. (integer value)
+#image_tag_quota = 128
+
+# Maximum number of locations allowed on an image. Negative values
+# evaluate to unlimited. (integer value)
+#image_location_quota = 10
+
+# Python module path of data access API (string value)
+data_api = glance.db.sqlalchemy.api
+
+# Default value for the number of items returned by a request if not
+# specified explicitly in the request (integer value)
+#limit_param_default = 25
+
+# Maximum permissible number of items that could be returned by a
+# request (integer value)
+#api_limit_max = 1000
+
+# Whether to include the backend image storage location in image
+# properties. Revealing storage location can be a security risk, so
+# use this setting with caution! (boolean value)
+#show_image_direct_url = false
+
+# Whether to include the backend image locations in image properties.
+# For example, if using the file system store a URL of
+# "file:///path/to/image" will be returned to the user in the
+# 'direct_url' meta-data field. Revealing storage location can be a
+# security risk, so use this setting with caution! Setting this to
+# true overrides the show_image_direct_url option. (boolean value)
+#show_multiple_locations = false
+
+# Maximum size of image a user can upload in bytes. Defaults to
+# 1099511627776 bytes (1 TB).WARNING: this value should only be
+# increased after careful consideration and must be set to a value
+# under 8 EB (9223372036854775808). (integer value)
+# Maximum value: 9223372036854775808
+#image_size_cap = 1099511627776
+
+# Set a system wide quota for every user. This value is the total
+# capacity that a user can use across all storage systems. A value of
+# 0 means unlimited.Optional unit can be specified for the value.
+# Accepted units are B, KB, MB, GB and TB representing Bytes,
+# KiloBytes, MegaBytes, GigaBytes and TeraBytes respectively. If no
+# unit is specified then Bytes is assumed. Note that there should not
+# be any space between value and unit and units are case sensitive.
+# (string value)
+#user_storage_quota = 0
+
+# Deploy the v1 OpenStack Images API. (boolean value)
+enable_v1_api = true
+
+# Deploy the v2 OpenStack Images API. (boolean value)
+enable_v2_api = true
+
+# Deploy the v1 OpenStack Registry API. (boolean value)
+enable_v1_registry = true
+
+# Deploy the v2 OpenStack Registry API. (boolean value)
+enable_v2_registry = true
+
+# The hostname/IP of the pydev process listening for debug connections
+# (string value)
+#pydev_worker_debug_host = <None>
+
+# The port on which a pydev process is listening for connections.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#pydev_worker_debug_port = 5678
+
+# AES key for encrypting store 'location' metadata. This includes, if
+# used, Swift or S3 credentials. Should be set to a random string of
+# length 16, 24 or 32 bytes (string value)
+#metadata_encryption_key = <None>
+
+# Digest algorithm which will be used for digital signature. Use the
+# command "openssl list-message-digest-algorithms" to get the
+# available algorithms supported by the version of OpenSSL on the
+# platform. Examples are "sha1", "sha256", "sha512", etc. (string
+# value)
+#digest_algorithm = sha256
+
+# This value sets what strategy will be used to determine the image
+# location order. Currently two strategies are packaged with Glance
+# 'location_order' and 'store_type'. (string value)
+# Allowed values: location_order, store_type
+#location_strategy = location_order
+
+# The location of the property protection file.This file contains the
+# rules for property protections and the roles/policies associated
+# with it. If this config value is not specified, by default, property
+# protections won't be enforced. If a value is specified and the file
+# is not found, then the glance-api service will not start. (string
+# value)
+#property_protection_file = <None>
+
+# This config value indicates whether "roles" or "policies" are used
+# in the property protection file. (string value)
+# Allowed values: roles, policies
+#property_protection_rule_format = roles
+
+# Modules of exceptions that are permitted to be recreated upon
+# receiving exception data from an rpc call. (list value)
+#allowed_rpc_exception_modules = glance.common.exception,builtins,exceptions
+
+# Address to bind the server.  Useful when selecting a particular
+# network interface. (string value)
+bind_host = 0.0.0.0
+
+# The port on which the server will listen. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+bind_port = 9292
+
+# The number of child process workers that will be created to service
+# requests. The default will be equal to the number of CPUs available.
+# (integer value)
+workers = 1
+
+# Maximum line size of message headers to be accepted. max_header_line
+# may need to be increased when using large tokens (typically those
+# generated by the Keystone v3 API with big service catalogs). (integer
+# value)
+#max_header_line = 16384
+
+# If False, server will return the header "Connection: close", If
+# True, server will return "Connection: Keep-Alive" in its responses.
+# In order to close the client socket connection explicitly after the
+# response is sent and read successfully by the client, you simply
+# have to set this option to False when you create a wsgi server.
+# (boolean value)
+#http_keepalive = true
+
+# Timeout for client connections' socket operations. If an incoming
+# connection is idle for this number of seconds it will be closed. A
+# value of '0' means wait forever. (integer value)
+#client_socket_timeout = 900
+
+# The backlog value that will be used when creating the TCP listener
+# socket. (integer value)
+#backlog = 4096
+
+# The value for the socket option TCP_KEEPIDLE.  This is the time in
+# seconds that the connection must be idle before TCP starts sending
+# keepalive probes. (integer value)
+#tcp_keepidle = 600
+
+# CA certificate file to use to verify connecting clients. (string
+# value)
+#ca_file = <None>
+
+# Certificate file to use when starting API server securely. (string
+# value)
+#cert_file = <None>
+
+# Private key file to use when starting API server securely. (string
+# value)
+#key_file = <None>
+
+# The path to the sqlite file database that will be used for image
+# cache management. (string value)
+#image_cache_sqlite_db = cache.db
+
+# The driver to use for image cache management. (string value)
+#image_cache_driver = sqlite
+
+# The upper limit (the maximum size of accumulated cache in bytes)
+# beyond which the cache pruner, if running, starts cleaning the image
+# cache. (integer value)
+#image_cache_max_size = 10737418240
+
+# The amount of time to let an incomplete image remain in the cache,
+# before the cache cleaner, if running, will remove the incomplete
+# image. (integer value)
+#image_cache_stall_time = 86400
+
+# Base directory that the image cache uses. (string value)
+image_cache_dir = {RIFT_VAR_ROOT}/glance/image-cache/
+
+# Default publisher_id for outgoing notifications. (string value)
+#default_publisher_id = image.localhost
+
+# List of disabled notifications. A notification can be given either
+# as a notification type to disable a single event, or as a
+# notification group prefix to disable all events within a group.
+# Example: if this config option is set to ["image.create",
+# "metadef_namespace"], then "image.create" notification will not be
+# sent after image is created and none of the notifications for
+# metadefinition namespaces will be sent. (list value)
+#disabled_notifications =
+
+# Address to find the registry server. (string value)
+registry_host = 0.0.0.0
+
+# Port the registry server is listening on. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+registry_port = 9191
+
+# Whether to pass through the user token when making requests to the
+# registry. To prevent failures with token expiration during big files
+# upload, it is recommended to set this parameter to False.If
+# "use_user_token" is not in effect, then admin credentials can be
+# specified. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#use_user_token = true
+
+# The administrators user name. If "use_user_token" is not in effect,
+# then admin credentials can be specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#admin_user = <None>
+
+# The administrators password. If "use_user_token" is not in effect,
+# then admin credentials can be specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#admin_password = <None>
+
+# The tenant name of the administrative user. If "use_user_token" is
+# not in effect, then admin tenant name can be specified. (string
+# value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#admin_tenant_name = <None>
+
+# The URL to the keystone service. If "use_user_token" is not in
+# effect and using keystone auth, then URL of keystone can be
+# specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#auth_url = <None>
+
+# The strategy to use for authentication. If "use_user_token" is not
+# in effect, then auth strategy can be specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#auth_strategy = noauth
+
+# The region for the authentication service. If "use_user_token" is
+# not in effect and using keystone auth, then region name can be
+# specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#auth_region = <None>
+
+# The protocol to use for communication with the registry server.
+# Either http or https. (string value)
+#registry_client_protocol = http
+
+# The path to the key file to use in SSL connections to the registry
+# server, if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE
+# environment variable to a filepath of the key file (string value)
+#registry_client_key_file = <None>
+
+# The path to the cert file to use in SSL connections to the registry
+# server, if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE
+# environment variable to a filepath of the cert file. (string
+# value)
+#registry_client_cert_file = <None>
+
+# The path to the certifying authority cert file to use in SSL
+# connections to the registry server, if any. Alternately, you may set
+# the GLANCE_CLIENT_CA_FILE environment variable to a filepath of the
+# CA cert file. (string value)
+#registry_client_ca_file = <None>
+
+# When using SSL in connections to the registry server, do not require
+# validation via a certifying authority. This is the registry's
+# equivalent of specifying --insecure on the command line using
+# glanceclient for the API. (boolean value)
+#registry_client_insecure = false
+
+# The period of time, in seconds, that the API server will wait for a
+# registry request to complete. A value of 0 implies no timeout.
+# (integer value)
+#registry_client_timeout = 600
+
+# Whether to pass through headers containing user and tenant
+# information when making requests to the registry. This allows the
+# registry to use the context middleware without keystonemiddleware's
+# auth_token middleware, removing calls to the keystone auth service.
+# It is recommended that when using this option, secure communication
+# between glance api and glance registry is ensured by means other
+# than auth_token middleware. (boolean value)
+#send_identity_headers = false
+
+# The amount of time in seconds to delay before performing a delete.
+# (integer value)
+#scrub_time = 0
+
+# The size of thread pool to be used for scrubbing images. The default
+# is one, which signifies serial scrubbing. Any value above one
+# indicates the max number of images that may be scrubbed in parallel.
+# (integer value)
+#scrub_pool_size = 1
+
+# Turn on/off delayed delete. (boolean value)
+#delayed_delete = false
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of
+# the default INFO level. (boolean value)
+debug = True
+
+# If set to false, the logging level will be set to WARNING instead of
+# the default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+verbose = True
+
+# The name of a logging configuration file. This file is appended to
+# any existing logging configuration files. For details about logging
+# configuration files, see the Python logging module documentation.
+# Note that when logging configuration files are used then all logging
+# configuration is set in the configuration file and other logging
+# configuration options are ignored (for example,
+# logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set.
+# (string value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default
+# is set, logging will go to stderr as defined by use_stderr. This
+# option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+log_file = {RIFT_VAR_ROOT}/log/glance/glance-api.log
+
+# (Optional) The base directory used for relative log_file paths.
+# This option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses a logging handler designed to watch the file system. When the
+# log file is moved or removed, this handler will open a new log file
+# at the specified path instantaneously. This makes sense only if the
+# log_file option is specified and the platform is Linux. This option is
+# ignored if log_config_append is set. (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and
+# will be changed later to honor RFC5424. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
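+# For illustration only, the default context format above renders a
+# line roughly like (request id and user identity are hypothetical):
+# 2016-09-07 18:56:51.123 4242 INFO glance.api.v2.images
+# [req-a1b2c3 demo demo - - -] Image created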
+
+# Format string to use for log messages when context is undefined.
+# (string value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the
+# message is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string
+# value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is
+# ignored if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message.
+# (string value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message.
+# (string value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+#
+# From oslo.messaging
+#
+
+# Size of RPC connection pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
+#rpc_conn_pool_size = 30
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve to this
+# address. (string value)
+#rpc_zmq_bind_address = *
+
+# MatchMaker driver. (string value)
+# Allowed values: redis, dummy
+#rpc_zmq_matchmaker = redis
+
+# Type of concurrency used. Either "native" or "eventlet" (string
+# value)
+#rpc_zmq_concurrency = eventlet
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts = 1
+
+# Maximum number of ingress messages to locally buffer per topic.
+# Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog = <None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir = /var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP address.
+# Must match "host" option, if running Nova. (string value)
+#rpc_zmq_host = localhost
+
+# Seconds to wait before a cast expires (TTL). The default value of -1
+# specifies an infinite linger period. The value of 0 specifies no
+# linger period. Pending messages shall be discarded immediately when
+# the socket is closed. Only supported by impl_zmq. (integer value)
+#rpc_cast_timeout = -1
+
+# The default number of seconds that poll should wait. Poll raises
+# timeout exception when the timeout expires. (integer value)
+#rpc_poll_timeout = 1
+
+# Expiration timeout in seconds of a name service record about
+# an existing target (< 0 means no timeout). (integer value)
+#zmq_target_expire = 120
+
+# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy.
+# (boolean value)
+#use_pub_sub = true
+
+# Minimum port number for random ports range. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#rpc_zmq_min_port = 49152
+
+# Maximum port number for random ports range. (integer value)
+# Minimum value: 1
+# Maximum value: 65536
+#rpc_zmq_max_port = 65536
+
+# Number of retries to find a free port number before failing with
+# ZMQBindError. (integer value)
+#rpc_zmq_bind_port_retries = 100
+
+# Size of executor thread pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
+#executor_thread_pool_size = 64
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout = 60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend option
+# and driver specific configuration. (string value)
+#transport_url = <None>
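+# For illustration (host and credentials are hypothetical), a RabbitMQ
+# transport URL has the form:
+# transport_url = rabbit://guest:guest@localhost:5672/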
+
+# The messaging driver to use, defaults to rabbit. Other drivers
+# include amqp and zmq. (string value)
+#rpc_backend = rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the transport_url
+# option. (string value)
+#control_exchange = openstack
+
+
+[cors]
+
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain
+# received in the request's "origin" header. (list value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials
+# (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to
+# HTTP Simple Headers. (list value)
+#expose_headers = X-Image-Meta-Checksum,X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list
+# value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH
+
+# Indicate which header field names may be used during the actual
+# request. (list value)
+#allow_headers = Content-MD5,X-Image-Meta-Checksum,X-Storage-Token,Accept-Encoding,X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID
+
+
+[cors.subdomain]
+
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain
+# received in the request's "origin" header. (list value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials
+# (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to
+# HTTP Simple Headers. (list value)
+#expose_headers = X-Image-Meta-Checksum,X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list
+# value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH
+
+# Indicate which header field names may be used during the actual
+# request. (list value)
+#allow_headers = Content-MD5,X-Image-Meta-Checksum,X-Storage-Token,Accept-Encoding,X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID
+
+
+[database]
+
+#
+# From oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+# Deprecated group/name - [DEFAULT]/sqlite_db
+sqlite_db = {RIFT_VAR_ROOT}/glance/glance-api.db
+
+# If True, SQLite uses synchronous mode. (boolean value)
+# Deprecated group/name - [DEFAULT]/sqlite_synchronous
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database.
+# (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+
+# The SQLAlchemy connection string to use to connect to the slave
+# database. (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including
+# the default, overrides any server-set SQL mode. To use whatever SQL
+# mode is set by the server configuration, set this to no value.
+# Example: mysql_sql_mode= (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum number of database connection retries during startup. Set to
+# -1 to specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a SQL connection. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = 50
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer
+# value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on connection
+# lost. (boolean value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database
+# operation up to db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries
+# of a database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before
+# error is raised. Set to -1 to specify an infinite retry count.
+# (integer value)
+#db_max_retries = 20
+
+#
+# From oslo.db.concurrency
+#
+
+# Enable the experimental use of thread pooling for all DB API calls
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/dbapi_use_tpool
+#use_tpool = false
+
+
+[glance_store]
+
+#
+# From glance.store
+#
+
+# List of stores enabled. Valid stores are: cinder, file, http, rbd,
+# sheepdog, swift, s3, vsphere (list value)
+stores = file,http
+
+# Default scheme to use to store image data. The scheme must be
+# registered by one of the stores defined by the 'stores' config
+# option. (string value)
+default_store = file
+
+# Minimum interval, in seconds, between updates of dynamic storage
+# capabilities based on backend status. This is not a periodic
+# routine; the update logic is executed only when the interval has
+# elapsed and a store operation has been triggered. The feature is
+# enabled only when the option value is greater than zero.
+# (integer value)
+#store_capabilities_update_min_interval = 0
+
+# Specify the path to the CA bundle file to use in verifying the
+# remote server certificate. (string value)
+#https_ca_certificates_file = <None>
+
+# If true, the remote server certificate is not verified. If false,
+# then the default CA truststore is used for verification. This option
+# is ignored if "https_ca_certificates_file" is set. (boolean value)
+#https_insecure = true
+
+# Specify the http/https proxy information that should be used to
+# connect to the remote server. The proxy information should be a key
+# value pair of the scheme and proxy, e.g. http:10.0.0.1:3128. You can
+# specify proxies for multiple schemes by separating the key value
+# pairs with a comma, e.g. http:10.0.0.1:3128, https:10.0.0.1:1080.
+# (dict value)
+#http_proxy_information =
+
+# If True, swiftclient won't check for a valid SSL certificate when
+# authenticating. (boolean value)
+#swift_store_auth_insecure = false
+
+# A string giving the CA certificate file to use in SSL connections
+# for verifying certs. (string value)
+#swift_store_cacert = <None>
+
+# The region of the swift endpoint to be used for single tenant. This
+# setting is only necessary if the tenant has multiple swift
+# endpoints. (string value)
+#swift_store_region = <None>
+
+# If set, the configured endpoint will be used. If None, the storage
+# url from the auth response will be used. (string value)
+#swift_store_endpoint = <None>
+
+# A string giving the endpoint type of the swift service to use
+# (publicURL, adminURL or internalURL). This setting is only used if
+# swift_store_auth_version is 2. (string value)
+#swift_store_endpoint_type = publicURL
+
+# A string giving the service type of the swift service to use. This
+# setting is only used if swift_store_auth_version is 2. (string
+# value)
+#swift_store_service_type = object-store
+
+# Container within the account that Glance should use for storing
+# images in Swift when using single container mode. In multiple
+# container mode, this will be the prefix for all containers. (string
+# value)
+#swift_store_container = glance
+
+# The size, in MB, at which Glance will start chunking image files and
+# doing a large object manifest in Swift. (integer value)
+#swift_store_large_object_size = 5120
+
+# The amount of data written to a temporary disk buffer during the
+# process of chunking the image file. (integer value)
+#swift_store_large_object_chunk_size = 200
+
+# A boolean value that determines if we create the container if it
+# does not exist. (boolean value)
+#swift_store_create_container_on_put = false
+
+# If set to True, enables multi-tenant storage mode which causes
+# Glance images to be stored in tenant specific Swift accounts.
+# (boolean value)
+#swift_store_multi_tenant = false
+
+# When set to 0, a single-tenant store will only use one container to
+# store all images. When set to an integer value between 1 and 32, a
+# single-tenant store will use multiple containers to store images,
+# and this value will determine how many containers are created. Used
+# only when swift_store_multi_tenant is disabled. The total number of
+# containers that will be used is equal to 16^N, so if this config
+# option is set to 2, then 16^2=256 containers will be used to store
+# images. (integer value)
+#swift_store_multiple_containers_seed = 0
+
+# A list of tenants that will be granted read/write access on all
+# Swift containers created by Glance in multi-tenant mode. (list
+# value)
+#swift_store_admin_tenants =
+
+# If set to False, disables SSL layer compression of https swift
+# requests. Setting to False may improve performance for images which
+# are already in a compressed format, e.g. qcow2. (boolean value)
+#swift_store_ssl_compression = true
+
+# The number of times a Swift download will be retried before the
+# request fails. (integer value)
+#swift_store_retry_get_count = 0
+
+# The period of time (in seconds) before token expiration when
+# glance_store will try to request a new user token. The default value
+# of 60 seconds means that if the token is going to expire in 1 minute,
+# then glance_store requests a new user token. (integer value)
+#swift_store_expire_soon_interval = 60
+
+# If set to True, create a trust for each add/get request to the
+# multi-tenant store in order to prevent the authentication token from
+# expiring while data is being uploaded or downloaded. If set to False,
+# the user token is used for the Swift connection (so there is no
+# overhead of trust creation). Please note that this option is honored
+# if and only if swift_store_multi_tenant=True. (boolean value)
+#swift_store_use_trusts = true
+
+# The reference to the default swift account/backing store parameters
+# to use for adding new images. (string value)
+#default_swift_reference = ref1
+
+# Version of the authentication service to use. Valid versions are 2
+# and 3 for keystone and 1 (deprecated) for swauth and rackspace.
+# (deprecated - use "auth_version" in swift_store_config_file) (string
+# value)
+#swift_store_auth_version = 2
+
+# The address where the Swift authentication service is listening.
+# (deprecated - use "auth_address" in swift_store_config_file) (string
+# value)
+#swift_store_auth_address = <None>
+
+# The user to authenticate against the Swift authentication service
+# (deprecated - use "user" in swift_store_config_file) (string value)
+#swift_store_user = <None>
+
+# Auth key for the user authenticating against the Swift
+# authentication service. (deprecated - use "key" in
+# swift_store_config_file) (string value)
+#swift_store_key = <None>
+
+# The config file that has the swift account(s) configs. (string value)
+#swift_store_config_file = <None>
+
+# RADOS images will be chunked into objects of this size (in
+# megabytes). For best performance, this should be a power of two.
+# (integer value)
+#rbd_store_chunk_size = 8
+
+# RADOS pool in which images are stored. (string value)
+#rbd_store_pool = images
+
+# RADOS user to authenticate as (only applicable if using cephx). If
+# <None>, a default will be chosen based on the client section in
+# rbd_store_ceph_conf. (string value)
+#rbd_store_user = <None>
+
+# Ceph configuration file path. If <None>, librados will locate the
+# default config. If using cephx authentication, this file should
+# include a reference to the right keyring in a client.<USER> section
+# (string value)
+#rbd_store_ceph_conf = /etc/ceph/ceph.conf
+
+# Timeout value (in seconds) used when connecting to ceph cluster. If
+# value <= 0, no timeout is set and default librados value is used.
+# (integer value)
+#rados_connect_timeout = 0
+
+# Info to match when looking for cinder in the service catalog. Format
+# is colon-separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volumev2::publicURL
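+# For illustration, matching a cinder v2 endpoint registered under the
+# (hypothetical) service name "cinderv2" would look like:
+# cinder_catalog_info = volumev2:cinderv2:publicURL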
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v2/%(tenant)s (string value)
+#cinder_endpoint_template = <None>
+
+# Region name of this node. If specified, it will be used to locate
+# OpenStack services for stores. (string value)
+# Deprecated group/name - [DEFAULT]/os_region_name
+#cinder_os_region_name = <None>
+
+# Location of the CA certificates file to use for cinder client requests.
+# (string value)
+#cinder_ca_certificates_file = <None>
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Period of time, in seconds, to wait for a cinder volume
+# transition to complete. (integer value)
+#cinder_state_transition_timeout = 300
+
+# Allow to perform insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = false
+
+# The address where the Cinder authentication service is listening. If
+# <None>, the cinder endpoint in the service catalog is used. (string
+# value)
+#cinder_store_auth_address = <None>
+
+# User name to authenticate against Cinder. If <None>, the user of
+# current context is used. (string value)
+#cinder_store_user_name = <None>
+
+# Password for the user authenticating against Cinder. If <None>, the
+# current context auth token is used. (string value)
+#cinder_store_password = <None>
+
+# Project name where the image is stored in Cinder. If <None>, the
+# project in current context is used. (string value)
+#cinder_store_project_name = <None>
+
+# Path to the rootwrap configuration file to use for running commands
+# as root. (string value)
+#rootwrap_config = /etc/glance/rootwrap.conf
+
+# The host where the S3 server is listening. (string value)
+#s3_store_host = <None>
+
+# The S3 query token access key. (string value)
+#s3_store_access_key = <None>
+
+# The S3 query token secret key. (string value)
+#s3_store_secret_key = <None>
+
+# The S3 bucket to be used to store the Glance data. (string value)
+#s3_store_bucket = <None>
+
+# The local directory where uploads will be staged before they are
+# transferred into S3. (string value)
+#s3_store_object_buffer_dir = <None>
+
+# A boolean to determine if the S3 bucket should be created on upload
+# if it does not exist or if an error should be returned to the user.
+# (boolean value)
+#s3_store_create_bucket_on_put = false
+
+# The S3 calling format used to determine the bucket. Either subdomain
+# or path can be used. (string value)
+#s3_store_bucket_url_format = subdomain
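+# For illustration (hostnames hypothetical): "subdomain" addresses a
+# bucket as http://<bucket>.s3.example.com/<key>, while "path"
+# addresses it as http://s3.example.com/<bucket>/<key>.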
+
+# The size, in MB, at which S3 starts chunking image files and doing a
+# multipart upload in S3. (integer value)
+#s3_store_large_object_size = 100
+
+# The multipart upload part size, in MB, that S3 should use when
+# uploading parts. The size must be greater than or equal to 5 MB.
+# (integer value)
+#s3_store_large_object_chunk_size = 10
+
+# The number of thread pools to perform a multipart upload in S3.
+# (integer value)
+#s3_store_thread_pools = 10
+
+# Enable the use of a proxy. (boolean value)
+#s3_store_enable_proxy = false
+
+# Address or hostname for the proxy server. (string value)
+#s3_store_proxy_host = <None>
+
+# The port to use when connecting over a proxy. (integer value)
+#s3_store_proxy_port = 8080
+
+# The username to connect to the proxy. (string value)
+#s3_store_proxy_user = <None>
+
+# The password to use when connecting over a proxy. (string value)
+#s3_store_proxy_password = <None>
+
+# Images will be chunked into objects of this size (in megabytes). For
+# best performance, this should be a power of two. (integer value)
+#sheepdog_store_chunk_size = 64
+
+# Port of sheep daemon. (integer value)
+#sheepdog_store_port = 7000
+
+# IP address of sheep daemon. (string value)
+#sheepdog_store_address = localhost
+
+# Directory to which the Filesystem backend store writes images.
+# (string value)
+filesystem_store_datadir = {RIFT_VAR_ROOT}/glance/images/
+
+# List of directories and their priorities to which the Filesystem
+# backend store writes images. (multi valued)
+#filesystem_store_datadirs =
+
+# The path to a file which contains the metadata to be returned with
+# any location associated with this store.  The file must contain a
+# valid JSON object. The object should contain the keys 'id' and
+# 'mountpoint'. The value for both keys should be 'string'. (string
+# value)
+#filesystem_store_metadata_file = <None>
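+# For illustration, such a metadata file could contain (values
+# hypothetical):
+# {"id": "store-1", "mountpoint": "/var/lib/glance/images"}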
+
+# The required permission for created image files. In this way, the
+# user of another service, e.g. Nova, who consumes the image can be
+# made the exclusive member of the group that owns the created files.
+# Assigning a value less than or equal to zero means the default file
+# permission is not changed. This value will be decoded as an octal
+# digit. (integer value)
+#filesystem_store_file_perm = 0
+
+# ESX/ESXi or vCenter Server target system. The server value can be an
+# IP address or a DNS name. (string value)
+#vmware_server_host = <None>
+
+# Username for authenticating with VMware ESX/VC server. (string
+# value)
+#vmware_server_username = <None>
+
+# Password for authenticating with VMware ESX/VC server. (string
+# value)
+#vmware_server_password = <None>
+
+# Number of times VMware ESX/VC server API must be retried upon
+# connection related issues. (integer value)
+#vmware_api_retry_count = 10
+
+# The interval used for polling remote tasks invoked on VMware ESX/VC
+# server. (integer value)
+#vmware_task_poll_interval = 5
+
+# The name of the directory where the glance images will be stored in
+# the VMware datastore. (string value)
+#vmware_store_image_dir = /openstack_glance
+
+# If true, the ESX/vCenter server certificate is not verified. If
+# false, then the default CA truststore is used for verification. This
+# option is ignored if "vmware_ca_file" is set. (boolean value)
+# Deprecated group/name - [DEFAULT]/vmware_api_insecure
+#vmware_insecure = false
+
+# Specify a CA bundle file to use in verifying the ESX/vCenter server
+# certificate. (string value)
+#vmware_ca_file = <None>
+
+# A list of datastores where the image can be stored. This option may
+# be specified multiple times for specifying multiple datastores. The
+# datastore name should be specified after its datacenter path,
+# seperated by ":". An optional weight may be given after the
+# datastore name, seperated again by ":". Thus, the required format
+# becomes <datacenter_path>:<datastore_name>:<optional_weight>. When
+# adding an image, the datastore with highest weight will be selected,
+# unless there is not enough free space available in cases where the
+# image size is already known. If no weight is given, it is assumed to
+# be zero and the directory will be considered for selection last. If
+# multiple datastores have the same weight, then the one with the most
+# free space available is selected. (multi valued)
+#vmware_datastores =
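+# For illustration (names hypothetical), a datastore "datastore1"
+# under datacenter path "dc1" with weight 100 would be:
+# vmware_datastores = dc1:datastore1:100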
+
+
+[image_format]
+
+#
+# From glance.api
+#
+
+# Supported values for the 'container_format' image attribute (list
+# value)
+# Deprecated group/name - [DEFAULT]/container_formats
+container_formats = ami,ari,aki,bare,ovf,ova,docker
+
+# Supported values for the 'disk_format' image attribute (list value)
+# Deprecated group/name - [DEFAULT]/disk_formats
+disk_formats = ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso
+
+
+[keystone_authtoken]
+
+#
+# From keystonemiddleware.auth_token
+#
+
+# Complete public Identity API endpoint. (string value)
+#auth_uri = <None>
+
+# API version of the admin Identity API endpoint. (string value)
+#auth_version = <None>
+
+# Do not handle authorization requests within the middleware, but
+# delegate the authorization decision to downstream WSGI components.
+# (boolean value)
+#delay_auth_decision = false
+
+# Request timeout value for communicating with Identity API server.
+# (integer value)
+#http_connect_timeout = <None>
+
+# How many times to try to reconnect when communicating with the
+# Identity API server. (integer value)
+#http_request_max_retries = 3
+
+# Env key for the swift cache. (string value)
+#cache = <None>
+
+# Required if identity server requires client certificate (string
+# value)
+#certfile = <None>
+
+# Required if identity server requires client certificate (string
+# value)
+#keyfile = <None>
+
+# A PEM encoded Certificate Authority to use when verifying HTTPS
+# connections. Defaults to system CAs. (string value)
+#cafile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# The region in which the identity server can be found. (string value)
+#region_name = <None>
+
+# Directory used to cache files related to PKI tokens. (string value)
+#signing_dir = <None>
+
+# Optionally specify a list of memcached server(s) to use for caching.
+# If left undefined, tokens will instead be cached in-process. (list
+# value)
+# Deprecated group/name - [DEFAULT]/memcache_servers
+#memcached_servers = <None>
+
+# In order to prevent excessive effort spent validating tokens, the
+# middleware caches previously-seen tokens for a configurable duration
+# (in seconds). Set to -1 to disable caching completely. (integer
+# value)
+#token_cache_time = 300
+
+# Determines the frequency at which the list of revoked tokens is
+# retrieved from the Identity service (in seconds). A high number of
+# revocation events combined with a low cache duration may
+# significantly reduce performance. (integer value)
+#revocation_cache_time = 10
+
+# (Optional) If defined, indicate whether token data should be
+# authenticated or authenticated and encrypted. If MAC, token data is
+# authenticated (with HMAC) in the cache. If ENCRYPT, token data is
+# encrypted and authenticated in the cache. If the value is not one of
+# these options or empty, auth_token will raise an exception on
+# initialization. (string value)
+# Allowed values: None, MAC, ENCRYPT
+#memcache_security_strategy = None
+
+# (Optional, mandatory if memcache_security_strategy is defined) This
+# string is used for key derivation. (string value)
+#memcache_secret_key = <None>
+
+# (Optional) Number of seconds memcached server is considered dead
+# before it is tried again. (integer value)
+#memcache_pool_dead_retry = 300
+
+# (Optional) Maximum total number of open connections to every
+# memcached server. (integer value)
+#memcache_pool_maxsize = 10
+
+# (Optional) Socket timeout in seconds for communicating with a
+# memcached server. (integer value)
+#memcache_pool_socket_timeout = 3
+
+# (Optional) Number of seconds a connection to memcached is held
+# unused in the pool before it is closed. (integer value)
+#memcache_pool_unused_timeout = 60
+
+# (Optional) Number of seconds that an operation will wait to get a
+# memcached client connection from the pool. (integer value)
+#memcache_pool_conn_get_timeout = 10
+
+# (Optional) Use the advanced (eventlet safe) memcached client pool.
+# The advanced pool will only work under python 2.x. (boolean value)
+#memcache_use_advanced_pool = false
+
+# (Optional) Indicate whether to set the X-Service-Catalog header. If
+# False, middleware will not ask for service catalog on token
+# validation and will not set the X-Service-Catalog header. (boolean
+# value)
+#include_service_catalog = true
+
+# Used to control the use and type of token binding. Can be set to:
+# "disabled" to not check token binding. "permissive" (default) to
+# validate binding information if the bind type is of a form known to
+# the server and ignore it if not. "strict" like "permissive" but if
+# the bind type is unknown the token will be rejected. "required" to
+# require any form of token binding. Finally, the name of a specific
+# binding method that must be present in tokens. (string value)
+#enforce_token_bind = permissive
+
+# If true, the revocation list will be checked for cached tokens. This
+# requires that PKI tokens are configured on the identity server.
+# (boolean value)
+#check_revocations_for_cached = false
+
+# Hash algorithms to use for hashing PKI tokens. This may be a single
+# algorithm or multiple. The algorithms are those supported by Python
+# standard hashlib.new(). The hashes will be tried in the order given,
+# so put the preferred one first for performance. The result of the
+# first hash will be stored in the cache. This will typically be set
+# to multiple values only while migrating from a less secure algorithm
+# to a more secure one. Once all the old tokens are expired this
+# option should be set to a single value for better performance. (list
+# value)
+#hash_algorithms = md5
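+# For illustration, while migrating from md5 to sha256 both could be
+# listed temporarily, preferred algorithm first:
+# hash_algorithms = sha256,md5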
+
+# Authentication type to load (unknown value)
+# Deprecated group/name - [DEFAULT]/auth_plugin
+#auth_type = <None>
+
+# Config Section from which to load plugin specific options (unknown
+# value)
+#auth_section = <None>
+
+
+[matchmaker_redis]
+
+#
+# From oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host = 127.0.0.1
+
+# Use this port to connect to redis host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#port = 6379
+
+# Password for Redis server (optional). (string value)
+#password =
+
+# List of Redis Sentinel hosts (fault tolerance mode) e.g.
+# [host:port, host1:port ... ] (list value)
+#sentinel_hosts =
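+# For illustration (addresses hypothetical; 26379 is the conventional
+# Sentinel port):
+# sentinel_hosts = 192.168.0.1:26379,192.168.0.2:26379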
+
+# Redis replica set name. (string value)
+#sentinel_group_name = oslo-messaging-zeromq
+
+# Time in ms to wait between connection attempts. (integer value)
+#wait_timeout = 500
+
+# Time in ms to wait before the transaction is killed. (integer value)
+#check_timeout = 20000
+
+# Timeout in ms on blocking socket operations (integer value)
+#socket_timeout = 1000
+
+
+[oslo_concurrency]
+
+#
+# From oslo.concurrency
+#
+
+# Enables or disables inter-process locks. (boolean value)
+# Deprecated group/name - [DEFAULT]/disable_process_locking
+#disable_process_locking = false
+
+# Directory to use for lock files. For security, the specified
+# directory should only be writable by the user running the processes
+# that need locking. Defaults to environment variable OSLO_LOCK_PATH.
+# If external locks are used, a lock path must be set. (string value)
+# Deprecated group/name - [DEFAULT]/lock_path
+#lock_path = <None>
+
+
+[oslo_messaging_amqp]
+
+#
+# From oslo.messaging
+#
+
+# address prefix used when sending to a specific server (string value)
+# Deprecated group/name - [amqp1]/server_request_prefix
+#server_request_prefix = exclusive
+
+# address prefix used when broadcasting to all servers (string value)
+# Deprecated group/name - [amqp1]/broadcast_prefix
+#broadcast_prefix = broadcast
+
+# address prefix when sending to any server in group (string value)
+# Deprecated group/name - [amqp1]/group_request_prefix
+#group_request_prefix = unicast
+
+# Name for the AMQP container (string value)
+# Deprecated group/name - [amqp1]/container_name
+#container_name = <None>
+
+# Timeout for inactive connections (in seconds) (integer value)
+# Deprecated group/name - [amqp1]/idle_timeout
+#idle_timeout = 0
+
+# Debug: dump AMQP frames to stdout (boolean value)
+# Deprecated group/name - [amqp1]/trace
+#trace = false
+
+# CA certificate PEM file to verify server certificate (string value)
+# Deprecated group/name - [amqp1]/ssl_ca_file
+#ssl_ca_file =
+
+# Identifying certificate PEM file to present to clients (string
+# value)
+# Deprecated group/name - [amqp1]/ssl_cert_file
+#ssl_cert_file =
+
+# Private key PEM file used to sign cert_file certificate (string
+# value)
+# Deprecated group/name - [amqp1]/ssl_key_file
+#ssl_key_file =
+
+# Password for decrypting ssl_key_file (if encrypted) (string value)
+# Deprecated group/name - [amqp1]/ssl_key_password
+#ssl_key_password = <None>
+
+# Accept clients using either SSL or plain TCP (boolean value)
+# Deprecated group/name - [amqp1]/allow_insecure_clients
+#allow_insecure_clients = false
+
+# Space separated list of acceptable SASL mechanisms (string value)
+# Deprecated group/name - [amqp1]/sasl_mechanisms
+#sasl_mechanisms =
+
+# Path to directory that contains the SASL configuration (string
+# value)
+# Deprecated group/name - [amqp1]/sasl_config_dir
+#sasl_config_dir =
+
+# Name of configuration file (without .conf suffix) (string value)
+# Deprecated group/name - [amqp1]/sasl_config_name
+#sasl_config_name =
+
+# User name for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/username
+#username =
+
+# Password for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/password
+#password =
+
+
+[oslo_messaging_notifications]
+
+#
+# From oslo.messaging
+#
+
+# The driver(s) to handle sending notifications. Possible values are
+# messaging, messagingv2, routing, log, test, noop (multi valued)
+# Deprecated group/name - [DEFAULT]/notification_driver
+#driver =
+
+# A URL representing the messaging driver to use for notifications. If
+# not set, we fall back to the same configuration used for RPC.
+# (string value)
+# Deprecated group/name - [DEFAULT]/notification_transport_url
+#transport_url = <None>
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+# Deprecated group/name - [DEFAULT]/notification_topics
+#topics = notifications
+
+
+[oslo_messaging_rabbit]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_durable_queues
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues = false
+
+# Auto-delete queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_auto_delete
+#amqp_auto_delete = false
+
+# SSL version to use (valid only if SSL enabled). Valid values are
+# TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be
+# available on some distributions. (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_version
+#kombu_ssl_version =
+
+# SSL key file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
+#kombu_ssl_keyfile =
+
+# SSL cert file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
+#kombu_ssl_certfile =
+
+# SSL certification authority file (valid only if SSL enabled).
+# (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
+#kombu_ssl_ca_certs =
+
+# How long to wait before reconnecting in response to an AMQP consumer
+# cancel notification. (floating point value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
+#kombu_reconnect_delay = 1.0
+
+# EXPERIMENTAL: Possible values are: gzip, bz2. If not set, compression
+# will not be used. This option may not be available in future
+# versions. (string value)
+#kombu_compression = <None>
+
+# How long to wait for a missing client before giving up on sending
+# it its replies. This value should not be longer than
+# rpc_response_timeout. (integer value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout
+#kombu_missing_consumer_retry_timeout = 60
+
+# Determines how the next RabbitMQ node is chosen in case the one we
+# are currently connected to becomes unavailable. Takes effect only if
+# more than one RabbitMQ node is provided in config. (string value)
+# Allowed values: round-robin, shuffle
+#kombu_failover_strategy = round-robin
+
+# The RabbitMQ broker address where a single node is used. (string
+# value)
+# Deprecated group/name - [DEFAULT]/rabbit_host
+#rabbit_host = localhost
+
+# The RabbitMQ broker port where a single node is used. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# Deprecated group/name - [DEFAULT]/rabbit_port
+#rabbit_port = 5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+# Deprecated group/name - [DEFAULT]/rabbit_hosts
+#rabbit_hosts = $rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
+#rabbit_use_ssl = false
+
+# The RabbitMQ userid. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_userid
+#rabbit_userid = guest
+
+# The RabbitMQ password. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_password
+#rabbit_password = guest
+
+# The RabbitMQ login method. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_login_method
+#rabbit_login_method = AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_virtual_host
+#rabbit_virtual_host = /
+
+# How frequently to retry connecting with RabbitMQ. (integer value)
+#rabbit_retry_interval = 1
+
+# How long to backoff for between retries when connecting to RabbitMQ.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
+#rabbit_retry_backoff = 2
+
+# Maximum interval of RabbitMQ connection retries. Default is 30
+# seconds. (integer value)
+#rabbit_interval_max = 30
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_max_retries
+#rabbit_max_retries = 0
+
+# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. In RabbitMQ 3.0,
+# queue mirroring is no longer controlled by the x-ha-policy argument
+# when declaring a queue. If you just want to make sure that all
+# queues (except those with auto-generated names) are mirrored across
+# all nodes, run: "rabbitmqctl set_policy HA '^(?!amq\.).*' '{"ha-
+# mode": "all"}' " (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
+#rabbit_ha_queues = false
+
+# Positive integer representing duration in seconds for queue TTL
+# (x-expires). Queues which are unused for the duration of the TTL are
+# automatically deleted. The parameter affects only reply and fanout
+# queues. (integer value)
+# Minimum value: 1
+#rabbit_transient_queues_ttl = 600
+
+# Specifies the number of messages to prefetch. Setting to zero allows
+# unlimited messages. (integer value)
+#rabbit_qos_prefetch_count = 0
+
+# Number of seconds after which the Rabbit broker is considered down
+# if heartbeat's keep-alive fails (0 disables the heartbeat).
+# EXPERIMENTAL (integer value)
+#heartbeat_timeout_threshold = 60
+
+# How many times during the heartbeat_timeout_threshold we check the
+# heartbeat. (integer value)
+#heartbeat_rate = 2
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/fake_rabbit
+#fake_rabbit = false
+
+# Maximum number of channels to allow (integer value)
+#channel_max = <None>
+
+# The maximum byte size for an AMQP frame (integer value)
+#frame_max = <None>
+
+# How often to send heartbeats for consumer's connections (integer
+# value)
+#heartbeat_interval = 1
+
+# Enable SSL (boolean value)
+#ssl = <None>
+
+# Arguments passed to ssl.wrap_socket (dict value)
+#ssl_options = <None>
+
+# Set socket timeout in seconds for connection's socket (floating
+# point value)
+#socket_timeout = 0.25
+
+# Set TCP_USER_TIMEOUT in seconds for connection's socket (floating
+# point value)
+#tcp_user_timeout = 0.25
+
+# Set delay for reconnection to some host which has connection error
+# (floating point value)
+#host_connection_reconnect_delay = 0.25
+
+# Maximum number of connections to keep queued. (integer value)
+#pool_max_size = 10
+
+# Maximum number of connections to create above `pool_max_size`.
+# (integer value)
+#pool_max_overflow = 0
+
+# Default number of seconds to wait for a connection to become
+# available. (integer value)
+#pool_timeout = 30
+
+# Lifetime of a connection (since creation) in seconds or None for no
+# recycling. Expired connections are closed on acquire. (integer
+# value)
+#pool_recycle = 600
+
+# Threshold at which inactive (since release) connections are
+# considered stale in seconds or None for no staleness. Stale
+# connections are closed on acquire. (integer value)
+#pool_stale = 60
+
+# Persist notification messages. (boolean value)
+#notification_persistence = false
+
+# Exchange name for sending notifications. (string value)
+#default_notification_exchange = ${control_exchange}_notification
+
+# Max number of unacknowledged messages which RabbitMQ can send to the
+# notification listener. (integer value)
+#notification_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during
+# sending notification, -1 means infinite retry. (integer value)
+#default_notification_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during
+# sending notification message (floating point value)
+#notification_retry_delay = 0.25
+
+# Time to live for rpc queues without consumers in seconds. (integer
+# value)
+#rpc_queue_expiration = 60
+
+# Exchange name for sending RPC messages (string value)
+#default_rpc_exchange = ${control_exchange}_rpc
+
+# Exchange name for receiving RPC replies (string value)
+#rpc_reply_exchange = ${control_exchange}_rpc_reply
+
+# Max number of unacknowledged messages which RabbitMQ can send to the
+# rpc listener. (integer value)
+#rpc_listener_prefetch_count = 100
+
+# Max number of unacknowledged messages which RabbitMQ can send to the
+# rpc reply listener. (integer value)
+#rpc_reply_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during
+# sending reply. -1 means infinite retry during rpc_timeout (integer
+# value)
+#rpc_reply_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during
+# sending reply. (floating point value)
+#rpc_reply_retry_delay = 0.25
+
+# Reconnecting retry count in case of connectivity problem during
+# sending RPC message, -1 means infinite retry. If the actual number
+# of retry attempts is not 0, the RPC request could be processed more
+# than once. (integer value)
+#default_rpc_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during
+# sending RPC message (floating point value)
+#rpc_retry_delay = 0.25
+
+
+[oslo_policy]
+
+#
+# From oslo.policy
+#
+
+# The JSON file that defines policies. (string value)
+# Deprecated group/name - [DEFAULT]/policy_file
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string
+# value)
+# Deprecated group/name - [DEFAULT]/policy_default_rule
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be
+# relative to any directory in the search path defined by the
+# config_dir option, or absolute paths. The file defined by
+# policy_file must exist for these directories to be searched.
+# Missing or empty directories are ignored. (multi valued)
+# Deprecated group/name - [DEFAULT]/policy_dirs
+#policy_dirs = policy.d
+
+
+[paste_deploy]
+
+#
+# From glance.api
+#
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-api-keystone] use the value "keystone" (string
+# value)
+flavor =
+
+# Name of the paste configuration file. (string value)
+config_file = {RIFT_INSTALL}/etc/glance/glance-api-paste.ini
+
+
+[profiler]
+
+#
+# From glance.api
+#
+
+# If False, fully disable the profiling feature. (boolean value)
+#enabled = false
+
+# If False, do not trace SQL requests. (boolean value)
+#trace_sqlalchemy = false
+
+# Secret key to use to sign Glance API and Glance Registry services
+# tracing messages. (string value)
+#hmac_keys = SECRET_KEY
+
+
+[store_type_location_strategy]
+
+#
+# From glance.api
+#
+
+# The store names to use to get store preference order. The name must
+# be registered by one of the stores defined by the 'stores' config
+# option. This option will be applied when you use the 'store_type'
+# option as the image location strategy defined by the 'location_strategy'
+# config option. (list value)
+#store_type_preference =
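+# For illustration, with the stores enabled in [glance_store] above,
+# preferring the file store over http would be:
+# store_type_preference = file,http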
+
+
+[task]
+
+#
+# From glance.api
+#
+
+# Time in hours for which a task lives after either succeeding or
+# failing. (integer value)
+# Deprecated group/name - [DEFAULT]/task_time_to_live
+#task_time_to_live = 48
+
+# Specifies which task executor to be used to run the task scripts.
+# (string value)
+#task_executor = taskflow
+
+# Work dir for asynchronous task operations. The directory set here
+# will be used to operate over images - normally before they are
+# imported in the destination store. When providing work dir, make
+# sure enough space is provided for concurrent tasks to run
+# efficiently without running out of space. A rough estimation can be
+# done by multiplying the number of `max_workers` - or the N of
+# workers running - by an average image size (e.g. 500 MB). The image
+# size estimation should be done based on the average size in your
+# deployment. Note that depending on the tasks running you may need to
+# multiply this number by some factor depending on what the task does.
+# For example, you may want to double the available size if image
+# conversion is enabled. All this being said, remember these are just
+# estimations and you should do them based on the worst case scenario
+# and be prepared to act in case they were wrong. (string value)
+#work_dir = <None>
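+# For illustration, with the default max_workers of 10 and an average
+# image size of 500 MB, a rough estimate is 10 * 500 MB = 5 GB, or
+# about 10 GB if image conversion is enabled.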
+
+
+[taskflow_executor]
+
+#
+# From glance.api
+#
+
+# The mode in which the engine will run. Can be 'serial' or
+# 'parallel'. (string value)
+# Allowed values: serial, parallel
+#engine_mode = parallel
+
+# The number of parallel activities executed at the same time by the
+# engine. The value can be greater than one when the engine mode is
+# 'parallel'. (integer value)
+# Deprecated group/name - [task]/eventlet_executor_pool_size
+#max_workers = 10
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-cache.conf b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-cache.conf
new file mode 100644 (file)
index 0000000..bc7337c
--- /dev/null
@@ -0,0 +1,338 @@
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+[DEFAULT]
+
+#
+# From glance.cache
+#
+
+# Whether to allow users to specify image properties beyond what the
+# image schema provides (boolean value)
+#allow_additional_image_properties = true
+
+# Maximum number of image members per image. Negative values evaluate
+# to unlimited. (integer value)
+#image_member_quota = 128
+
+# Maximum number of properties allowed on an image. Negative values
+# evaluate to unlimited. (integer value)
+#image_property_quota = 128
+
+# Maximum number of tags allowed on an image. Negative values evaluate
+# to unlimited. (integer value)
+#image_tag_quota = 128
+
+# Maximum number of locations allowed on an image. Negative values
+# evaluate to unlimited. (integer value)
+#image_location_quota = 10
+
+# Python module path of data access API (string value)
+#data_api = glance.db.sqlalchemy.api
+
+# Default value for the number of items returned by a request if not
+# specified explicitly in the request (integer value)
+#limit_param_default = 25
+
+# Maximum permissible number of items that could be returned by a
+# request (integer value)
+#api_limit_max = 1000
+
+# Whether to include the backend image storage location in image
+# properties. Revealing storage location can be a security risk, so
+# use this setting with caution! (boolean value)
+#show_image_direct_url = false
+
+# Whether to include the backend image locations in image properties.
+# For example, if using the file system store a URL of
+# "file:///path/to/image" will be returned to the user in the
+# 'direct_url' meta-data field. Revealing storage location can be a
+# security risk, so use this setting with caution! Setting this to
+# true overrides the show_image_direct_url option. (boolean value)
+#show_multiple_locations = false
+
+# Maximum size of image a user can upload in bytes. Defaults to
+# 1099511627776 bytes (1 TB). WARNING: this value should only be
+# increased after careful consideration and must be set to a value
+# under 8 EB (9223372036854775808). (integer value)
+# Maximum value: 9223372036854775808
+#image_size_cap = 1099511627776
+
+# Set a system wide quota for every user. This value is the total
+# capacity that a user can use across all storage systems. A value of
+# 0 means unlimited. An optional unit can be specified for the value.
+# Accepted units are B, KB, MB, GB and TB representing Bytes,
+# KiloBytes, MegaBytes, GigaBytes and TeraBytes respectively. If no
+# unit is specified then Bytes is assumed. Note that there should not
+# be any space between value and unit and units are case sensitive.
+# (string value)
+#user_storage_quota = 0
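A minimal sketch of how a quota value decomposes under the stated rules (no space between value and unit, case-sensitive units); this illustrates the format only and is not Glance's actual parser:

    import re

    UNITS = {'B': 1, 'KB': 1024, 'MB': 1024**2, 'GB': 1024**3, 'TB': 1024**4}

    def parse_quota(value):
        """Parse a user_storage_quota string such as '500MB' into bytes."""
        match = re.fullmatch(r'(\d+)(B|KB|MB|GB|TB)?', value)
        if match is None:
            raise ValueError("bad quota %r (no spaces; units are case sensitive)" % value)
        number, unit = match.groups()
        return int(number) * UNITS[unit or 'B']

    assert parse_quota('500MB') == 524288000
    assert parse_quota('0') == 0   # 0 means unlimited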
+
+# Deploy the v1 OpenStack Images API. (boolean value)
+#enable_v1_api = true
+
+# Deploy the v2 OpenStack Images API. (boolean value)
+#enable_v2_api = true
+
+# Deploy the v1 OpenStack Registry API. (boolean value)
+#enable_v1_registry = true
+
+# Deploy the v2 OpenStack Registry API. (boolean value)
+#enable_v2_registry = true
+
+# The hostname/IP of the pydev process listening for debug connections
+# (string value)
+#pydev_worker_debug_host = <None>
+
+# The port on which a pydev process is listening for connections.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#pydev_worker_debug_port = 5678
+
+# AES key for encrypting store 'location' metadata. This includes, if
+# used, Swift or S3 credentials. Should be set to a random string of
+# length 16, 24 or 32 bytes (string value)
+#metadata_encryption_key = <None>
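The accepted key lengths map to AES-128/192/256. A one-line way to generate such a key (a sketch; 32 characters is chosen as an example):

    import secrets

    # token_hex(16) yields 32 hex characters, i.e. a 32-byte printable
    # string suitable for metadata_encryption_key; use token_hex(8) or
    # token_hex(12) for 16- or 24-byte keys.
    print(secrets.token_hex(16))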
+
+# Digest algorithm which will be used for digital signature. Use the
+# command "openssl list-message-digest-algorithms" to get the
+# available algorithms supported by the version of OpenSSL on the
+# platform. Examples are "sha1", "sha256", "sha512", etc. (string
+# value)
+#digest_algorithm = sha256
+
+# The path to the sqlite file database that will be used for image
+# cache management. (string value)
+#image_cache_sqlite_db = cache.db
+
+# The driver to use for image cache management. (string value)
+#image_cache_driver = sqlite
+
+# The upper limit (the maximum size of accumulated cache in bytes)
+# beyond which the cache pruner, if running, starts cleaning the image
+# cache. (integer value)
+#image_cache_max_size = 10737418240
+
+# The amount of time to let an incomplete image remain in the cache,
+# before the cache cleaner, if running, will remove the incomplete
+# image. (integer value)
+#image_cache_stall_time = 86400
+
+# Base directory that the image cache uses. (string value)
+image_cache_dir = {RIFT_VAR_ROOT}/glance/image-cache/
+
+# Address to find the registry server. (string value)
+registry_host = 127.0.0.1
+
+# Port the registry server is listening on. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+registry_port = 9191
+
+# Whether to pass through the user token when making requests to the
+# registry. To prevent failures with token expiration during big file
+# uploads, it is recommended to set this parameter to False. If
+# "use_user_token" is not in effect, then admin credentials can be
+# specified. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#use_user_token = true
+
+# The administrator's user name. If "use_user_token" is not in effect,
+# then admin credentials can be specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#admin_user = <None>
+
+# The administrator's password. If "use_user_token" is not in effect,
+# then admin credentials can be specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#admin_password = <None>
+
+# The tenant name of the administrative user. If "use_user_token" is
+# not in effect, then admin tenant name can be specified. (string
+# value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#admin_tenant_name = <None>
+
+# The URL to the keystone service. If "use_user_token" is not in
+# effect and using keystone auth, then URL of keystone can be
+# specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#auth_url = <None>
+
+# The strategy to use for authentication. If "use_user_token" is not
+# in effect, then auth strategy can be specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#auth_strategy = noauth
+
+# The region for the authentication service. If "use_user_token" is
+# not in effect and using keystone auth, then region name can be
+# specified. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: This option was considered harmful and has been deprecated
+# in M release. It will be removed in O release. For more information
+# read OSSN-0060. Related functionality with uploading big images has
+# been implemented with Keystone trusts support.
+#auth_region = <None>
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of
+# the default INFO level. (boolean value)
+debug = false
+
+# If set to false, the logging level will be set to WARNING instead of
+# the default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+verbose = true
+
+# The name of a logging configuration file. This file is appended to
+# any existing logging configuration files. For details about logging
+# configuration files, see the Python logging module documentation.
+# Note that when logging configuration files are used then all logging
+# configuration is set in the configuration file and other logging
+# configuration options are ignored (for example,
+# logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s. This option is ignored if log_config_append is set.
+# (string value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default
+# is set, logging will go to stderr as defined by use_stderr. This
+# option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+log_file = {RIFT_VAR_ROOT}/log/glance/image-cache.log
+
+# (Optional) The base directory used for relative log_file paths.
+# This option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses a logging handler designed to watch the file system. When the
+# log file is moved or removed, this handler opens a new log file at
+# the specified path instantaneously. This only makes sense when the
+# log_file option is specified and the platform is Linux. This option
+# is ignored if log_config_append is set. (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and
+# will be changed later to honor RFC5424. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined.
+# (string value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the
+# message is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string
+# value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is
+# ignored if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message.
+# (string value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message.
+# (string value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[oslo_policy]
+
+#
+# From oslo.policy
+#
+
+# The JSON file that defines policies. (string value)
+# Deprecated group/name - [DEFAULT]/policy_file
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string
+# value)
+# Deprecated group/name - [DEFAULT]/policy_default_rule
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be
+# relative to any directory in the search path defined by the
+# config_dir option, or absolute paths. The file defined by
+# policy_file must exist for these directories to be searched.
+# Missing or empty directories are ignored. (multi valued)
+# Deprecated group/name - [DEFAULT]/policy_dirs
+#policy_dirs = policy.d
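Note that {RIFT_VAR_ROOT} in image_cache_dir and log_file above is a template token, not a literal Glance setting; presumably the image manager substitutes it when the file is installed. A minimal sketch of such a substitution (the helper name and the /var/rift fallback are assumptions):

    import os

    def render_conf(template_path, out_path, rift_var_root=None):
        """Expand {RIFT_VAR_ROOT} in a templated conf file (illustration only)."""
        rift_var_root = rift_var_root or os.environ.get('RIFT_VAR_ROOT', '/var/rift')
        with open(template_path) as src:
            text = src.read()
        # Plain replace() is used so the %(...)s logging patterns and any
        # other braces in the file need no format-escaping.
        with open(out_path, 'w') as dst:
            dst.write(text.replace('{RIFT_VAR_ROOT}', rift_var_root))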
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-manage.conf b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-manage.conf
new file mode 100644 (file)
index 0000000..4790cf9
--- /dev/null
@@ -0,0 +1,226 @@
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+[DEFAULT]
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of
+# the default INFO level. (boolean value)
+debug = false
+
+# If set to false, the logging level will be set to WARNING instead of
+# the default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+verbose = true
+
+# The name of a logging configuration file. This file is appended to
+# any existing logging configuration files. For details about logging
+# configuration files, see the Python logging module documentation.
+# Note that when logging configuration files are used then all logging
+# configuration is set in the configuration file and other logging
+# configuration options are ignored (for example,
+# logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s. This option is ignored if log_config_append is set.
+# (string value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default
+# is set, logging will go to stderr as defined by use_stderr. This
+# option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+log_file = {RIFT_VAR_ROOT}/log/glance/image-manage.log
+
+# (Optional) The base directory used for relative log_file paths.
+# This option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses a logging handler designed to watch the file system. When the
+# log file is moved or removed, this handler opens a new log file at
+# the specified path instantaneously. This only makes sense when the
+# log_file option is specified and the platform is Linux. This option
+# is ignored if log_config_append is set. (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and
+# will be changed later to honor RFC5424. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined.
+# (string value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the
+# message is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string
+# value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is
+# ignored if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message.
+# (string value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message.
+# (string value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[database]
+
+#
+# From oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+# Deprecated group/name - [DEFAULT]/sqlite_db
+sqlite_db = oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+# Deprecated group/name - [DEFAULT]/sqlite_synchronous
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database.
+# (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+
+# The SQLAlchemy connection string to use to connect to the slave
+# database. (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including
+# the default, overrides any server-set SQL mode. To use whatever SQL
+# mode is set by the server configuration, set this to no value.
+# Example: mysql_sql_mode= (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum number of database connection retries during startup. Set to
+# -1 to specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a SQL connection. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = 50
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer
+# value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on connection
+# lost. (boolean value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database
+# operation up to db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries
+# of a database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before
+# error is raised. Set to -1 to specify an infinite retry count.
+# (integer value)
+#db_max_retries = 20
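Taken together, the retry options above describe a capped, growing backoff. A sketch of the resulting sleep schedule (the doubling step is an assumption consistent with the option descriptions, not oslo.db's literal code):

    db_retry_interval = 1        # first sleep, seconds
    db_inc_retry_interval = True
    db_max_retry_interval = 10   # cap on the sleep
    db_max_retries = 20

    interval = db_retry_interval
    schedule = []
    for attempt in range(db_max_retries):
        schedule.append(interval)
        if db_inc_retry_interval:
            interval = min(interval * 2, db_max_retry_interval)
    print(schedule[:7])   # -> [1, 2, 4, 8, 10, 10, 10]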
+
+#
+# From oslo.db.concurrency
+#
+
+# Enable the experimental use of thread pooling for all DB API calls
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/dbapi_use_tpool
+#use_tpool = false
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-registry-paste.ini b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-registry-paste.ini
new file mode 100644 (file)
index 0000000..492dbc6
--- /dev/null
@@ -0,0 +1,35 @@
+# Use this pipeline for no auth - DEFAULT
+[pipeline:glance-registry]
+pipeline = healthcheck osprofiler unauthenticated-context registryapp
+
+# Use this pipeline for keystone auth
+[pipeline:glance-registry-keystone]
+pipeline = healthcheck osprofiler authtoken context registryapp
+
+# Use this pipeline for authZ only. This means that the registry will treat a
+# user as authenticated without making requests to keystone to reauthenticate
+# the user.
+[pipeline:glance-registry-trusted-auth]
+pipeline = healthcheck osprofiler context registryapp
+
+[app:registryapp]
+paste.app_factory = glance.registry.api:API.factory
+
+[filter:healthcheck]
+paste.filter_factory = oslo_middleware:Healthcheck.factory
+backends = disable_by_file
+disable_by_file_path = /etc/glance/healthcheck_disable
+
+[filter:context]
+paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
+
+[filter:unauthenticated-context]
+paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY  #DEPRECATED
+enabled = yes  #DEPRECATED
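Which of the three pipelines actually serves requests is selected by name when the registry loads this file; Glance derives the name from its [paste_deploy] flavor option. A minimal loading sketch with paste.deploy (the path shown is hypothetical):

    from paste.deploy import loadapp

    # Pick the keystone-authenticated pipeline by its [pipeline:...] name.
    app = loadapp('config:/etc/glance/glance-registry-paste.ini',
                  name='glance-registry-keystone')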
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-registry.conf b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/glance-registry.conf
new file mode 100644 (file)
index 0000000..0fb7ed0
--- /dev/null
@@ -0,0 +1,1431 @@
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+[DEFAULT]
+
+#
+# From glance.registry
+#
+
+# When true, this option sets the owner of an image to be the tenant.
+# Otherwise, the owner of the image will be the authenticated user
+# issuing the request. (boolean value)
+#owner_is_tenant = true
+
+# Role used to identify an authenticated user as administrator.
+# (string value)
+#admin_role = admin
+
+# Allow unauthenticated users to access the API with read-only
+# privileges. This only applies when using ContextMiddleware. (boolean
+# value)
+#allow_anonymous_access = false
+
+# Limits request ID length. (integer value)
+#max_request_id_length = 64
+
+# Whether to allow users to specify image properties beyond what the
+# image schema provides (boolean value)
+#allow_additional_image_properties = true
+
+# Maximum number of image members per image. Negative values evaluate
+# to unlimited. (integer value)
+#image_member_quota = 128
+
+# Maximum number of properties allowed on an image. Negative values
+# evaluate to unlimited. (integer value)
+#image_property_quota = 128
+
+# Maximum number of tags allowed on an image. Negative values evaluate
+# to unlimited. (integer value)
+#image_tag_quota = 128
+
+# Maximum number of locations allowed on an image. Negative values
+# evaluate to unlimited. (integer value)
+#image_location_quota = 10
+
+# Python module path of data access API (string value)
+data_api = glance.db.sqlalchemy.api
+
+# Default value for the number of items returned by a request if not
+# specified explicitly in the request (integer value)
+#limit_param_default = 25
+
+# Maximum permissible number of items that could be returned by a
+# request (integer value)
+#api_limit_max = 1000
+
+# Whether to include the backend image storage location in image
+# properties. Revealing storage location can be a security risk, so
+# use this setting with caution! (boolean value)
+#show_image_direct_url = false
+
+# Whether to include the backend image locations in image properties.
+# For example, if using the file system store a URL of
+# "file:///path/to/image" will be returned to the user in the
+# 'direct_url' meta-data field. Revealing storage location can be a
+# security risk, so use this setting with caution! Setting this to
+# true overrides the show_image_direct_url option. (boolean value)
+#show_multiple_locations = false
+
+# Maximum size of image a user can upload in bytes. Defaults to
+# 1099511627776 bytes (1 TB). WARNING: this value should only be
+# increased after careful consideration and must be set to a value
+# under 8 EB (9223372036854775808). (integer value)
+# Maximum value: 9223372036854775808
+#image_size_cap = 1099511627776
+
+# Set a system wide quota for every user. This value is the total
+# capacity that a user can use across all storage systems. A value of
+# 0 means unlimited. An optional unit can be specified for the value.
+# Accepted units are B, KB, MB, GB and TB representing Bytes,
+# KiloBytes, MegaBytes, GigaBytes and TeraBytes respectively. If no
+# unit is specified then Bytes is assumed. Note that there should not
+# be any space between value and unit and units are case sensitive.
+# (string value)
+#user_storage_quota = 0
+
+# Deploy the v1 OpenStack Images API. (boolean value)
+#enable_v1_api = true
+
+# Deploy the v2 OpenStack Images API. (boolean value)
+#enable_v2_api = true
+
+# Deploy the v1 OpenStack Registry API. (boolean value)
+#enable_v1_registry = true
+
+# Deploy the v2 OpenStack Registry API. (boolean value)
+#enable_v2_registry = true
+
+# The hostname/IP of the pydev process listening for debug connections
+# (string value)
+#pydev_worker_debug_host = <None>
+
+# The port on which a pydev process is listening for connections.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#pydev_worker_debug_port = 5678
+
+# AES key for encrypting store 'location' metadata. This includes, if
+# used, Swift or S3 credentials. Should be set to a random string of
+# length 16, 24 or 32 bytes (string value)
+#metadata_encryption_key = <None>
+
+# Digest algorithm which will be used for digital signature. Use the
+# command "openssl list-message-digest-algorithms" to get the
+# available algorithms supported by the version of OpenSSL on the
+# platform. Examples are "sha1", "sha256", "sha512", etc. (string
+# value)
+#digest_algorithm = sha256
+
+# Address to bind the server.  Useful when selecting a particular
+# network interface. (string value)
+bind_host = 0.0.0.0
+
+# The port on which the server will listen. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+bind_port = 9191
+
+# The backlog value that will be used when creating the TCP listener
+# socket. (integer value)
+#backlog = 4096
+
+# The value for the socket option TCP_KEEPIDLE.  This is the time in
+# seconds that the connection must be idle before TCP starts sending
+# keepalive probes. (integer value)
+#tcp_keepidle = 600
+
+# CA certificate file to use to verify connecting clients. (string
+# value)
+#ca_file = <None>
+
+# Certificate file to use when starting API server securely. (string
+# value)
+#cert_file = <None>
+
+# Private key file to use when starting API server securely. (string
+# value)
+#key_file = <None>
+
+# The number of child process workers that will be created to service
+# requests. The default will be equal to the number of CPUs available.
+# (integer value)
+#workers = <None>
+
+# Maximum line size of message headers to be accepted. max_header_line
+# may need to be increased when using large tokens (typically those
+# generated by the Keystone v3 API with big service catalogs).
+# (integer value)
+#max_header_line = 16384
+
+# If False, the server will return the header "Connection: close"; if
+# True, it will return "Connection: Keep-Alive" in its responses. To
+# close the client socket connection explicitly after the response is
+# sent and read successfully by the client, simply set this option to
+# False when you create a wsgi server. (boolean value)
+#http_keepalive = true
+
+# Timeout for client connections' socket operations. If an incoming
+# connection is idle for this number of seconds it will be closed. A
+# value of '0' means wait forever. (integer value)
+#client_socket_timeout = 900
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of
+# the default INFO level. (boolean value)
+debug = true
+
+# If set to false, the logging level will be set to WARNING instead of
+# the default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+verbose = true
+
+# The name of a logging configuration file. This file is appended to
+# any existing logging configuration files. For details about logging
+# configuration files, see the Python logging module documentation.
+# Note that when logging configuration files are used then all logging
+# configuration is set in the configuration file and other logging
+# configuration options are ignored (for example,
+# logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s. This option is ignored if log_config_append is set.
+# (string value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default
+# is set, logging will go to stderr as defined by use_stderr. This
+# option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+log_file = {RIFT_VAR_ROOT}/log/glance/glance-registry.log
+
+# (Optional) The base directory used for relative log_file paths.
+# This option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses a logging handler designed to watch the file system. When the
+# log file is moved or removed, this handler opens a new log file at
+# the specified path instantaneously. This only makes sense when the
+# log_file option is specified and the platform is Linux. This option
+# is ignored if log_config_append is set. (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and
+# will be changed later to honor RFC5424. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined.
+# (string value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the
+# message is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string
+# value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is
+# ignored if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message.
+# (string value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message.
+# (string value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+#
+# From oslo.messaging
+#
+
+# Size of RPC connection pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
+#rpc_conn_pool_size = 30
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve to this
+# address. (string value)
+#rpc_zmq_bind_address = *
+
+# MatchMaker driver. (string value)
+# Allowed values: redis, dummy
+#rpc_zmq_matchmaker = redis
+
+# Type of concurrency used. Either "native" or "eventlet" (string
+# value)
+#rpc_zmq_concurrency = eventlet
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts = 1
+
+# Maximum number of ingress messages to locally buffer per topic.
+# Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog = <None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir = /var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP address.
+# Must match "host" option, if running Nova. (string value)
+#rpc_zmq_host = localhost
+
+# Seconds to wait before a cast expires (TTL). The default value of -1
+# specifies an infinite linger period. The value of 0 specifies no
+# linger period. Pending messages shall be discarded immediately when
+# the socket is closed. Only supported by impl_zmq. (integer value)
+#rpc_cast_timeout = -1
+
+# The default number of seconds that poll should wait. Poll raises
+# timeout exception when timeout expired. (integer value)
+#rpc_poll_timeout = 1
+
+# Expiration timeout in seconds of a name service record about
+# an existing target (< 0 means no timeout). (integer value)
+#zmq_target_expire = 120
+
+# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy.
+# (boolean value)
+#use_pub_sub = true
+
+# Minimum port number for the random ports range. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#rpc_zmq_min_port = 49152
+
+# Maximum port number for the random ports range. (integer value)
+# Minimum value: 1
+# Maximum value: 65536
+#rpc_zmq_max_port = 65536
+
+# Number of retries to find a free port number before failing with
+# ZMQBindError. (integer value)
+#rpc_zmq_bind_port_retries = 100
+
+# Size of executor thread pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
+#executor_thread_pool_size = 64
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout = 60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend option
+# and driver specific configuration. (string value)
+#transport_url = <None>
+
+# The messaging driver to use, defaults to rabbit. Other drivers
+# include amqp and zmq. (string value)
+#rpc_backend = rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the transport_url
+# option. (string value)
+#control_exchange = openstack
+
+
+[database]
+
+#
+# From oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+# Deprecated group/name - [DEFAULT]/sqlite_db
+#sqlite_db = oslo.sqlite
+sqlite_db = {RIFT_VAR_ROOT}/glance/glance-registry.db
+
+# If True, SQLite uses synchronous mode. (boolean value)
+# Deprecated group/name - [DEFAULT]/sqlite_synchronous
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database.
+# (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+sql_connection=sqlite:///{RIFT_VAR_ROOT}/glance/glance-registry.db
+connection=sqlite:///{RIFT_VAR_ROOT}/glance/glance-registry.db
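Both the deprecated sql_connection name and the current connection name are set to the same URL, presumably for compatibility. After {RIFT_VAR_ROOT} substitution, the value is a standard SQLAlchemy sqlite URL; a sketch assuming /var/rift as the root:

    from sqlalchemy import create_engine

    # 'sqlite:///' + '/var/rift/...' yields four slashes, SQLAlchemy's
    # form for an absolute sqlite path.
    engine = create_engine('sqlite:////var/rift/glance/glance-registry.db')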
+
+# The SQLAlchemy connection string to use to connect to the slave
+# database. (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including
+# the default, overrides any server-set SQL mode. To use whatever SQL
+# mode is set by the server configuration, set this to no value.
+# Example: mysql_sql_mode= (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum number of database connection retries during startup. Set to
+# -1 to specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a SQL connection. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = 50
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer
+# value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on connection
+# lost. (boolean value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database
+# operation up to db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries
+# of a database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before
+# error is raised. Set to -1 to specify an infinite retry count.
+# (integer value)
+#db_max_retries = 20
+
+db_auto_create = True
+
+#
+# From oslo.db.concurrency
+#
+
+# Enable the experimental use of thread pooling for all DB API calls
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/dbapi_use_tpool
+#use_tpool = false
+
+
+[glance_store]
+
+#
+# From glance.store
+#
+
+# List of stores enabled. Valid stores are: cinder, file, http, rbd,
+# sheepdog, swift, s3, vsphere (list value)
+#stores = file,http
+
+# Default scheme to use to store image data. The scheme must be
+# registered by one of the stores defined by the 'stores' config
+# option. (string value)
+#default_store = file
+
+# Minimum interval, in seconds, between updates of dynamic storage
+# capabilities based on backend status. This is not a periodic
+# routine: the update logic runs only once the interval has elapsed
+# and a store operation has been triggered. The feature is enabled
+# only when the option value is greater than zero. (integer value)
+#store_capabilities_update_min_interval = 0
+
+# Specify the path to the CA bundle file to use in verifying the
+# remote server certificate. (string value)
+#https_ca_certificates_file = <None>
+
+# If true, the remote server certificate is not verified. If false,
+# then the default CA truststore is used for verification. This option
+# is ignored if "https_ca_certificates_file" is set. (boolean value)
+#https_insecure = true
+
+# Specify the http/https proxy information that should be used to
+# connect to the remote server. The proxy information should be a key
+# value pair of the scheme and proxy, e.g. http:10.0.0.1:3128. You can
+# specify proxies for multiple schemes by separating the key value
+# pairs with a comma, e.g. http:10.0.0.1:3128, https:10.0.0.1:1080.
+# (dict value)
+#http_proxy_information =
+
+# If True, swiftclient won't check for a valid SSL certificate when
+# authenticating. (boolean value)
+#swift_store_auth_insecure = false
+
+# A string giving the CA certificate file to use in SSL connections
+# for verifying certs. (string value)
+#swift_store_cacert = <None>
+
+# The region of the swift endpoint to be used for single tenant. This
+# setting is only necessary if the tenant has multiple swift
+# endpoints. (string value)
+#swift_store_region = <None>
+
+# If set, the configured endpoint will be used. If None, the storage
+# url from the auth response will be used. (string value)
+#swift_store_endpoint = <None>
+
+# A string giving the endpoint type of the swift service to use
+# (publicURL, adminURL or internalURL). This setting is only used if
+# swift_store_auth_version is 2. (string value)
+#swift_store_endpoint_type = publicURL
+
+# A string giving the service type of the swift service to use. This
+# setting is only used if swift_store_auth_version is 2. (string
+# value)
+#swift_store_service_type = object-store
+
+# Container within the account that Glance should use for storing
+# images in Swift when using single container mode. In multiple
+# container mode, this will be the prefix for all containers. (string
+# value)
+#swift_store_container = glance
+
+# The size, in MB, at which Glance will start chunking image files
+# and doing a large object manifest in Swift. (integer value)
+#swift_store_large_object_size = 5120
+
+# The amount of data written to a temporary disk buffer during the
+# process of chunking the image file. (integer value)
+#swift_store_large_object_chunk_size = 200
+
+# A boolean value that determines if we create the container if it
+# does not exist. (boolean value)
+#swift_store_create_container_on_put = false
+
+# If set to True, enables multi-tenant storage mode which causes
+# Glance images to be stored in tenant specific Swift accounts.
+# (boolean value)
+#swift_store_multi_tenant = false
+
+# When set to 0, a single-tenant store will only use one container to
+# store all images. When set to an integer value between 1 and 32, a
+# single-tenant store will use multiple containers to store images,
+# and this value will determine how many containers are created. Used
+# only when swift_store_multi_tenant is disabled. The total number of
+# containers that will be used is equal to 16^N, so if this config
+# option is set to 2, then 16^2=256 containers will be used to store
+# images. (integer value)
+#swift_store_multiple_containers_seed = 0
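The container count grows as 16^N, presumably because the first N hex characters of the image id select the container; a trivial check of the arithmetic:

    # seed 0 keeps a single container; seed N in 1..32 yields 16**N.
    for seed in (0, 1, 2):
        print(seed, '->', 1 if seed == 0 else 16 ** seed)
    # 0 -> 1, 1 -> 16, 2 -> 256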
+
+# A list of tenants that will be granted read/write access on all
+# Swift containers created by Glance in multi-tenant mode. (list
+# value)
+#swift_store_admin_tenants =
+
+# If set to False, disables SSL layer compression of https swift
+# requests. Setting to False may improve performance for images which
+# are already in a compressed format, e.g. qcow2. (boolean value)
+#swift_store_ssl_compression = true
+
+# The number of times a Swift download will be retried before the
+# request fails. (integer value)
+#swift_store_retry_get_count = 0
+
+# The period of time (in seconds) before token expiration at which
+# glance_store will try to request a new user token. The default of
+# 60 means that if the token is going to expire within one minute,
+# glance_store requests a new user token. (integer value)
+#swift_store_expire_soon_interval = 60
+
+# If set to True, create a trust for each add/get request to the
+# multi-tenant store so that the authentication token cannot expire
+# while data is being uploaded or downloaded. If set to False, the
+# user token is used for the Swift connection (so there is no trust-
+# creation overhead). Note that this option is honored if and only if
+# swift_store_multi_tenant=True. (boolean value)
+#swift_store_use_trusts = true
+
+# The reference to the default swift account/backing store parameters
+# to use for adding new images. (string value)
+#default_swift_reference = ref1
+
+# Version of the authentication service to use. Valid versions are 2
+# and 3 for keystone and 1 (deprecated) for swauth and rackspace.
+# (deprecated - use "auth_version" in swift_store_config_file) (string
+# value)
+#swift_store_auth_version = 2
+
+# The address where the Swift authentication service is listening.
+# (deprecated - use "auth_address" in swift_store_config_file) (string
+# value)
+#swift_store_auth_address = <None>
+
+# The user to authenticate against the Swift authentication service
+# (deprecated - use "user" in swift_store_config_file) (string value)
+#swift_store_user = <None>
+
+# Auth key for the user authenticating against the Swift
+# authentication service. (deprecated - use "key" in
+# swift_store_config_file) (string value)
+#swift_store_key = <None>
+
+# The config file that has the swift account(s) configs. (string value)
+#swift_store_config_file = <None>
+
+# RADOS images will be chunked into objects of this size (in
+# megabytes). For best performance, this should be a power of two.
+# (integer value)
+#rbd_store_chunk_size = 8
+
+# RADOS pool in which images are stored. (string value)
+#rbd_store_pool = images
+
+# RADOS user to authenticate as (only applicable if using Cephx). If
+# <None>, a default will be chosen based on the client. section in
+# rbd_store_ceph_conf. (string value)
+#rbd_store_user = <None>
+
+# Ceph configuration file path. If <None>, librados will locate the
+# default config. If using cephx authentication, this file should
+# include a reference to the right keyring in a client.<USER> section
+# (string value)
+#rbd_store_ceph_conf = /etc/ceph/ceph.conf
+
+# Timeout value (in seconds) used when connecting to ceph cluster. If
+# value <= 0, no timeout is set and default librados value is used.
+# (integer value)
+#rados_connect_timeout = 0
+
+# Info to match when looking for cinder in the service catalog. Format
+# is : separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volumev2::publicURL
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v2/%(tenant)s (string value)
+#cinder_endpoint_template = <None>
+
+# Region name of this node. If specified, it will be used to locate
+# OpenStack services for stores. (string value)
+# Deprecated group/name - [DEFAULT]/os_region_name
+#cinder_os_region_name = <None>
+
+# Location of the CA certificates file to use for cinder client
+# requests.
+# (string value)
+#cinder_ca_certificates_file = <None>
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Period of time in seconds to wait for a cinder volume
+# transition to complete. (integer value)
+#cinder_state_transition_timeout = 300
+
+# Allow to perform insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = false
+
+# The address where the Cinder authentication service is listening. If
+# <None>, the cinder endpoint in the service catalog is used. (string
+# value)
+#cinder_store_auth_address = <None>
+
+# User name to authenticate against Cinder. If <None>, the user of
+# current context is used. (string value)
+#cinder_store_user_name = <None>
+
+# Password for the user authenticating against Cinder. If <None>, the
+# current context auth token is used. (string value)
+#cinder_store_password = <None>
+
+# Project name where the image is stored in Cinder. If <None>, the
+# project in current context is used. (string value)
+#cinder_store_project_name = <None>
+
+# Path to the rootwrap configuration file to use for running commands
+# as root. (string value)
+#rootwrap_config = /etc/glance/rootwrap.conf
+
+# The host where the S3 server is listening. (string value)
+#s3_store_host = <None>
+
+# The S3 query token access key. (string value)
+#s3_store_access_key = <None>
+
+# The S3 query token secret key. (string value)
+#s3_store_secret_key = <None>
+
+# The S3 bucket to be used to store the Glance data. (string value)
+#s3_store_bucket = <None>
+
+# The local directory where uploads will be staged before they are
+# transferred into S3. (string value)
+#s3_store_object_buffer_dir = <None>
+
+# A boolean to determine if the S3 bucket should be created on upload
+# if it does not exist or if an error should be returned to the user.
+# (boolean value)
+#s3_store_create_bucket_on_put = false
+
+# The S3 calling format used to determine the bucket. Either subdomain
+# or path can be used. (string value)
+#s3_store_bucket_url_format = subdomain
+
+# The size, in MB, at which S3 should start chunking image files and
+# doing a multipart upload in S3. (integer value)
+#s3_store_large_object_size = 100
+
+# The multipart upload part size, in MB, that S3 should use when
+# uploading parts. The size must be greater than or equal to 5M.
+# (integer value)
+#s3_store_large_object_chunk_size = 10
+
+# The number of thread pools to perform a multipart upload in S3.
+# (integer value)
+#s3_store_thread_pools = 10
+
+# Enable the use of a proxy. (boolean value)
+#s3_store_enable_proxy = false
+
+# Address or hostname for the proxy server. (string value)
+#s3_store_proxy_host = <None>
+
+# The port to use when connecting over a proxy. (integer value)
+#s3_store_proxy_port = 8080
+
+# The username to connect to the proxy. (string value)
+#s3_store_proxy_user = <None>
+
+# The password to use when connecting over a proxy. (string value)
+#s3_store_proxy_password = <None>
+
+# Images will be chunked into objects of this size (in megabytes). For
+# best performance, this should be a power of two. (integer value)
+#sheepdog_store_chunk_size = 64
+
+# Port of sheep daemon. (integer value)
+#sheepdog_store_port = 7000
+
+# IP address of sheep daemon. (string value)
+#sheepdog_store_address = localhost
+
+# Directory to which the Filesystem backend store writes images.
+# (string value)
+#filesystem_store_datadir = /var/lib/glance/images
+
+# List of directories and their priorities to which the Filesystem
+# backend store writes images. (multi valued)
+#filesystem_store_datadirs =
+
+# The path to a file which contains the metadata to be returned with
+# any location associated with this store.  The file must contain a
+# valid JSON object. The object should contain the keys 'id' and
+# 'mountpoint'. The value for both keys should be 'string'. (string
+# value)
+#filesystem_store_metadata_file = <None>
+
+# The required permission for a created image file. With this, the
+# user of another consuming service, e.g. Nova, can be made the
+# exclusive member of the group that owns the created files.
+# Assigning a value less than or equal to zero means the file's
+# default permission is left unchanged. This value will be decoded
+# as an octal digit. (integer value)
+#filesystem_store_file_perm = 0
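Because the value is decoded as octal, a conf entry of 640 means mode rw-r-----, not decimal 640. A quick illustration:

    import stat

    filesystem_store_file_perm = 640        # as it would appear in this file
    mode = int(str(filesystem_store_file_perm), 8)
    assert mode == 0o640
    print(stat.filemode(mode))              # -> '?rw-r-----'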
+
+# ESX/ESXi or vCenter Server target system. The server value can be an
+# IP address or a DNS name. (string value)
+#vmware_server_host = <None>
+
+# Username for authenticating with VMware ESX/VC server. (string
+# value)
+#vmware_server_username = <None>
+
+# Password for authenticating with VMware ESX/VC server. (string
+# value)
+#vmware_server_password = <None>
+
+# Number of times VMware ESX/VC server API must be retried upon
+# connection related issues. (integer value)
+#vmware_api_retry_count = 10
+
+# The interval used for polling remote tasks invoked on VMware ESX/VC
+# server. (integer value)
+#vmware_task_poll_interval = 5
+
+# The name of the directory where the glance images will be stored in
+# the VMware datastore. (string value)
+#vmware_store_image_dir = /openstack_glance
+
+# If true, the ESX/vCenter server certificate is not verified. If
+# false, then the default CA truststore is used for verification. This
+# option is ignored if "vmware_ca_file" is set. (boolean value)
+# Deprecated group/name - [DEFAULT]/vmware_api_insecure
+#vmware_insecure = false
+
+# Specify a CA bundle file to use in verifying the ESX/vCenter server
+# certificate. (string value)
+#vmware_ca_file = <None>
+
+# A list of datastores where the image can be stored. This option may
+# be specified multiple times for specifying multiple datastores. The
+# datastore name should be specified after its datacenter path,
+# seperated by ":". An optional weight may be given after the
+# datastore name, seperated again by ":". Thus, the required format
+# becomes <datacenter_path>:<datastore_name>:<optional_weight>. When
+# adding an image, the datastore with highest weight will be selected,
+# unless there is not enough free space available in cases where the
+# image size is already known. If no weight is given, it is assumed to
+# be zero and the directory will be considered for selection last. If
+# multiple datastores have the same weight, then the one with the most
+# free space available is selected. (multi valued)
+#vmware_datastores =
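A minimal sketch of the selection rule just described (highest weight wins; free space breaks ties); the datastore names and free-space figures are made up:

    # Candidates as (datacenter_path, name, weight, free_bytes).
    datastores = [
        ('dc1', 'ds_fast',  2,  50 * 1024**3),
        ('dc1', 'ds_big',   2, 500 * 1024**3),
        ('dc2', 'ds_spare', 0, 900 * 1024**3),  # no weight given -> 0, tried last
    ]
    best = max(datastores, key=lambda d: (d[2], d[3]))
    print(best[1])   # -> ds_big (ties ds_fast on weight, has more free space)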
+
+
+[keystone_authtoken]
+
+#
+# From keystonemiddleware.auth_token
+#
+
+# Complete public Identity API endpoint. (string value)
+#auth_uri = <None>
+
+# API version of the admin Identity API endpoint. (string value)
+#auth_version = <None>
+
+# Do not handle authorization requests within the middleware, but
+# delegate the authorization decision to downstream WSGI components.
+# (boolean value)
+#delay_auth_decision = false
+
+# Request timeout value for communicating with Identity API server.
+# (integer value)
+#http_connect_timeout = <None>
+
+# How many times are we trying to reconnect when communicating with
+# Identity API Server. (integer value)
+#http_request_max_retries = 3
+
+# Env key for the swift cache. (string value)
+#cache = <None>
+
+# Required if the identity server requires a client certificate.
+# (string value)
+#certfile = <None>
+
+# Required if the identity server requires a client certificate.
+# (string value)
+#keyfile = <None>
+
+# A PEM encoded Certificate Authority to use when verifying HTTPS
+# connections. Defaults to system CAs. (string value)
+#cafile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# The region in which the identity server can be found. (string value)
+#region_name = <None>
+
+# Directory used to cache files related to PKI tokens. (string value)
+#signing_dir = <None>
+
+# Optionally specify a list of memcached server(s) to use for caching.
+# If left undefined, tokens will instead be cached in-process. (list
+# value)
+# Deprecated group/name - [DEFAULT]/memcache_servers
+#memcached_servers = <None>
+
+# In order to prevent excessive effort spent validating tokens, the
+# middleware caches previously-seen tokens for a configurable duration
+# (in seconds). Set to -1 to disable caching completely. (integer
+# value)
+#token_cache_time = 300
+
+# Determines the frequency at which the list of revoked tokens is
+# retrieved from the Identity service (in seconds). A high number of
+# revocation events combined with a low cache duration may
+# significantly reduce performance. (integer value)
+#revocation_cache_time = 10
+
+# (Optional) If defined, indicate whether token data should be
+# authenticated or authenticated and encrypted. If MAC, token data is
+# authenticated (with HMAC) in the cache. If ENCRYPT, token data is
+# encrypted and authenticated in the cache. If the value is not one of
+# these options or empty, auth_token will raise an exception on
+# initialization. (string value)
+# Allowed values: None, MAC, ENCRYPT
+#memcache_security_strategy = None
+
+# (Optional, mandatory if memcache_security_strategy is defined) This
+# string is used for key derivation. (string value)
+#memcache_secret_key = <None>
+
+# (Optional) Number of seconds memcached server is considered dead
+# before it is tried again. (integer value)
+#memcache_pool_dead_retry = 300
+
+# (Optional) Maximum total number of open connections to every
+# memcached server. (integer value)
+#memcache_pool_maxsize = 10
+
+# (Optional) Socket timeout in seconds for communicating with a
+# memcached server. (integer value)
+#memcache_pool_socket_timeout = 3
+
+# (Optional) Number of seconds a connection to memcached is held
+# unused in the pool before it is closed. (integer value)
+#memcache_pool_unused_timeout = 60
+
+# (Optional) Number of seconds that an operation will wait to get a
+# memcached client connection from the pool. (integer value)
+#memcache_pool_conn_get_timeout = 10
+
+# (Optional) Use the advanced (eventlet safe) memcached client pool.
+# The advanced pool will only work under python 2.x. (boolean value)
+#memcache_use_advanced_pool = false
+
+# (Optional) Indicate whether to set the X-Service-Catalog header. If
+# False, middleware will not ask for service catalog on token
+# validation and will not set the X-Service-Catalog header. (boolean
+# value)
+#include_service_catalog = true
+
+# Used to control the use and type of token binding. Can be set to:
+# "disabled" to not check token binding; "permissive" (default) to
+# validate binding information if the bind type is of a form known to
+# the server and ignore it if not; "strict", like "permissive" but
+# rejecting the token if the bind type is unknown; "required", meaning
+# some form of token binding is required; or the name of a specific
+# binding method that must be present in tokens. (string value)
+#enforce_token_bind = permissive
+
+# If true, the revocation list will be checked for cached tokens. This
+# requires that PKI tokens are configured on the identity server.
+# (boolean value)
+#check_revocations_for_cached = false
+
+# Hash algorithms to use for hashing PKI tokens. This may be a single
+# algorithm or multiple. The algorithms are those supported by Python
+# standard hashlib.new(). The hashes will be tried in the order given,
+# so put the preferred one first for performance. The result of the
+# first hash will be stored in the cache. This will typically be set
+# to multiple values only while migrating from a less secure algorithm
+# to a more secure one. Once all the old tokens are expired this
+# option should be set to a single value for better performance. (list
+# value)
+#hash_algorithms = md5
+
+# Authentication type to load (unknown value)
+# Deprecated group/name - [DEFAULT]/auth_plugin
+#auth_type = <None>
+
+# Config Section from which to load plugin specific options (unknown
+# value)
+#auth_section = <None>
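+# A typical deployment fills in at least the endpoint and token-cache
+# options, for example (illustrative values, not shipped defaults):
+# auth_uri = http://keystone.example.com:5000/v3
+# memcached_servers = 192.0.2.20:11211
+# memcache_security_strategy = ENCRYPT
+# memcache_secret_key = some-derivation-key
+# token_cache_time = 300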
+
+
+[matchmaker_redis]
+
+#
+# From oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host = 127.0.0.1
+
+# Use this port to connect to redis host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#port = 6379
+
+# Password for Redis server (optional). (string value)
+#password =
+
+# List of Redis Sentinel hosts (fault tolerance mode) e.g.
+# [host:port, host1:port ... ] (list value)
+#sentinel_hosts =
+
+# Redis replica set name. (string value)
+#sentinel_group_name = oslo-messaging-zeromq
+
+# Time in ms to wait between connection attempts. (integer value)
+#wait_timeout = 500
+
+# Time in ms to wait before the transaction is killed. (integer value)
+#check_timeout = 20000
+
+# Timeout in ms on blocking socket operations (integer value)
+#socket_timeout = 1000
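+# For example, a Sentinel-backed deployment might set (illustrative
+# addresses, not shipped defaults):
+# sentinel_hosts = 192.0.2.10:26379,192.0.2.11:26379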
+
+
+[oslo_messaging_amqp]
+
+#
+# From oslo.messaging
+#
+
+# address prefix used when sending to a specific server (string value)
+# Deprecated group/name - [amqp1]/server_request_prefix
+#server_request_prefix = exclusive
+
+# address prefix used when broadcasting to all servers (string value)
+# Deprecated group/name - [amqp1]/broadcast_prefix
+#broadcast_prefix = broadcast
+
+# address prefix when sending to any server in group (string value)
+# Deprecated group/name - [amqp1]/group_request_prefix
+#group_request_prefix = unicast
+
+# Name for the AMQP container (string value)
+# Deprecated group/name - [amqp1]/container_name
+#container_name = <None>
+
+# Timeout for inactive connections (in seconds) (integer value)
+# Deprecated group/name - [amqp1]/idle_timeout
+#idle_timeout = 0
+
+# Debug: dump AMQP frames to stdout (boolean value)
+# Deprecated group/name - [amqp1]/trace
+#trace = false
+
+# CA certificate PEM file to verify server certificate (string value)
+# Deprecated group/name - [amqp1]/ssl_ca_file
+#ssl_ca_file =
+
+# Identifying certificate PEM file to present to clients (string
+# value)
+# Deprecated group/name - [amqp1]/ssl_cert_file
+#ssl_cert_file =
+
+# Private key PEM file used to sign cert_file certificate (string
+# value)
+# Deprecated group/name - [amqp1]/ssl_key_file
+#ssl_key_file =
+
+# Password for decrypting ssl_key_file (if encrypted) (string value)
+# Deprecated group/name - [amqp1]/ssl_key_password
+#ssl_key_password = <None>
+
+# Accept clients using either SSL or plain TCP (boolean value)
+# Deprecated group/name - [amqp1]/allow_insecure_clients
+#allow_insecure_clients = false
+
+# Space separated list of acceptable SASL mechanisms (string value)
+# Deprecated group/name - [amqp1]/sasl_mechanisms
+#sasl_mechanisms =
+
+# Path to directory that contains the SASL configuration (string
+# value)
+# Deprecated group/name - [amqp1]/sasl_config_dir
+#sasl_config_dir =
+
+# Name of configuration file (without .conf suffix) (string value)
+# Deprecated group/name - [amqp1]/sasl_config_name
+#sasl_config_name =
+
+# User name for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/username
+#username =
+
+# Password for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/password
+#password =
+
+
+[oslo_messaging_notifications]
+
+#
+# From oslo.messaging
+#
+
+# The driver(s) to handle sending notifications. Possible values are
+# messaging, messagingv2, routing, log, test, noop (multi valued)
+# Deprecated group/name - [DEFAULT]/notification_driver
+#driver =
+
+# A URL representing the messaging driver to use for notifications. If
+# not set, we fall back to the same configuration used for RPC.
+# (string value)
+# Deprecated group/name - [DEFAULT]/notification_transport_url
+#transport_url = <None>
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+# Deprecated group/name - [DEFAULT]/notification_topics
+#topics = notifications
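+# For example, to emit notifications through the v2 messaging driver on
+# the default topic (illustrative, not a shipped default):
+# driver = messagingv2
+# topics = notifications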
+
+
+[oslo_messaging_rabbit]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_durable_queues
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues = false
+
+# Auto-delete queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_auto_delete
+#amqp_auto_delete = false
+
+# SSL version to use (valid only if SSL enabled). Valid values are
+# TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be
+# available on some distributions. (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_version
+#kombu_ssl_version =
+
+# SSL key file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
+#kombu_ssl_keyfile =
+
+# SSL cert file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
+#kombu_ssl_certfile =
+
+# SSL certification authority file (valid only if SSL enabled).
+# (string value)
+# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
+#kombu_ssl_ca_certs =
+
+# How long to wait before reconnecting in response to an AMQP consumer
+# cancel notification. (floating point value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
+#kombu_reconnect_delay = 1.0
+
+# EXPERIMENTAL: Possible values are: gzip, bz2. If not set, compression
+# will not be used. This option may not be available in future
+# versions. (string value)
+#kombu_compression = <None>
+
+# How long to wait for a missing client before abandoning the attempt
+# to send it its replies. This value should not be longer than
+# rpc_response_timeout. (integer value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout
+#kombu_missing_consumer_retry_timeout = 60
+
+# Determines how the next RabbitMQ node is chosen in case the one we
+# are currently connected to becomes unavailable. Takes effect only if
+# more than one RabbitMQ node is provided in config. (string value)
+# Allowed values: round-robin, shuffle
+#kombu_failover_strategy = round-robin
+
+# The RabbitMQ broker address where a single node is used. (string
+# value)
+# Deprecated group/name - [DEFAULT]/rabbit_host
+#rabbit_host = localhost
+
+# The RabbitMQ broker port where a single node is used. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# Deprecated group/name - [DEFAULT]/rabbit_port
+#rabbit_port = 5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+# Deprecated group/name - [DEFAULT]/rabbit_hosts
+#rabbit_hosts = $rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
+#rabbit_use_ssl = false
+
+# The RabbitMQ userid. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_userid
+#rabbit_userid = guest
+
+# The RabbitMQ password. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_password
+#rabbit_password = guest
+
+# The RabbitMQ login method. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_login_method
+#rabbit_login_method = AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+# Deprecated group/name - [DEFAULT]/rabbit_virtual_host
+#rabbit_virtual_host = /
+
+# How frequently to retry connecting with RabbitMQ. (integer value)
+#rabbit_retry_interval = 1
+
+# How long to backoff for between retries when connecting to RabbitMQ.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
+#rabbit_retry_backoff = 2
+
+# Maximum interval of RabbitMQ connection retries. Default is 30
+# seconds. (integer value)
+#rabbit_interval_max = 30
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+# Deprecated group/name - [DEFAULT]/rabbit_max_retries
+#rabbit_max_retries = 0
+
+# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. In RabbitMQ 3.0,
+# queue mirroring is no longer controlled by the x-ha-policy argument
+# when declaring a queue. If you just want to make sure that all
+# queues (except those with auto-generated names) are mirrored across
+# all nodes, run: "rabbitmqctl set_policy HA '^(?!amq\.).*' '{"ha-
+# mode": "all"}' " (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
+#rabbit_ha_queues = false
+
+# Positive integer representing duration in seconds for queue TTL
+# (x-expires). Queues which are unused for the duration of the TTL are
+# automatically deleted. The parameter affects only reply and fanout
+# queues. (integer value)
+# Minimum value: 1
+#rabbit_transient_queues_ttl = 600
+
+# Specifies the number of messages to prefetch. Setting to zero allows
+# unlimited messages. (integer value)
+#rabbit_qos_prefetch_count = 0
+
+# Number of seconds after which the Rabbit broker is considered down
+# if the heartbeat's keep-alive fails (0 disables the heartbeat).
+# EXPERIMENTAL (integer value)
+#heartbeat_timeout_threshold = 60
+
+# How many times during the heartbeat_timeout_threshold to check the
+# heartbeat. (integer value)
+#heartbeat_rate = 2
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/fake_rabbit
+#fake_rabbit = false
+
+# Maximum number of channels to allow (integer value)
+#channel_max = <None>
+
+# The maximum byte size for an AMQP frame (integer value)
+#frame_max = <None>
+
+# How often to send heartbeats for consumer's connections (integer
+# value)
+#heartbeat_interval = 1
+
+# Enable SSL (boolean value)
+#ssl = <None>
+
+# Arguments passed to ssl.wrap_socket (dict value)
+#ssl_options = <None>
+
+# Set socket timeout in seconds for connection's socket (floating
+# point value)
+#socket_timeout = 0.25
+
+# Set TCP_USER_TIMEOUT in seconds for connection's socket (floating
+# point value)
+#tcp_user_timeout = 0.25
+
+# Set delay for reconnection to some host which has connection error
+# (floating point value)
+#host_connection_reconnect_delay = 0.25
+
+# Maximum number of connections to keep queued. (integer value)
+#pool_max_size = 10
+
+# Maximum number of connections to create above `pool_max_size`.
+# (integer value)
+#pool_max_overflow = 0
+
+# Default number of seconds to wait for a connection to become
+# available. (integer value)
+#pool_timeout = 30
+
+# Lifetime of a connection (since creation) in seconds or None for no
+# recycling. Expired connections are closed on acquire. (integer
+# value)
+#pool_recycle = 600
+
+# Threshold at which inactive (since release) connections are
+# considered stale in seconds or None for no staleness. Stale
+# connections are closed on acquire. (integer value)
+#pool_stale = 60
+
+# Persist notification messages. (boolean value)
+#notification_persistence = false
+
+# Exchange name for sending notifications (string value)
+#default_notification_exchange = ${control_exchange}_notification
+
+# Maximum number of unacknowledged messages which RabbitMQ can send to
+# the notification listener. (integer value)
+#notification_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during
+# sending notification, -1 means infinite retry. (integer value)
+#default_notification_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during
+# sending notification message (floating point value)
+#notification_retry_delay = 0.25
+
+# Time to live for rpc queues without consumers in seconds. (integer
+# value)
+#rpc_queue_expiration = 60
+
+# Exchange name for sending RPC messages (string value)
+#default_rpc_exchange = ${control_exchange}_rpc
+
+# Exchange name for receiving RPC replies (string value)
+#rpc_reply_exchange = ${control_exchange}_rpc_reply
+
+# Maximum number of unacknowledged messages which RabbitMQ can send to
+# the rpc listener. (integer value)
+#rpc_listener_prefetch_count = 100
+
+# Maximum number of unacknowledged messages which RabbitMQ can send to
+# the rpc reply listener. (integer value)
+#rpc_reply_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during
+# sending reply. -1 means infinite retry during rpc_timeout (integer
+# value)
+#rpc_reply_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during
+# sending reply. (floating point value)
+#rpc_reply_retry_delay = 0.25
+
+# Reconnecting retry count in case of a connectivity problem during
+# RPC message sending; -1 means infinite retry. If the actual number of
+# retry attempts is not 0, the RPC request could be processed more than
+# once. (integer value)
+#default_rpc_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during
+# sending RPC message (floating point value)
+#rpc_retry_delay = 0.25
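+# For example, a two-node HA deployment might set (illustrative hosts
+# and credentials, not shipped defaults):
+# rabbit_hosts = rabbit1.example.com:5672,rabbit2.example.com:5672
+# rabbit_ha_queues = true
+# rabbit_userid = openstack
+# rabbit_password = secret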
+
+
+[oslo_policy]
+
+#
+# From oslo.policy
+#
+
+# The JSON file that defines policies. (string value)
+# Deprecated group/name - [DEFAULT]/policy_file
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string
+# value)
+# Deprecated group/name - [DEFAULT]/policy_default_rule
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be
+# relative to any directory in the search path defined by the
+# config_dir option, or absolute paths. The file defined by
+# policy_file must exist for these directories to be searched.
+# Missing or empty directories are ignored. (multi valued)
+# Deprecated group/name - [DEFAULT]/policy_dirs
+#policy_dirs = policy.d
+
+
+[paste_deploy]
+
+#
+# From glance.registry
+#
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-api-keystone] use the value "keystone" (string
+# value)
+#flavor = <None>
+
+# Name of the paste configuration file. (string value)
+config_file = {RIFT_INSTALL}/etc/glance/glance-registry-paste.ini
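+# For example, if the paste file above defined a section named
+# [pipeline:glance-registry-keystone], it would be selected with
+# (illustrative):
+# flavor = keystone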
+
+
+[profiler]
+
+#
+# From glance.registry
+#
+
+# If False, fully disable the profiling feature. (boolean value)
+#enabled = false
+
+# If False, do not trace SQL requests. (boolean value)
+#trace_sqlalchemy = false
+
+# Secret key to use to sign Glance API and Glance Registry services
+# tracing messages. (string value)
+#hmac_keys = SECRET_KEY
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/ub16/policy.json b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/policy.json
new file mode 100644 (file)
index 0000000..f49bc08
--- /dev/null
@@ -0,0 +1,61 @@
+{
+    "context_is_admin":  "role:admin",
+    "default": "",
+
+    "add_image": "",
+    "delete_image": "",
+    "get_image": "",
+    "get_images": "",
+    "modify_image": "",
+    "publicize_image": "role:admin",
+    "copy_from": "",
+
+    "download_image": "",
+    "upload_image": "",
+
+    "delete_image_location": "",
+    "get_image_location": "",
+    "set_image_location": "",
+
+    "add_member": "",
+    "delete_member": "",
+    "get_member": "",
+    "get_members": "",
+    "modify_member": "",
+
+    "manage_image_cache": "role:admin",
+
+    "get_task": "role:admin",
+    "get_tasks": "role:admin",
+    "add_task": "role:admin",
+    "modify_task": "role:admin",
+
+    "deactivate": "",
+    "reactivate": "",
+
+    "get_metadef_namespace": "",
+    "get_metadef_namespaces":"",
+    "modify_metadef_namespace":"",
+    "add_metadef_namespace":"",
+
+    "get_metadef_object":"",
+    "get_metadef_objects":"",
+    "modify_metadef_object":"",
+    "add_metadef_object":"",
+
+    "list_metadef_resource_types":"",
+    "get_metadef_resource_type":"",
+    "add_metadef_resource_type_association":"",
+
+    "get_metadef_property":"",
+    "get_metadef_properties":"",
+    "modify_metadef_property":"",
+    "add_metadef_property":"",
+
+    "get_metadef_tag":"",
+    "get_metadef_tags":"",
+    "modify_metadef_tag":"",
+    "add_metadef_tag":"",
+    "add_metadef_tags":""
+
+}
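In this policy file an empty rule string allows the action for any authenticated user, while an expression such as "role:admin" restricts it to administrators. As an illustrative tightening (not part of the file above), image deletion could be limited to admins with:

    "delete_image": "role:admin",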
diff --git a/rwlaunchpad/plugins/rwimagemgr/etc/ub16/schema-image.json b/rwlaunchpad/plugins/rwimagemgr/etc/ub16/schema-image.json
new file mode 100644 (file)
index 0000000..69c2f85
--- /dev/null
@@ -0,0 +1,28 @@
+{
+    "kernel_id": {
+        "type": ["null", "string"],
+        "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
+        "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image."
+    },
+    "ramdisk_id": {
+        "type": ["null", "string"],
+        "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
+        "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image."
+    },
+    "instance_uuid": {
+        "type": "string",
+        "description": "Metadata which can be used to record which instance this image is associated with. (Informational only, does not create an instance snapshot.)"
+    },
+    "architecture": {
+        "description": "Operating system architecture as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
+        "type": "string"
+    },
+    "os_distro": {
+        "description": "Common name of operating system distribution as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
+        "type": "string"
+    },
+    "os_version": {
+        "description": "Operating system version as specified by the distributor",
+        "type": "string"
+    }
+}
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/imagemgr/__init__.py b/rwlaunchpad/plugins/rwimagemgr/rift/imagemgr/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/imagemgr/client.py b/rwlaunchpad/plugins/rwimagemgr/rift/imagemgr/client.py
new file mode 100644 (file)
index 0000000..10df45b
--- /dev/null
@@ -0,0 +1,174 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import concurrent.futures
+
+import gi
+gi.require_version("RwImageMgmtYang", "1.0")
+from gi.repository import (
+    RwImageMgmtYang,
+)
+
+
+class UploadJobError(Exception):
+    pass
+
+
+class UploadJobFailed(UploadJobError):
+    pass
+
+
+class UploadJobCancelled(UploadJobFailed):
+    pass
+
+
+class UploadJobClient(object):
+    """ An upload job DTS client
+
+    This class wraps the DTS upload job actions to be more easily reused across
+    various components
+    """
+    def __init__(self, log, loop, dts):
+        self._log = log
+        self._loop = loop
+        self._dts = dts
+
+    @asyncio.coroutine
+    def create_job(self, image_name, image_checksum, cloud_account_names=None):
+        """ Create an image upload_job and return an UploadJob instance
+
+        Arguments:
+            image_name - The name of the image in the image catalog
+            image_checksum - The checksum of the image in the catalog
+            cloud_account_names - Names of the cloud accounts to upload the image to.
+                                  None uploads the image to all cloud accounts.
+
+        Returns:
+            An UploadJob instance
+        """
+        create_job_msg = RwImageMgmtYang.CreateUploadJob.from_dict({
+            "onboarded_image": {
+                "image_name": image_name,
+                "image_checksum": image_checksum,
+                }
+            })
+
+        if cloud_account_names is not None:
+            create_job_msg.cloud_account = cloud_account_names
+
+        query_iter = yield from self._dts.query_rpc(
+                "I,/rw-image-mgmt:create-upload-job",
+                0,
+                create_job_msg,
+                )
+
+        for fut_resp in query_iter:
+            rpc_result = (yield from fut_resp).result
+
+            job_id = rpc_result.job_id
+
+        return UploadJob(self._log, self._loop, self._dts, job_id)
+
+    def create_job_threadsafe(self, image_name, image_checksum, cloud_account_names=None):
+        """ A thread-safe, syncronous wrapper for create_job """
+        future = concurrent.futures.Future()
+
+        def on_done(asyncio_future):
+            if asyncio_future.exception() is not None:
+                future.set_exception(asyncio_future.exception())
+
+            elif asyncio_future.result() is not None:
+                future.set_result(asyncio_future.result())
+
+        def add_task():
+            task = self._loop.create_task(
+                    self.create_job(image_name, image_checksum, cloud_account_names)
+                    )
+            task.add_done_callback(on_done)
+
+        self._loop.call_soon_threadsafe(add_task)
+        return future.result()
+
+
+class UploadJob(object):
+    """ A handle for a image upload job """
+    def __init__(self, log, loop, dts, job_id):
+        self._log = log
+        self._loop = loop
+        self._dts = dts
+        self._job_id = job_id
+
+    @asyncio.coroutine
+    def wait_until_complete(self):
+        """ Wait until the upload job reaches a terminal state
+
+        Raises:
+            UploadJobError: A generic exception occurred in the upload job
+            UploadJobFailed: The upload job failed
+            UploadJobCancelled: The upload job was cancelled
+        """
+        self._log.debug("waiting for upload job %s to complete", self._job_id)
+        while True:
+            query_iter = yield from self._dts.query_read(
+                "D,/rw-image-mgmt:upload-jobs/rw-image-mgmt:job[rw-image-mgmt:id='{}']".format(
+                    self._job_id
+                )
+            )
+            job_status_msg = None
+            for fut_resp in query_iter:
+                job_status_msg = (yield from fut_resp).result
+                break
+
+            if job_status_msg is None:
+                raise UploadJobError("did not get a status response for job_id: %s"
+                                     % self._job_id)
+
+            if job_status_msg.status == "COMPLETED":
+                msg = "upload job %s completed successfully" % self._job_id
+                self._log.debug(msg)
+                return
+
+            elif job_status_msg.status == "FAILED":
+                msg = "upload job %s as not successful: %s" % (self._job_id, job_status_msg.status)
+                self._log.error(msg)
+                raise UploadJobFailed(msg)
+
+            elif job_status_msg.status == "CANCELLED":
+                msg = "upload job %s was cancelled" % self._job_id
+                self._log.error(msg)
+                raise UploadJobCancelled(msg)
+
+            yield from asyncio.sleep(.5, loop=self._loop)
+
+    def wait_until_complete_threadsafe(self):
+        """ A thread-safe, synchronous wrapper for wait_until_complete """
+
+        future = concurrent.futures.Future()
+
+        def on_done(asyncio_future):
+            if asyncio_future.exception() is not None:
+                future.set_exception(asyncio_future.exception())
+                return
+
+            future.set_result(asyncio_future.result())
+
+        def add_task():
+            task = self._loop.create_task(self.wait_until_complete())
+            task.add_done_callback(on_done)
+
+        self._loop.call_soon_threadsafe(add_task)
+        return future.result()
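A minimal usage sketch for this client, assuming the hosting tasklet already holds log, loop, and dts handles (the image name, checksum, and account name below are illustrative):

    client = UploadJobClient(log, loop, dts)
    job = client.create_job_threadsafe(
            "ubuntu-trusty.qcow2",
            "64d7c1cd2b6f60c92c14662941cb7913",
            cloud_account_names=["openstack-site-1"],
            )
    # Blocks the calling (non-asyncio) thread until the job reaches a
    # terminal state; raises UploadJobFailed or UploadJobCancelled on error.
    job.wait_until_complete_threadsafe()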
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/__init__.py b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/__init__.py
new file mode 100644 (file)
index 0000000..c5c582e
--- /dev/null
@@ -0,0 +1 @@
+from .tasklet import ImageManagerTasklet
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/glance_client.py b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/glance_client.py
new file mode 100644 (file)
index 0000000..614c152
--- /dev/null
@@ -0,0 +1,357 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import itertools
+import logging
+import os
+import glanceclient
+import keystoneclient.v3.client as keystone_client
+from keystoneauth1 import (
+    identity as keystone_identity,
+    session as keystone_session
+    )
+
+from gi.repository import RwcalYang
+
+logger = logging.getLogger(name=__name__)
+
+
+class OpenstackImageError(Exception):
+    pass
+
+
+class OpenstackNonUniqueImageError(OpenstackImageError):
+    pass
+
+
+class OpenstackImageCreateError(Exception):
+    pass
+
+
+class OpenstackImageDeleteError(Exception):
+    pass
+
+
+class InvalidImageError(Exception):
+    pass
+
+
+class OpenstackAccount(object):
+    def __init__(self, auth_url, tenant, username, password):
+        self.auth_url = auth_url
+        self.tenant = tenant
+        self.username = username
+        self.password = password
+
+
+class OpenstackImage(object):
+    """ This value class encapsultes the RIFT-relevent glance image fields """
+
+    FIELDS = ["id", "name", "checksum", "disk_format",
+              "container_format", "size", "properties", "status"]
+    OPTIONAL_FIELDS = ["id", "checksum", "location"]
+
+    def __init__(self, name, disk_format, container_format, size,
+                 properties=None, id=None, checksum=None, status="saving",
+                 location=None):
+        self.name = name
+        self.disk_format = disk_format
+        self.container_format = container_format
+        self.size = size
+        self.properties = properties if properties is not None else {}
+        self.status = status
+
+        self.id = id
+        self.checksum = checksum
+
+    @classmethod
+    def from_image_response(cls, image):
+        """ Convert a image response from glance into a OpenstackImage
+
+        Arguments:
+            image - A glance image object (from glance_client.images.list() for example)
+
+        Returns:
+            An instance of OpenstackImage
+
+        Raises:
+            OpenstackImageError - Could not convert the response into an OpenstackImage object
+        """
+        missing_fields = [field for field in cls.FIELDS
+                          if field not in cls.OPTIONAL_FIELDS and not hasattr(image, field)]
+        if missing_fields:
+            raise OpenstackImageError(
+                    "Openstack image is missing required fields: %s" % missing_fields
+                    )
+
+        # Optional fields (id, checksum) may be absent from the glance
+        # response; default them to None rather than raising AttributeError.
+        kwargs = {field: getattr(image, field, None) for field in cls.FIELDS}
+
+        return cls(**kwargs)
+
+
+class OpenstackKeystoneClient(object):
+    """ This class wraps the Keystone Client """
+    def __init__(self, ks_client):
+        self._ks_client = ks_client
+
+    @property
+    def auth_token(self):
+        return self._ks_client.auth_token
+
+    @classmethod
+    def from_openstack_account(cls, os_account):
+        ks_client = keystone_client.Client(
+                insecure=True,
+                auth_url=os_account.auth_url,
+                username=os_account.username,
+                password=os_account.password,
+                tenant_name=os_account.tenant
+                )
+
+        return cls(ks_client)
+
+    @property
+    def glance_endpoint(self):
+        """ Return the glance endpoint from the keystone service """
+        glance_ep = self._ks_client.service_catalog.url_for(
+                service_type='image',
+                endpoint_type='publicURL'
+                )
+
+        return glance_ep
+
+
+class OpenstackGlanceClient(object):
+    def __init__(self, log, glance_client):
+        self._log = log
+        self._client = glance_client
+
+    @classmethod
+    def from_ks_client(cls, log, ks_client):
+        """ Create a OpenstackGlanceClient from a keystone client instance
+
+        Arguments:
+            log - logger instance
+            ks_client - A keystone client instance
+        """
+
+        glance_ep = ks_client.glance_endpoint
+        glance_client = glanceclient.Client(
+                '1',
+                glance_ep,
+                token=ks_client.auth_token,
+                )
+
+        return cls(log, glance_client)
+
+    @classmethod
+    def from_token(cls, log, host, port, token):
+        """ Create a OpenstackGlanceClient instance using a keystone auth token
+
+        Arguments:
+            log - logger instance
+            host - the glance host
+            port - the glance port
+            token - the keystone token
+
+        Returns:
+            An OpenstackGlanceClient instance
+        """
+        endpoint = "http://{}:{}".format(host, port)
+        glance_client = glanceclient.Client("1", endpoint, token=token)
+        return cls(log, glance_client)
+
+    def get_image_list(self):
+        """ Return the list of images from the Glance server
+
+        Returns:
+            A list of OpenstackImage instances
+        """
+        images = []
+        for image in itertools.chain(
+                self._client.images.list(is_public=False),
+                self._client.images.list(is_public=True)
+                ):
+            images.append(OpenstackImage.from_image_response(image))
+
+        return images
+
+    def get_image_data(self, image_id):
+        """ Return a image bytes generator from a image id
+
+        Arguments:
+            image_id - An image id that exists on the glance server
+
+        Returns:
+            A generator which produces the image data bytestrings
+
+        Raises:
+            OpenstackImageError - Could not find the image id
+        """
+
+        try:
+            self._client.images.get(image_id)
+        except Exception as e:
+            msg = "Failed to find image from image: %s" % image_id
+            self._log.exception(msg)
+            raise OpenstackImageError(msg) from e
+
+        img_data = self._client.images.data(image_id)
+        return img_data
+
+    def find_active_image(self, id=None, name=None, checksum=None):
+        """ Find an active images on the glance server
+
+        Arguments:
+            id - the image id to match
+            name - the image name to match
+            checksum - the image checksum to match
+
+        Returns:
+            An OpenstackImage instance
+
+        Raises:
+            OpenstackImageError - could not find a matching image
+                                  with matching image name and checksum
+        """
+        if id is None and name is None:
+            raise ValueError("image id or image name must be provided")
+
+        self._log.debug("attempting to find active image with id %s name %s and checksum %s",
+                        id, name, checksum)
+
+        found_image = None
+
+        image_list = self.get_image_list()
+        self._log.debug("got image list from openstack: %s", image_list)
+        for image in image_list:
+            self._log.debug(image)
+            if image.status != "active":
+                continue
+
+            if id is not None:
+                if image.id != id:
+                    continue
+
+            if name is not None:
+                if image.name != name:
+                    continue
+
+            if checksum is not None:
+                if image.checksum != checksum:
+                    continue
+
+            if found_image is not None:
+                raise OpenstackNonUniqueImageError(
+                    "Found multiple images that matched the criteria.  Use image id to disambiguate."
+                    )
+
+            found_image = image
+
+        if found_image is None:
+            raise OpenstackImageError(
+                    "could not find an active image with id %s name %s and checksum %s" %
+                    (id, name, checksum))
+
+        # found_image is already an OpenstackImage (converted in
+        # get_image_list), so it can be returned directly.
+        return found_image
+
+    def create_image_from_hdl(self, image, file_hdl):
+        """ Create an image on the glance server a file handle
+
+        Arguments:
+            image - An OpenstackImage instance
+            file_hdl - An open image file handle
+
+        Raises:
+            OpenstackImageCreateError - Could not upload the image
+        """
+        try:
+            self._client.images.create(
+                    name=image.name,
+                    is_public="False",
+                    disk_format=image.disk_format,
+                    container_format=image.container_format,
+                    data=file_hdl
+                    )
+        except Exception as e:
+            msg = "Failed to Openstack upload image"
+            self._log.exception(msg)
+            raise OpenstackImageCreateError(msg) from e
+
+    def create_image_from_url(self, image_url, image_name, image_checksum=None,
+                              disk_format=None, container_format=None):
+        """ Create an image on the glance server from a image url
+
+        Arguments:
+            image_url - An HTTP image url
+            image_name - An openstack image name (filename with proper extension)
+            image_checksum - The image md5 checksum (optional)
+
+        Raises:
+            OpenstackImageCreateError - Could not create the image
+        """
+        def disk_format_from_image_name(image_name):
+            _, image_ext = os.path.splitext(image_name)
+            if not image_ext:
+                raise InvalidImageError("image name must have an extension")
+
+            # Strip off the .
+            image_ext = image_ext[1:]
+
+            if not hasattr(RwcalYang.DiskFormat, image_ext.upper()):
+                raise InvalidImageError("unknown image extension for disk format: %s", image_ext)
+
+            disk_format = image_ext.lower()
+            return disk_format
+
+        # If the disk format was not provided, attempt to match via the file
+        # extension.
+        if disk_format is None:
+            disk_format = disk_format_from_image_name(image_name)
+
+        if container_format is None:
+            container_format = "bare"
+
+        create_args = dict(
+            location=image_url,
+            name=image_name,
+            is_public="True",
+            disk_format=disk_format,
+            container_format=container_format,
+            )
+
+        if image_checksum is not None:
+            create_args["checksum"] = image_checksum
+
+        try:
+            self._log.debug("creating an image from url: %s", create_args)
+            image = self._client.images.create(**create_args)
+        except Exception as e:
+            msg = "Failed to create image from url in openstack"
+            self._log.exception(msg)
+            raise OpenstackImageCreateError(msg) from e
+
+        return OpenstackImage.from_image_response(image)
+
+    def delete_image_from_id(self, image_id):
+        self._log.info("Deleting image from catalog: %s", image_id)
+        try:
+            self._client.images.delete(image_id)
+        except Exception as e:
+            msg = "Failed to delete image %s in openstack" % image_id
+            self._log.exception(msg)
+            raise OpenstackImageDeleteError(msg) from e
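A usage sketch for these wrappers, assuming a reachable keystone endpoint (the URL, tenant, and credentials are illustrative):

    import logging
    log = logging.getLogger("glance-demo")

    account = OpenstackAccount(
            "http://keystone.example.com:5000/v3", "demo", "admin", "secret")
    ks_client = OpenstackKeystoneClient.from_openstack_account(account)
    glance = OpenstackGlanceClient.from_ks_client(log, ks_client)

    # Upload by URL; the disk format is inferred from the file extension.
    image = glance.create_image_from_url(
            "http://images.example.com/cirros-0.3.2-x86_64-disk.img",
            "cirros-0.3.2-x86_64-disk.img")

    # Later, locate the active image by name and checksum.
    found = glance.find_active_image(name=image.name, checksum=image.checksum)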
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/glance_proxy_server.py b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/glance_proxy_server.py
new file mode 100644 (file)
index 0000000..9b3972e
--- /dev/null
@@ -0,0 +1,276 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import json
+
+from .lib import quickproxy
+import rift.tasklets.tornado
+
+
+class GlanceConfig(object):
+    DEFAULT_HOST = "127.0.0.1"
+    DEFAULT_PORT = 9292
+    DEFAULT_TOKEN = "test"
+
+    def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT, token=DEFAULT_TOKEN):
+        self.host = host
+        self.port = port
+        self.token = token
+
+
+class GlanceImageCreateRequest(object):
+    def __init__(self, name, size, checksum, disk_format, container_format):
+        self.name = name
+        self.size = size
+        self.checksum = checksum
+        self.disk_format = disk_format
+        self.container_format = container_format
+
+    def __repr__(self):
+        return "{}({})".format(
+                self.__class__.__name__,
+                dict(
+                    name=self.name,
+                    size=self.size,
+                    checksum=self.checksum,
+                    )
+                )
+
+    @classmethod
+    def from_header_dict(cls, header_dict):
+        """
+        curl -i -X POST -H 'x-image-meta-container_format: bare' -H
+        'Transfer-Encoding: chunked' -H 'User-Agent: python-glanceclient' -H
+        'x-image-meta-size: 13167616' -H 'x-image-meta-is_public: False' -H
+        'X-Auth-Token: test' -H 'Content-Type: application/octet-stream' -H
+        'x-image-meta-checksum: 64d7c1cd2b6f60c92c14662941cb7913' -H
+        'x-image-meta-disk_format: raw' -H 'x-image-meta-name:
+        cirros-0.3.2-x86_64-disk.img'
+        """
+
+        name = header_dict["x-image-meta-name"]
+        try:
+            size = int(header_dict["x-image-meta-size"])
+        except KeyError:
+            size = None
+
+        try:
+            checksum = header_dict["x-image-meta-checksum"]
+        except KeyError:
+            checksum = None
+
+        disk_format = header_dict["x-image-meta-disk_format"]
+        container_format = header_dict["x-image-meta-container_format"]
+
+        return cls(name=name, size=size, checksum=checksum,
+                   disk_format=disk_format, container_format=container_format)
+
+
+class GlanceImageCreateResponse(object):
+    def __init__(self, id, name, status, size, checksum):
+        self.id = id
+        self.name = name
+        self.status = status
+        self.size = size
+        self.checksum = checksum
+
+    def __repr__(self):
+        return "{}({})".format(
+                self.__class__.__name__,
+                dict(
+                    id=self.id,
+                    name=self.name,
+                    status=self.status,
+                    checksum=self.checksum,
+                    )
+                )
+
+    @classmethod
+    def from_response_body(cls, response_body):
+        """
+        {"image": {"status": "active", "deleted": false, "container_format":
+        "bare", "min_ram": 0, "updated_at": "2016-06-24T14:41:38.598199",
+        "owner": null, "min_disk": 0, "is_public": false, "deleted_at": null,
+        "id": "5903cb2d-53db-4343-b055-586475a077f5", "size": 13167616, "name":
+        "cirros-0.3.2-x86_64-disk.img", "checksum":
+        "64d7c1cd2b6f60c92c14662941cb7913", "created_at":
+        "2016-06-24T14:41:38.207356", "disk_format": "raw",
+        "properties": {}, "protected": false}}
+        """
+
+        response_dict = json.loads(response_body.decode())
+        image = response_dict["image"]
+
+        id = image["id"]
+        name = image["name"]
+        status = image["status"]
+        size = image["size"]
+        checksum = image["checksum"]
+
+        return cls(
+                id=id, name=name, status=status,
+                size=size, checksum=checksum
+                )
+
+
+class GlanceHTTPMockProxy(object):
+    def __init__(self, log, loop, on_http_request, on_http_response):
+        self._log = log
+        self._loop = loop
+        self._on_http_request = on_http_request
+        self._on_http_response = on_http_response
+
+    def start(self):
+        pass
+
+    def stop(self):
+        pass
+
+
+class QuickProxyServer(object):
+    """ This class implements a HTTP Proxy server
+    """
+    DEFAULT_PROXY_PORT = 9999
+    DEBUG_LEVEL = 0
+
+    def __init__(self, log, loop, proxy_port=DEFAULT_PROXY_PORT):
+        self._log = log
+        self._loop = loop
+        self._proxy_port = proxy_port
+
+        self._proxy_server = None
+
+    def __repr__(self):
+        return "{}(port={})".format(self.__class__.__name__, self._proxy_port)
+
+    def start(self, on_http_request, on_http_response):
+        """ Start the proxy server
+
+        Arguments:
+            on_http_request - A callback when a http request is initiated
+            on_http_response - A callback when a http response is initiated
+
+        """
+        self._log.debug("Starting %s", self)
+        io_loop = rift.tasklets.tornado.TaskletAsyncIOLoop(
+                asyncio_loop=self._loop
+                )
+
+        self._proxy_server = quickproxy.run_proxy(
+                port=self._proxy_port,
+                req_callback=on_http_request,
+                resp_callback=on_http_response,
+                io_loop=io_loop,
+                debug_level=QuickProxyServer.DEBUG_LEVEL
+                )
+
+    def stop(self):
+        """ Stop the proxy server """
+        if self._proxy_server is None:
+            self._log.warning("%s already stopped")
+            return
+
+        self._log.debug("Stopping %s", self)
+        self._proxy_server.stop()
+        self._proxy_server = None
+
+
+class GlanceHTTPProxyServer(object):
+    """ This class implements a HTTP Proxy server
+
+    Proxying requests to glance has the following high-level advantages:
+       - Allows us to intercept HTTP requests and responses to hook in functionality
+       - Allows us to configure the glance catalog server and keep the endpoint the same
+    """
+
+    DEFAULT_GLANCE_CONFIG = GlanceConfig()
+
+    def __init__(self, log, loop,
+                 http_proxy_server,
+                 glance_config=DEFAULT_GLANCE_CONFIG,
+                 on_create_image_request=None,
+                 on_create_image_response=None,
+                 ):
+
+        self._log = log
+        self._loop = loop
+        self._http_proxy_server = http_proxy_server
+        self._glance_config = glance_config
+
+        self._on_create_image_request = on_create_image_request
+        self._on_create_image_response = on_create_image_response
+
+    def _handle_create_image_request(self, request):
+        image_request = GlanceImageCreateRequest.from_header_dict(request.headers)
+        self._log.debug("Parsed image request: %s", image_request)
+        if self._on_create_image_request is not None:
+            self._on_create_image_request(image_request)
+
+        # Store the GlanceImageCreateRequest in the request context so it
+        # is available in the response
+        request.context["image_request"] = image_request
+
+        return request
+
+    def _handle_create_image_response(self, response):
+        image_request = response.context["image_request"]
+
+        self._log.debug("Got response body: %s", response.body)
+        image_response = GlanceImageCreateResponse.from_response_body(response.body)
+        self._log.debug("Parsed image response: %s", image_response)
+        if self._on_create_image_response is not None:
+            response = self._on_create_image_response(image_response, image_request)
+
+        return response
+
+    def start(self):
+        """ Start the glance proxy server """
+        def request_callback(request):
+            # Redirect the request to the actual glance server
+            self._log.debug("Proxying request to glance (path: %s, method: %s)",
+                            request.path, request.method)
+
+            # Save the path and method to detect whether the response is
+            # for a create_image request
+            request.context["path"] = request.path
+            request.context["method"] = request.method
+
+            if request.path.endswith("images") and request.method == "POST":
+                request = self._handle_create_image_request(request)
+
+            # Redirect the request to the actual glance server
+            request.host = self._glance_config.host
+            request.port = self._glance_config.port
+
+            return request
+
+        def response_callback(response):
+            self._log.debug("Got glance request response: %s", response)
+
+            if response.context["path"].endswith("images") and response.context["method"] == "POST":
+                response = self._handle_create_image_response(response)
+
+            return response
+
+        self._http_proxy_server.start(
+                on_http_request=request_callback,
+                on_http_response=response_callback
+                )
+
+    def stop(self):
+        """ Stop the glance proxy server """
+        self._http_proxy_server.stop()
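A wiring sketch for the proxy, assuming an asyncio loop and logger from the hosting tasklet (the callback is illustrative):

    proxy = QuickProxyServer(log, loop)

    def on_create_request(image_request):
        log.debug("glance image create requested: %s", image_request)

    glance_proxy = GlanceHTTPProxyServer(
            log, loop, proxy,
            on_create_image_request=on_create_request,
            )
    # Listens on port 9999 and forwards to the configured glance server
    # (127.0.0.1:9292 by default).
    glance_proxy.start()
    # ... later, during shutdown:
    glance_proxy.stop()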
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/__init__.py b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/__init__.py b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/__init__.py
new file mode 100644 (file)
index 0000000..3192a53
--- /dev/null
@@ -0,0 +1 @@
+from .proxy import *
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/data/test.crt b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/data/test.crt
new file mode 100644 (file)
index 0000000..7bd1818
--- /dev/null
@@ -0,0 +1,13 @@
+-----BEGIN CERTIFICATE-----
+MIICATCCAWoCCQD3Gv0KNbBGNzANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB
+VTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0
+cyBQdHkgTHRkMB4XDTE0MDIwMTA4MzYzMloXDTE0MDMwMzA4MzYzMlowRTELMAkG
+A1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0
+IFdpZGdpdHMgUHR5IEx0ZDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA60rQ
+dQM2tFLhNXtnlIxoegUw9FM/0DmMXYBKcRNEjJBegBaFO4+LALRsPvrl+eXerYL8
+UeRA7bgO4kkf3HokqWAsjUipTl8UV3RtDePE18m/kPLvuDO2bQMOn+94eqilZyzl
+PU/oUq+3MlwcPLyAldg/7UvkqJcq7R2MiCHv62kCAwEAATANBgkqhkiG9w0BAQUF
+AAOBgQCL/dRFUWuUfB+j4PQ+AorIimkpMsFH+7nOaiRXn1SWaYHu20h6Uxr2Xt5A
+C23lFEpRBVxfNnWbfyM0a8lhJ2/Ri/3cguVeiHJc0r93dyG+FVomRsq8doM1P9KP
+0q2Zbt3iAcuvKdJ6KJO3Zdx8DvHeJlfwymR4PyciLJgiSjJRAg==
+-----END CERTIFICATE-----
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/data/test.key b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/data/test.key
new file mode 100644 (file)
index 0000000..80e8579
--- /dev/null
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICXgIBAAKBgQDrStB1Aza0UuE1e2eUjGh6BTD0Uz/QOYxdgEpxE0SMkF6AFoU7
+j4sAtGw++uX55d6tgvxR5EDtuA7iSR/ceiSpYCyNSKlOXxRXdG0N48TXyb+Q8u+4
+M7ZtAw6f73h6qKVnLOU9T+hSr7cyXBw8vICV2D/tS+SolyrtHYyIIe/raQIDAQAB
+AoGBAMSOry3RDXX+dpyTBqiV0wF8LLUuhnSQXq4NaiKkEfPK9ubR6WMkOt7P2k2S
+k2P7n9YbQmW25Hax990ZcUBh2RT7MdHpX8bICrS06MOuREgP9ldL5It9/4JpMiJV
+1+9t51TbzywE6dr1E8ROdgYtp65yBgJRzvxooF8YAPTVzJ4xAkEA/hFi1MD6MJgc
+j+dpUiyyO/02wjMGHBql+hqPxdt/cKPHAJEB3sFi8ussy6RFzn4PoVPlAAjRCT2M
+9+QBTJXLdwJBAO0U4EuvsVixZtRy0vCvXbOfkQnVcZupnc7ub3pFvY/rnfOQB4A8
+w7arBYkDeUwZsqpqlMz79wQh0pNgAgEX+B8CQQCYrioOYMn5WfAQKqkIUQPrOQgn
+PDJ3wSvtWPj9liLHtiRpGrtc+ipUgS+yUU4CAY+zC4+arbGxM+P7NHHzbDGRAkBu
+WVEs6VH6nlfmequEK5vJh3PSx+5hLcmuD30DxII/AsQ6IcfcAGx4EZI5+8vxh+SJ
+PaKU5pJK8hM5VW6ZY7axAkEAmLvHaC1cawx85m0azpRXF4JNxkauvXpzeWsAdX5p
+2aX43ke4yjbEA0HfC/8pfkS2ZV9dnIo3nrlFIu8TJPwwMw==
+-----END RSA PRIVATE KEY-----
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/proxy.py b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/lib/quickproxy/proxy.py
new file mode 100644 (file)
index 0000000..7a7d85b
--- /dev/null
@@ -0,0 +1,436 @@
+import os
+import sys
+import urllib.parse as urlparse
+import pprint
+import http.cookies as Cookie
+import datetime
+import dateutil.parser
+from copy import copy
+
+import tornado.httpserver
+import tornado.ioloop
+import tornado.iostream
+import tornado.web
+import tornado.httpclient
+import tornado.escape
+# Imported explicitly because tornado.concurrent.Future and
+# tornado.gen.coroutine are referenced below.
+import tornado.concurrent
+import tornado.gen
+
+__all__ = ['run_proxy', 'RequestObj', 'ResponseObj']
+
+DEFAULT_CALLBACK = lambda r: r
+
+
+class Bunch(object):
+    def __init__(self, **kwds):
+        self.__dict__.update(kwds)
+
+    def __str__(self):
+        return str(self.__dict__)
+
+
+class RequestObj(Bunch):
+    '''
+    An HTTP request object that contains the following request attributes:
+
+    protocol: either 'http' or 'https'
+    host: the destination hostname of the request
+    port: the port for the request
+    path: the path of the request ('/index.html' for example)
+    query: the query string ('?key=value&other=value')
+    fragment: the hash fragment ('#fragment')
+    method: request method ('GET', 'POST', etc)
+    username: always passed as None, but you can set it to override the user
+    password: None, but can be set to override the password
+    body: request body as a string
+    headers: a dictionary of header / value pairs
+        (for example {'Content-Type': 'text/plain', 'Content-Length': 200})
+    follow_redirects: true to follow redirects before returning a response
+    validate_cert: false to turn off SSL cert validation
+    context: a dictionary to place data that will be accessible to the response
+    '''
+    pass
+
+
+class ResponseObj(Bunch):
+    '''
+    An HTTP response object that contains the following request attributes:
+
+    code: response code, such as 200 for 'OK'
+    headers: the response headers
+    pass_headers: a list or set of headers to pass along in the response. All
+        other headers will be stripped out. By default this includes:
+        ('Date', 'Cache-Control', 'Server', 'Content-Type', 'Location')
+    body: response body as a string
+    context: the context object from the request
+    '''
+
+    def __init__(self, **kwargs):
+        kwargs.setdefault('code', 200)
+        kwargs.setdefault('headers', {})
+        kwargs.setdefault('pass_headers', True)
+        kwargs.setdefault('body', '')
+        kwargs.setdefault('context', {})
+        super(ResponseObj, self).__init__(**kwargs)
+
+
+class ResponseStreamWriterFuture(tornado.concurrent.Future):
+    def __init__(self, write_fn, *args, **kwargs):
+        self.write_fn = write_fn
+        super().__init__(*args, **kwargs)
+
+
+def _make_proxy(methods, io_loop, req_callback, resp_callback, err_callback, debug_level=0):
+
+    @tornado.web.stream_request_body
+    class ProxyHandler(tornado.web.RequestHandler):
+
+        SUPPORTED_METHODS = methods
+
+        def initialize(self):
+            self.proxy_request_ready = tornado.concurrent.Future()
+            self.request_future = None
+
+        def on_connection_close(self):
+            if self.request_future is not None:
+                self.request_future.set_result(False)
+
+        def create_body_producer_future(self, write_fn):
+            self.request_future = ResponseStreamWriterFuture(write_fn)
+            self.proxy_request_ready.set_result(True)
+            return self.request_future
+
+        @tornado.gen.coroutine
+        def data_received(self, chunk):
+            yield self.proxy_request_ready
+
+            yield self.request_future.write_fn(chunk)
+
+        def make_requestobj(self, request):
+            '''
+            creates a request object for this request
+            '''
+
+            # get url for request
+            # surprisingly, tornado's HTTPRequest sometimes
+            # has a uri field with the full uri (http://...)
+            # and sometimes it just contains the path. :(
+
+            url = request.uri
+            if not url.startswith(u'http'):
+                url = u"{proto}://{netloc}{path}".format(
+                    proto=request.protocol,
+                    netloc=request.host,
+                    path=request.uri
+                )
+
+            parsedurl = urlparse.urlparse(url)
+
+            # Passing the Transfer-Encoding header through causes Tornado to
+            # emit invalid chunks, so strip it from the forwarded headers
+            headers = request.headers.copy()
+            if "Transfer-Encoding" in headers:
+                del headers["Transfer-Encoding"]
+
+            # create request object
+
+            requestobj = RequestObj(
+                method=request.method,
+                protocol=parsedurl.scheme,
+                username=None,
+                password=None,
+                host=parsedurl.hostname,
+                port=parsedurl.port or 80,
+                path=parsedurl.path,
+                query=parsedurl.query,
+                fragment=parsedurl.fragment,
+                #body=request.body,
+                headers=headers,
+                follow_redirects=False,
+                validate_cert=True,
+                context={}
+            )
+
+            return requestobj, parsedurl
+
+
+        def make_request(self, obj, parsedurl):
+            '''
+            converts a request object into an HTTPRequest
+            '''
+
+            obj.headers.setdefault('Host', obj.host)
+
+            if obj.username or parsedurl.username or \
+                obj.password or parsedurl.password:
+
+                auth = u"{username}:{password}@".format(
+                    username=obj.username or parsedurl.username,
+                    password=obj.password or parsedurl.password
+                )
+
+            else:
+                auth = ''
+
+            url = u"{proto}://{auth}{host}{port}{path}{query}{frag}"
+            url = url.format(
+                proto=obj.protocol,
+                auth=auth,
+                host=obj.host,
+                port=(u':' + str(obj.port)) if (obj.port and obj.port != 80) else u'',
+                path=u'/'+obj.path.lstrip(u'/') if obj.path else u'',
+                query=u'?'+obj.query.lstrip(u'?') if obj.query else u'',
+                frag=obj.fragment
+            )
+
+            body_producer = None
+            if "Transfer-encoding" in self.request.headers and \
+                    self.request.headers["Transfer-Encoding"] == "chunked":
+                body_producer = self.create_body_producer_future
+
+            req = tornado.httpclient.HTTPRequest(
+                url=url,
+                method=obj.method,
+                body_producer=body_producer,
+                decompress_response=False,
+                headers=obj.headers,
+                follow_redirects=obj.follow_redirects,
+                allow_nonstandard_methods=True,
+                request_timeout=1 * 60 * 60  # 1 hour
+            )
+
+            return req
+
+        def prepare(self):
+
+            request = self.request
+            if debug_level >= 4:
+                print("<<<<<<<< REQUEST <<<<<<<<")
+                pprint.pprint(request.__dict__)
+
+            MB = 1024 * 1024
+            GB = 1024 * MB
+
+            MAX_STREAMED_SIZE = 50 * GB
+            request.connection.set_max_body_size(MAX_STREAMED_SIZE)
+
+            requestobj, parsedurl = self.make_requestobj(request)
+
+            if debug_level >= 3:
+                print("<<<<<<<< REQUESTOBJ <<<<<<<<")
+                pprint.pprint(requestobj.__dict__)
+
+            if debug_level >= 1:
+                debugstr = "serving request from %s:%d%s " % (requestobj.host,
+                                                              requestobj.port or 80,
+                                                              requestobj.path)
+
+            modrequestobj = req_callback(requestobj)
+
+            if isinstance(modrequestobj, ResponseObj):
+                self.handle_response(modrequestobj)
+                return
+
+            if debug_level >= 1:
+                print(debugstr + "to %s:%d%s" % (modrequestobj.host,
+                                                 modrequestobj.port or 80,
+                                                 modrequestobj.path))
+
+            outreq = self.make_request(modrequestobj, parsedurl)
+
+            if debug_level >= 2:
+                print(">>>>>>>> REQUEST >>>>>>>>")
+                print("%s %s" % (outreq.method, outreq.url))
+                for k, v in outreq.headers.items():
+                    print( "%s: %s" % (k, v))
+
+            # send the request
+
+            def _resp_callback(response):
+                self.handle_response(response, context=modrequestobj.context)
+
+            client = tornado.httpclient.AsyncHTTPClient(io_loop=io_loop)
+            try:
+                client.fetch(outreq, _resp_callback,
+                             validate_cert=modrequestobj.validate_cert)
+            except tornado.httpclient.HTTPError as e:
+                if hasattr(e, 'response') and e.response:
+                    self.handle_response(e.response,
+                                         context=modrequestobj.context,
+                                         error=True)
+                else:
+                    self.set_status(500)
+                    self.write('Internal server error:\n' + str(e))
+                    self.finish()
+
+
+        def handle_response(self, response, context=None, error=False):
+            # avoid the shared-mutable-default pitfall
+            context = context if context is not None else {}
+
+            if not isinstance(response, ResponseObj):
+                if debug_level >= 4:
+                    print("<<<<<<<< RESPONSE <<<<<<<")
+                    pprint.pprint(response.__dict__)
+
+                responseobj = ResponseObj(
+                    code=response.code,
+                    headers=response.headers,
+                    pass_headers=True,
+                    body=response.body,
+                    context=context,
+                )
+            else:
+                responseobj = response
+
+            if debug_level >= 3:
+                print("<<<<<<<< RESPONSEOBJ <<<<<<<")
+                responseprint = copy(responseobj)
+                responseprint.body = "-- body content not displayed --"
+                pprint.pprint(responseprint.__dict__)
+
+            if not error:
+                mod = resp_callback(responseobj)
+            else:
+                mod = err_callback(responseobj)
+
+            # set the response status code
+
+            if mod.code == 599:
+                self.set_status(500)
+                self.write('Internal server error. Server unreachable.')
+                self.finish()
+                return
+
+            self.set_status(mod.code)
+
+            # set the response headers
+
+            if isinstance(mod.pass_headers, bool):
+                header_keys = mod.headers.keys() if mod.pass_headers else []
+            else:
+                header_keys = mod.pass_headers
+            for key in header_keys:
+                if key.lower() == "set-cookie":
+                    cookies = Cookie.BaseCookie()
+                    cookies.load(tornado.escape.native_str(mod.headers.get(key)))
+                    for cookie_key in cookies:
+                        cookie = cookies[cookie_key]
+                        params = dict(cookie)
+                        expires = params.pop('expires', None)
+                        if expires:
+                            expires = dateutil.parser.parse(expires)
+                        self.set_cookie(
+                            cookie.key,
+                            cookie.value,
+                            expires = expires,
+                            **params
+                        )
+                else:
+                    val = mod.headers.get(key)
+                    self.set_header(key, val)
+
+            if debug_level >= 2:
+                print(">>>>>>>> RESPONSE (%s) >>>>>>>" % mod.code)
+                for k, v in self._headers.items():
+                    print("%s: %s" % (k, v))
+                if hasattr(self, '_new_cookie'):
+                    print(self._new_cookie.output())
+
+            # set the response body
+
+            if mod.body:
+                self.write(mod.body)
+
+            self.finish()
+
+        @tornado.web.asynchronous
+        def get(self):
+            pass
+
+        @tornado.web.asynchronous
+        def options(self):
+            pass
+
+        @tornado.web.asynchronous
+        def head(self):
+            pass
+
+        @tornado.web.asynchronous
+        def put(self):
+            self.request_future.set_result(True)
+
+        @tornado.web.asynchronous
+        def patch(self):
+            self.request_future.set_result(True)
+
+        @tornado.web.asynchronous
+        def post(self):
+            self.request_future.set_result(True)
+
+        @tornado.web.asynchronous
+        def delete(self):
+            pass
+
+
+    return ProxyHandler
+
+
+def run_proxy(port,
+              methods=['GET', 'POST', 'PUT', 'DELETE', 'HEAD'],
+              req_callback=DEFAULT_CALLBACK,
+              resp_callback=DEFAULT_CALLBACK,
+              err_callback=DEFAULT_CALLBACK,
+              test_ssl=False,
+              debug_level=0,
+              io_loop=None,
+              ):
+
+    """
+    Run proxy on the specified port.
+
+    methods: the HTTP methods this proxy will support
+    req_callback: a callback that is passed a RequestObj that it should
+        modify and then return
+    resp_callback: a callback that is given a ResponseObj that it should
+        modify and then return
+    err_callback: in the case of an error, this callback will be called.
+        there's no difference between how this and the resp_callback are
+        used.
+    test_ssl: if true, wraps the listening socket with a self-signed SSL cert
+    io_loop: the tornado IOLoop to use; defaults to the singleton instance.
+        The caller is responsible for starting the loop.
+    debug_level: 0 no debug, 1 basic, 2 verbose, 3 adds request/response
+        objects, 4 adds raw request dumps
+
+    Returns the tornado HTTPServer instance; the server is listening but the
+    IOLoop has not been started.
+    """
+
+    io_loop = tornado.ioloop.IOLoop.instance() if io_loop is None else io_loop
+
+    app = tornado.web.Application([
+        (r'.*', _make_proxy(methods=methods,
+                            io_loop=io_loop,
+                            req_callback=req_callback,
+                            resp_callback=resp_callback,
+                            err_callback=err_callback,
+                            debug_level=debug_level)),
+    ])
+
+    if test_ssl:
+        this_dir, this_filename = os.path.split(__file__)
+        kwargs = {
+            "ssl_options": {
+                "certfile": os.path.join(this_dir, "data", "test.crt"),
+                "keyfile": os.path.join(this_dir, "data", "test.key"),
+            },
+            "io_loop": io_loop,
+        }
+    else:
+        kwargs = {"io_loop": io_loop}
+
+    http_server = tornado.httpserver.HTTPServer(app, **kwargs)
+    http_server.listen(port)
+    return http_server
+
+
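+# Usage sketch (hedged): wiring callbacks into run_proxy.  The port and the
+# injected header below are hypothetical.
+#
+#     def add_header(request):
+#         request.headers['X-Proxied-By'] = 'quickproxy'
+#         return request
+#
+#     run_proxy(8888, req_callback=add_header)
+#     tornado.ioloop.IOLoop.instance().start()
+
+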
+if __name__ == '__main__':
+    port = 8888
+    if len(sys.argv) > 1:
+        port = int(sys.argv[1])
+
+    print("Starting HTTP proxy on port %d" % port)
+    run_proxy(port)
+    tornado.ioloop.IOLoop.instance().start()
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/tasklet.py b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/tasklet.py
new file mode 100644 (file)
index 0000000..027e582
--- /dev/null
@@ -0,0 +1,535 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import os
+import threading
+import time
+
+import rift.tasklets
+import rift.mano.cloud
+
+from . import glance_proxy_server
+from . import glance_client
+from . import upload
+
+import gi
+gi.require_version('RwImageMgmtYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwDts', '1.0')
+
+from gi.repository import (
+    RwcalYang,
+    RwDts as rwdts,
+    RwImageMgmtYang,
+    RwLaunchpadYang,
+)
+
+
+class ImageRequestError(Exception):
+    pass
+
+
+class AccountNotFoundError(ImageRequestError):
+    pass
+
+
+class ImageNotFoundError(ImageRequestError):
+    pass
+
+
+class CloudAccountDtsHandler(object):
+    def __init__(self, log, dts, log_hdl):
+        self._dts = dts
+        self._log = log
+        self._log_hdl = log_hdl
+        self._cloud_cfg_subscriber = None
+
+    def register(self, on_add_apply, on_delete_apply):
+        self._log.debug("creating cloud account config handler")
+        self._cloud_cfg_subscriber = rift.mano.cloud.CloudAccountConfigSubscriber(
+                self._dts, self._log, self._log_hdl,
+                rift.mano.cloud.CloudAccountConfigCallbacks(
+                    on_add_apply=on_add_apply,
+                    on_delete_apply=on_delete_apply,
+                    )
+                )
+        self._cloud_cfg_subscriber.register()
+
+
+def openstack_image_to_image_info(openstack_image):
+    """Convert the OpenstackImage to a ImageInfo protobuf message
+
+    Arguments:
+        openstack_image - A OpenstackImage instance
+
+    Returns:
+        A ImageInfo CAL protobuf message
+    """
+
+    image_info = RwcalYang.ImageInfoItem()
+
+    copy_fields = ["id", "name", "checksum", "container_format", "disk_format"]
+    for field in copy_fields:
+        value = getattr(openstack_image, field)
+        setattr(image_info, field, value)
+
+    image_info.state = openstack_image.status
+
+    return image_info
+
+
+class ImageDTSShowHandler(object):
+    """ A DTS publisher for the upload-jobs data container """
+    def __init__(self, log, loop, dts, job_controller):
+        self._log = log
+        self._loop = loop
+        self._dts = dts
+        self._job_controller = job_controller
+
+        self._subscriber = None
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register as a publisher and wait for reg_ready to complete """
+        def get_xpath():
+            return "D,/rw-image-mgmt:upload-jobs"
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            if action != rwdts.QueryAction.READ:
+                xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+                return
+
+            jobs_pb_msg = self._job_controller.pb_msg
+
+            xact_info.respond_xpath(
+                    rwdts.XactRspCode.ACK,
+                    xpath=get_xpath(),
+                    msg=jobs_pb_msg,
+                    )
+
+        reg_event = asyncio.Event(loop=self._loop)
+
+        @asyncio.coroutine
+        def on_ready(regh, status):
+            reg_event.set()
+
+        self._subscriber = yield from self._dts.register(
+                xpath=get_xpath(),
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=on_prepare,
+                    on_ready=on_ready,
+                    ),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+        yield from reg_event.wait()
+
+
+class ImageDTSRPCHandler(object):
+    """ A DTS publisher for the upload-job RPC's """
+    def __init__(self, log, loop, dts, accounts, glance_client, upload_task_creator, job_controller):
+        self._log = log
+        self._loop = loop
+        self._dts = dts
+        self._accounts = accounts
+        self._glance_client = glance_client
+        self._upload_task_creator = upload_task_creator
+        self._job_controller = job_controller
+
+        self._subscriber = None
+
+    @asyncio.coroutine
+    def _register_create_upload_job(self):
+        def get_xpath():
+            return "/rw-image-mgmt:create-upload-job"
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            create_msg = msg
+
+            account_names = create_msg.cloud_account
+            # If cloud accounts were not specified, upload the image to all cloud accounts
+            if not account_names:
+                account_names = list(self._accounts.keys())
+
+            for account_name in account_names:
+                if account_name not in self._accounts:
+                    raise AccountNotFoundError("Could not find account %s", account_name)
+
+            if create_msg.has_field("external_url"):
+                glance_image = yield from self._upload_task_creator.create_glance_image_from_url_create_rpc(
+                        account_names, create_msg.external_url
+                        )
+
+                tasks = yield from self._upload_task_creator.create_tasks_from_glance_id(
+                    account_names, glance_image.id
+                    )
+
+                def delete_image(ft):
+                    try:
+                        self._glance_client.delete_image_from_id(glance_image.id)
+                    except glance_client.OpenstackImageDeleteError:
+                        pass
+
+                # Create a job and when the job completes delete the temporary
+                # image from the catalog.
+                job_id = self._job_controller.create_job(
+                        tasks,
+                        on_completed=delete_image
+                        )
+
+            elif create_msg.has_field("onboarded_image"):
+                tasks = yield from self._upload_task_creator.create_tasks_from_onboarded_create_rpc(
+                    account_names, create_msg.onboarded_image
+                    )
+                job_id = self._job_controller.create_job(tasks)
+
+            else:
+                raise ImageRequestError("an image selection must be provided")
+
+            rpc_out_msg = RwImageMgmtYang.CreateUploadJobOutput(job_id=job_id)
+
+            xact_info.respond_xpath(
+                    rwdts.XactRspCode.ACK,
+                    xpath="O," + get_xpath(),
+                    msg=rpc_out_msg,
+                    )
+
+        reg_event = asyncio.Event(loop=self._loop)
+
+        @asyncio.coroutine
+        def on_ready(_, status):
+            reg_event.set()
+
+        self._subscriber = yield from self._dts.register(
+                xpath="I," + get_xpath(),
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=on_prepare,
+                    on_ready=on_ready,
+                    ),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+        yield from reg_event.wait()
+
+    @asyncio.coroutine
+    def _register_cancel_upload_job(self):
+        def get_xpath():
+            return "/rw-image-mgmt:cancel-upload-job"
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            if not msg.has_field("job_id"):
+                self._log.error("cancel-upload-job missing job-id field.")
+                xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+                return
+
+            job_id = msg.job_id
+
+            job = self._job_controller.get_job(job_id)
+            job.stop()
+
+            xact_info.respond_xpath(
+                    rwdts.XactRspCode.ACK,
+                    xpath="O," + get_xpath(),
+                    )
+
+        reg_event = asyncio.Event(loop=self._loop)
+
+        @asyncio.coroutine
+        def on_ready(_, status):
+            reg_event.set()
+
+        self._subscriber = yield from self._dts.register(
+                xpath="I," + get_xpath(),
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=on_prepare,
+                    on_ready=on_ready,
+                    ),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+        yield from reg_event.wait()
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for RPC's and wait for all registrations to complete """
+        yield from self._register_create_upload_job()
+        yield from self._register_cancel_upload_job()
+
+
+class GlanceClientUploadTaskCreator(object):
+    """ This class creates upload tasks using configured cloud accounts and
+    configured image catalog glance client """
+
+    def __init__(self, log, loop, accounts, glance_client):
+        self._log = log
+        self._loop = loop
+        self._accounts = accounts
+        self._glance_client = glance_client
+
+    @asyncio.coroutine
+    def create_tasks(self, account_names, image_id=None, image_name=None, image_checksum=None):
+        """ Create a list of UploadTasks for a list of cloud accounts
+        and a image with a matching image_name and image_checksum in the
+        catalog
+
+        Arguments:
+            account_names - A list of configured cloud account names
+            image_id - A image id
+            image_name - A image name
+            image_checksum - A image checksum
+
+        Returns:
+            A list of AccountImageUploadTask instances
+
+        Raises:
+            ImageNotFoundError - Could not find a matching image in the
+                image catalog
+
+            AccountNotFoundError - Could not find an account that matched
+                the provided account name
+        """
+        try:
+            image = yield from asyncio.wait_for(
+                    self._loop.run_in_executor(
+                            None,
+                            self._glance_client.find_active_image,
+                            image_id,
+                            image_name,
+                            image_checksum,
+                            ),
+                    timeout=5,
+                    loop=self._loop,
+                    )
+
+        except glance_client.OpenstackImageError as e:
+            msg = "Could not find image in Openstack to upload"
+            self._log.exception(msg)
+            raise ImageNotFoundError(msg) from e
+
+        image_info = openstack_image_to_image_info(image)
+        self._log.debug("created image info: %s", image_info)
+
+        tasks = []
+        for account_name in account_names:
+            if account_name not in self._accounts:
+                raise AccountNotFoundError("Could not find account %s" % account_name)
+
+        # For each account name provided, create a pipe (GlanceImagePipeGen)
+        # which feeds data into the UploadTask while also monitoring the various
+        # transmit stats (progress, bytes written, bytes per second, etc)
+        for account_name in account_names:
+            account = self._accounts[account_name]
+            self._log.debug("creating task for account %s", account.name)
+            glance_data_gen = self._glance_client.get_image_data(image_info.id)
+
+            pipe_gen = upload.GlanceImagePipeGen(self._log, self._loop, glance_data_gen)
+            progress_pipe = upload.UploadProgressWriteProxy(
+                    self._log, self._loop, image.size, pipe_gen.write_hdl
+                    )
+            progress_pipe.start_rate_monitoring()
+            pipe_gen.write_hdl = progress_pipe
+            pipe_gen.start()
+
+            task = upload.AccountImageUploadTask(
+                    self._log, self._loop, account, image_info, pipe_gen.read_hdl,
+                    progress_info=progress_pipe, write_canceller=pipe_gen,
+                    )
+            tasks.append(task)
+            self._log.debug("task created: %s", task)
+
+        return tasks
+
+    @asyncio.coroutine
+    def create_glance_image_from_url_create_rpc(self, account_names, create_msg):
+        if "image_url" not in create_msg:
+            raise ValueError("image_url must be specified")
+
+        if "image_id" in create_msg:
+            raise ImageRequestError("Cannot specify both image_url and image_id")
+
+        if "image_name" not in create_msg:
+            raise ImageRequestError("image_name must be specified when image_url is provided")
+
+        glance_image = yield from asyncio.wait_for(
+                self._loop.run_in_executor(
+                    None,
+                    self._glance_client.create_image_from_url,
+                    create_msg.image_url,
+                    create_msg.image_name,
+                    create_msg.image_checksum if "image_checksum" in create_msg else None,
+                    create_msg.disk_format if "disk_format" in create_msg else None,
+                    create_msg.container_format if "container_format" in create_msg else None,
+                    ),
+                timeout=5,
+                loop=self._loop,
+                )
+
+        return glance_image
+
+    @asyncio.coroutine
+    def create_tasks_from_glance_id(self, account_names, glance_image_id):
+        return (yield from self.create_tasks(account_names, glance_image_id))
+
+    @asyncio.coroutine
+    def create_tasks_from_onboarded_create_rpc(self, account_names, create_msg):
+        return (yield from self.create_tasks(
+            account_names,
+            create_msg.image_id if "image_id" in create_msg else None,
+            create_msg.image_name if "image_name" in create_msg else None,
+            create_msg.image_checksum if "image_checksum" in create_msg else None)
+            )
+
+
+class ImageManagerTasklet(rift.tasklets.Tasklet):
+    """
+    The ImageManagerTasklet provides an interface for DTS to interact with the
+    image upload machinery: cloud account configuration, the glance proxy
+    server, and the upload-job RPC and show handlers.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.rwlog.set_category("rw-mano-log")
+
+        self.cloud_cfg_subscriber = None
+        self.http_proxy = None
+        self.proxy_server = None
+        self.dts = None
+        self.job_controller = None
+        self.cloud_accounts = {}
+        self.glance_client = None
+        self.task_creator = None
+        self.rpc_handler = None
+        self.show_handler = None
+
+    def start(self):
+        super().start()
+        self.log.info("Starting Image Manager Tasklet")
+
+        self.log.debug("Registering with dts")
+        self.dts = rift.tasklets.DTS(
+                self.tasklet_info,
+                RwImageMgmtYang.get_schema(),
+                self.loop,
+                self.on_dts_state_change
+                )
+
+        self.log.debug("Created DTS Api GI Object: %s", self.dts)
+
+    def stop(self):
+        try:
+            self.dts.deinit()
+        except Exception as e:
+            self.log.exception(e)
+
+    @asyncio.coroutine
+    def init(self):
+        try:
+            self.log.debug("creating cloud account handler")
+            self.cloud_cfg_subscriber = CloudAccountDtsHandler(self.log, self.dts, self.log_hdl)
+            self.cloud_cfg_subscriber.register(
+                    self.on_cloud_account_create,
+                    self.on_cloud_account_delete
+                    )
+
+            self.log.debug("creating http proxy server")
+
+            self.http_proxy = glance_proxy_server.QuickProxyServer(self.log, self.loop)
+
+            self.proxy_server = glance_proxy_server.GlanceHTTPProxyServer(
+                    self.log, self.loop, self.http_proxy
+                    )
+            self.proxy_server.start()
+
+            self.job_controller = upload.ImageUploadJobController(
+                    self.log, self.loop
+                    )
+
+            self.glance_client = glance_client.OpenstackGlanceClient.from_token(
+                    self.log, "127.0.0.1", "9292", "test"
+                    )
+
+            self.task_creator = GlanceClientUploadTaskCreator(
+                    self.log, self.loop, self.cloud_accounts, self.glance_client
+                    )
+
+            self.rpc_handler = ImageDTSRPCHandler(
+                    self.log, self.loop, self.dts, self.cloud_accounts, self.glance_client, self.task_creator,
+                    self.job_controller
+                    )
+            yield from self.rpc_handler.register()
+
+            self.show_handler = ImageDTSShowHandler(
+                    self.log, self.loop, self.dts, self.job_controller
+                    )
+            yield from self.show_handler.register()
+
+        except Exception as e:
+            self.log.exception("error during init")
+
+    def on_cloud_account_create(self, account):
+        self.log.debug("adding cloud account: %s", account.name)
+        self.cloud_accounts[account.name] = account
+
+    def on_cloud_account_delete(self, account_name):
+        self.log.debug("deleting cloud account: %s", account_name)
+        if account_name not in self.cloud_accounts:
+            self.log.warning("cloud account not found: %s", account_name)
+            return
+
+        del self.cloud_accounts[account_name]
+
+    @asyncio.coroutine
+    def run(self):
+        pass
+
+    def on_instance_started(self):
+        self.log.debug("Got instance started callback")
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Handle DTS state change
+
+        Take action according to current DTS state to transition application
+        into the corresponding application state
+
+        Arguments
+            state - current dts state
+
+        """
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self.dts.handle.set_state(next_state)
diff --git a/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/upload.py b/rwlaunchpad/plugins/rwimagemgr/rift/tasklets/rwimagemgr/upload.py
new file mode 100644 (file)
index 0000000..7ce74b2
--- /dev/null
@@ -0,0 +1,709 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import collections
+import itertools
+import os
+import time
+import threading
+
+import rift.mano.cloud
+
+import gi
+gi.require_version('RwImageMgmtYang', '1.0')
+from gi.repository import (
+        RwImageMgmtYang,
+        )
+
+
+class UploadJobError(Exception):
+    pass
+
+
+class ImageUploadTaskError(Exception):
+    pass
+
+
+class ImageUploadError(ImageUploadTaskError):
+    pass
+
+
+class ImageListError(ImageUploadTaskError):
+    pass
+
+
+class ImageUploadJobController(object):
+    """ This class starts and manages ImageUploadJobs """
+    MAX_COMPLETED_JOBS = 20
+
+    def __init__(self, log, loop, max_completed_jobs=MAX_COMPLETED_JOBS):
+        self._log = log
+        self._loop = loop
+        self._job_id_gen = itertools.count(1)
+        self._max_completed_jobs = max_completed_jobs
+
+        self._jobs = {}
+        self._completed_jobs = collections.deque(
+                maxlen=self._max_completed_jobs
+                )
+
+    @property
+    def pb_msg(self):
+        """ the UploadJobs protobuf message """
+        upload_jobs_msg = RwImageMgmtYang.UploadJobs()
+        for job in self._jobs.values():
+            upload_jobs_msg.job.append(job.pb_msg)
+
+        return upload_jobs_msg
+
+    @property
+    def jobs(self):
+        """ the tracked list of ImageUploadJobs """
+        return self._jobs.values()
+
+    @property
+    def completed_jobs(self):
+        """ completed jobs in the tracked list of ImageUploadJobs """
+        return [job for job in self._jobs.values() if job in self._completed_jobs]
+
+    @property
+    def active_jobs(self):
+        """ in-progress jobs in the tracked list of ImageUploadJobs """
+        return [job for job in self._jobs.values() if job not in self._completed_jobs]
+
+    def _add_job(self, job):
+        self._jobs[job.id] = job
+
+    def _start_job(self, job, on_completed=None):
+        def on_job_completed(_):
+            self._log.debug("%s completed.  Adding to completed jobs list.", job)
+
+            # If adding a new completed job is going to overflow the
+            # completed job list, find the first job that completed and
+            # remove it from the tracked jobs.
+            if len(self._completed_jobs) == self._completed_jobs.maxlen:
+                first_completed_job = self._completed_jobs[-1]
+                del self._jobs[first_completed_job.id]
+
+            self._completed_jobs.appendleft(job)
+
+        job_future = job.start()
+        job_future.add_done_callback(on_job_completed)
+
+        if on_completed is not None:
+            job_future.add_done_callback(on_completed)
+
+    def get_job(self, job_id):
+        """ Get the UploadJob from the job id
+
+        Arguments:
+            job_id - the job id that was previously added to the controller
+
+        Returns:
+            The associated ImageUploadJob
+
+        Raises:
+            LookupError - Could not find the job id
+        """
+        if job_id not in self._jobs:
+            raise LookupError("Could not find job_id %s" % job_id)
+
+        return self._jobs[job_id]
+
+    def create_job(self, image_tasks, on_completed=None):
+        """ Create and start a ImageUploadJob from a list of ImageUploadTasks
+
+        Arguments:
+            image_tasks - a list of ImageUploadTasks
+            on_completed - a callback which is added to the job future
+
+        Returns:
+            A ImageUploadJob id
+        """
+        self._log.debug("Creating new job from %s image tasks", len(image_tasks))
+        new_job = ImageUploadJob(
+                self._log,
+                self._loop,
+                image_tasks,
+                job_id=next(self._job_id_gen)
+                )
+
+        self._add_job(new_job)
+        self._start_job(new_job, on_completed=on_completed)
+
+        return new_job.id
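+
+
+# Usage sketch (hedged): driving the controller with a minimal stub task,
+# assuming the standard logging module and an asyncio event loop.  The stub
+# below is hypothetical; real jobs are built from AccountImageUploadTask
+# instances (defined later in this module).
+#
+#     class _StubTask:
+#         state = "COMPLETED"
+#         pb_msg = None
+#         def start(self):
+#             pass
+#         @asyncio.coroutine
+#         def wait(self):
+#             return
+#
+#     loop = asyncio.get_event_loop()
+#     controller = ImageUploadJobController(logging.getLogger(), loop)
+#     job_id = controller.create_job([_StubTask()])
+#     job = controller.get_job(job_id)
+#     loop.run_until_complete(job.wait())
+#     assert job.state == "COMPLETED"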
+
+
+class ImageUploadJob(object):
+    """ This class manages a set of ImageUploadTasks
+
+    In order to push an image (or set of images) to many cloud accounts and get
+    a single status for that operation, we need one status that represents all
+    of the individual tasks.
+
+    The ImageUploadJob provides a single endpoint to control all the tasks and
+    to report when every image uploads successfully or when any one fails.
+    """
+    STATES = ("QUEUED", "IN_PROGRESS", "CANCELLING", "CANCELLED", "COMPLETED", "FAILED")
+    TIMEOUT_JOB = 6 * 60 * 60  # 6 hours
+    JOB_GEN = itertools.count(1)
+
+    def __init__(self, log, loop, upload_tasks, job_id=None, timeout_job=TIMEOUT_JOB):
+        self._log = log
+        self._loop = loop
+        self._upload_tasks = upload_tasks
+        self._job_id = next(ImageUploadJob.JOB_GEN) if job_id is None else job_id
+        self._timeout_job = timeout_job
+
+        self._state = "QUEUED"
+        self._state_stack = [self._state]
+
+        self._start_time = time.time()
+        self._stop_time = 0
+
+        self._task_future_map = {}
+        self._job_future = None
+
+    def __repr__(self):
+        return "{}(job_id={}, state={})".format(
+                self.__class__.__name__, self._job_id, self._state
+                )
+
+    @property
+    def id(self):
+        return self._job_id
+
+    @property
+    def state(self):
+        """ The state of the ImageUploadJob """
+        return self._state
+
+    @state.setter
+    def state(self, new_state):
+        """ Set the state of the ImageUploadJob """
+        states = ImageUploadJob.STATES
+        assert new_state in states
+        assert states.index(new_state) >= states.index(self._state)
+        self._state_stack.append(new_state)
+
+        self._state = new_state
+
+    @property
+    def state_stack(self):
+        """ The list of states that this job progressed through  """
+        return self._state_stack
+
+    @property
+    def pb_msg(self):
+        """ The UploadJob protobuf message """
+        task = RwImageMgmtYang.UploadJob.from_dict({
+            "id": self._job_id,
+            "status": self._state,
+            "start_time": self._start_time,
+            "upload_tasks": [task.pb_msg for task in self._upload_tasks]
+        })
+
+        if self._stop_time:
+            task.stop_time = self._stop_time
+
+        return task
+
+    def _start_upload_tasks(self):
+        self._log.debug("Starting %s upload tasks", len(self._upload_tasks))
+
+        for upload_task in self._upload_tasks:
+            upload_task.start()
+
+    @asyncio.coroutine
+    def _wait_for_upload_tasks(self):
+        self._log.debug("Waiting for upload tasks to complete")
+
+        wait_coroutines = [t.wait() for t in self._upload_tasks]
+        if wait_coroutines:
+            yield from asyncio.wait(
+                    wait_coroutines,
+                    timeout=self._timeout_job,
+                    loop=self._loop
+                    )
+
+        self._log.debug("All upload tasks completed")
+
+    def _set_final_job_state(self):
+        failed_tasks = []
+        for task in self._upload_tasks:
+            if task.state != "COMPLETED":
+                failed_tasks.append(task)
+
+        if failed_tasks:
+            self._log.error("%s had %s FAILED tasks.", self, len(failed_tasks))
+            self.state = "FAILED"
+        else:
+            self._log.debug("%s tasks completed successfully", len(self._upload_tasks))
+            self.state = "COMPLETED"
+
+    @asyncio.coroutine
+    def _cancel_job(self):
+        for task in self._upload_tasks:
+            task.stop()
+
+        # TODO: Wait for all tasks to actually reach terminal
+        # states.
+
+        self.state = "CANCELLED"
+
+    @asyncio.coroutine
+    def _do_job(self):
+        self.state = "IN_PROGRESS"
+        self._start_upload_tasks()
+        try:
+            yield from self._wait_for_upload_tasks()
+        except asyncio.CancelledError:
+            self._log.debug("%s was cancelled.  Cancelling all tasks.",
+                            self)
+            self._loop.create_task(self._cancel_job())
+            raise
+        finally:
+            self._stop_time = time.time()
+            self._job_future = None
+
+        self._set_final_job_state()
+
+    @asyncio.coroutine
+    def wait(self):
+        """ Wait for the job to reach a terminal state """
+        if self._job_future is None:
+            raise UploadJobError("Job not started")
+
+        yield from asyncio.wait_for(
+                self._job_future,
+                self._timeout_job,
+                loop=self._loop
+                )
+
+    def start(self):
+        """ Start the job and all child tasks """
+        if self._state != "QUEUED":
+            raise UploadJobError("Job already started")
+
+        self._job_future = self._loop.create_task(self._do_job())
+        return self._job_future
+
+    def stop(self):
+        """ Stop the job and all child tasks  """
+        if self._job_future is not None:
+            self.state = "CANCELLING"
+            self._job_future.cancel()
+
+
+class ByteRateCalculator(object):
+    """  This class produces a byte rate from inputted measurements"""
+    def __init__(self, rate_time_constant):
+        self._rate = 0
+        self._time_constant = rate_time_constant
+
+    @property
+    def rate(self):
+        return self._rate
+
+    def add_measurement(self, num_bytes, time_delta):
+        rate = num_bytes / time_delta
+        if self._rate == 0:
+            self._rate = rate
+        else:
+            self._rate += ((rate - self._rate) / self._time_constant)
+
+        return self._rate
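+
+
+# Worked example (illustrative numbers only): with rate_time_constant=5 the
+# rate moves 1/5 of the way toward each new sample, i.e. an exponential
+# moving average.
+#
+#     calc = ByteRateCalculator(rate_time_constant=5)
+#     calc.add_measurement(1000, 1.0)   # -> 1000.0 (first sample taken as-is)
+#     calc.add_measurement(2000, 1.0)   # -> 1200.0 (1000 + (2000 - 1000) / 5)
+#     calc.add_measurement(2000, 1.0)   # -> 1360.0 (1200 + (2000 - 1200) / 5)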
+
+
+class UploadProgressWriteProxy(object):
+    """ This class implements a write proxy with produces various progress stats
+
+    In order to keep the complexity of the UploadTask down, this class acts as a
+    proxy for a file write.  By providing the original handle to be written to
+    and having the client class call write() on this object, we can produce the
+    various statistics to be consumed.
+    """
+    RATE_TIME_CONSTANT = 5
+
+    def __init__(self, log, loop, bytes_total, write_hdl):
+        self._log = log
+        self._loop = loop
+        self._bytes_total = bytes_total
+        self._write_hdl = write_hdl
+
+        self._bytes_written = 0
+        self._byte_rate = 0
+
+        self._rate_calc = ByteRateCalculator(UploadProgressWriteProxy.RATE_TIME_CONSTANT)
+        self._rate_task = None
+
+    def write(self, data):
+        self._write_hdl.write(data)
+        self._bytes_written += len(data)
+
+    def close(self):
+        self._write_hdl.close()
+        if self._rate_task is not None:
+            self._log.debug("stopping rate monitoring task")
+            self._rate_task.cancel()
+
+    def start_rate_monitoring(self):
+        """ Start the rate monitoring task """
+        @asyncio.coroutine
+        def periodic_rate_task():
+            while True:
+                start_time = time.time()
+                start_bytes = self._bytes_written
+                yield from asyncio.sleep(1, loop=self._loop)
+                time_period = time.time() - start_time
+                num_bytes = self._bytes_written - start_bytes
+
+                self._byte_rate = self._rate_calc.add_measurement(num_bytes, time_period)
+
+        self._log.debug("starting rate monitoring task")
+        self._rate_task = self._loop.create_task(periodic_rate_task())
+
+    @property
+    def progress_percent(self):
+        if self._bytes_total == 0:
+            return 0
+
+        return int(self._bytes_written / self._bytes_total * 100)
+
+    @property
+    def bytes_written(self):
+        return self._bytes_written
+
+    @property
+    def bytes_total(self):
+        return self._bytes_total
+
+    @property
+    def bytes_rate(self):
+        return self._byte_rate
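+
+
+# Usage sketch (hedged): wrapping a plain in-memory handle to observe progress,
+# assuming a logger `log` and event loop `loop`.  io.BytesIO stands in for the
+# real pipe write handle.
+#
+#     import io
+#     sink = io.BytesIO()
+#     proxy = UploadProgressWriteProxy(log, loop, bytes_total=10, write_hdl=sink)
+#     proxy.write(b"12345")
+#     proxy.progress_percent   # -> 50
+#     proxy.write(b"67890")
+#     proxy.progress_percent   # -> 100
+#     proxy.close()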
+
+
+class GlanceImagePipeGen(object):
+    """ This class produces a read file handle from a generator that produces bytes
+
+    The CAL API takes a file handle as an input.  The Glance API creates a generator
+    that produces byte strings.  This class acts as the mediator by creating a pipe
+    and pumping the bytestring from the generator into the write side of the pipe.
+
+    A pipe has the useful property here that writes block once the buffer fills
+    until the reader has consumed data.  This lets us pull from glance at the
+    pace of the reader, so images never need to be staged on local disk.
+    """
+    def __init__(self, log, loop, data_gen):
+        self._log = log
+        self._loop = loop
+        self._data_gen = data_gen
+
+        read_fd, write_fd = os.pipe()
+
+        self._read_hdl = os.fdopen(read_fd, 'rb')
+        self._write_hdl = os.fdopen(write_fd, 'wb')
+        self._close_hdl = self._write_hdl
+
+    @property
+    def write_hdl(self):
+        return self._write_hdl
+
+    @write_hdl.setter
+    def write_hdl(self, new_write_hdl):
+        self._write_hdl = new_write_hdl
+
+    @property
+    def read_hdl(self):
+        return self._read_hdl
+
+    def _gen_writer(self):
+        self._log.debug("starting image data write to pipe")
+        try:
+            for data in self._data_gen:
+                try:
+                    self._write_hdl.write(data)
+                except (BrokenPipeError, ValueError) as e:
+                    self._log.warning("write pipe closed: %s", str(e))
+                    return
+
+        except Exception as e:
+            self._log.exception("error when writing data to pipe: %s", str(e))
+
+        finally:
+            self._log.debug("closing write side of pipe")
+            try:
+                self._write_hdl.close()
+            except OSError:
+                pass
+
+    def start(self):
+        t = threading.Thread(target=self._gen_writer)
+        t.daemon = True
+        t.start()
+
+    def stop(self):
+        self._log.debug("stop requested, closing write side of pipe")
+        self._write_hdl.close()
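+
+
+# Usage sketch (hedged): bridging an in-memory chunk generator to a readable
+# file handle, assuming a logger `log` and event loop `loop`.  The generator
+# below stands in for the real Glance data stream.
+#
+#     def fake_chunks():
+#         for _ in range(3):
+#             yield b"chunk-of-image-data"
+#
+#     pipe = GlanceImagePipeGen(log, loop, fake_chunks())
+#     pipe.start()                 # writer thread starts pumping chunks
+#     data = pipe.read_hdl.read()  # blocks until the writer closes its end
+#     len(data)                    # -> 57 (3 x 19 bytes)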
+
+
+class AccountImageUploadTask(object):
+    """ This class manages an create_image task from an image info and file handle
+
+    Manage the upload of a image to a configured cloud account.
+    """
+    STATES = ("QUEUED", "CHECK_IMAGE_EXISTS", "UPLOADING", "CANCELLING", "CANCELLED", "COMPLETED", "FAILED")
+
+    TIMEOUT_CHECK_EXISTS = 10
+    TIMEOUT_IMAGE_UPLOAD = 6 * 60 * 60  # 6 hours
+
+    def __init__(self, log, loop, account, image_info, image_hdl,
+                 timeout_exists=TIMEOUT_CHECK_EXISTS, timeout_upload=TIMEOUT_IMAGE_UPLOAD,
+                 progress_info=None, write_canceller=None
+                 ):
+        self._log = log
+        self._loop = loop
+        self._account = account
+        self._image_info = image_info.deep_copy()
+        self._image_hdl = image_hdl
+
+        self._timeout_exists = timeout_exists
+        self._timeout_upload = timeout_upload
+
+        self._progress_info = progress_info
+        self._write_canceller = write_canceller
+
+        self._state = "QUEUED"
+        self._state_stack = [self._state]
+
+        self._detail = "Task is waiting to be started"
+        self._start_time = time.time()
+        self._stop_time = 0
+        self._upload_future = None
+
+        if not image_info.has_field("name"):
+            raise ValueError("image info must have name field")
+
+    @property
+    def state(self):
+        return self._state
+
+    @state.setter
+    def state(self, new_state):
+        states = AccountImageUploadTask.STATES
+        assert new_state in states
+        assert states.index(new_state) >= states.index(self._state)
+        self._state_stack.append(new_state)
+
+        self._state = new_state
+
+    @property
+    def state_stack(self):
+        return self._state_stack
+
+    @property
+    def image_id(self):
+        """ The image name being uploaded """
+        return self._image_info.id
+
+    @property
+    def image_name(self):
+        """ The image name being uploaded """
+        return self._image_info.name
+
+    @property
+    def image_checksum(self):
+        """ The image checksum being uploaded """
+        if self._image_info.has_field("checksum"):
+            return self._image_info.checksum
+
+        return None
+
+    @property
+    def cloud_account(self):
+        """ The cloud account name which the image is being uploaded to """
+        return self._account.name
+
+    @property
+    def pb_msg(self):
+        """ The UploadTask protobuf message """
+        task = RwImageMgmtYang.UploadTask.from_dict({
+            "cloud_account": self.cloud_account,
+            "image_id": self.image_id,
+            "image_name": self.image_name,
+            "status": self.state,
+            "detail": self._detail,
+            "start_time": self._start_time,
+        })
+
+        if self.image_checksum is not None:
+            task.image_checksum = self.image_checksum
+
+        if self._stop_time:
+            task.stop_time = self._stop_time
+
+        if self._progress_info:
+            task.bytes_written = self._progress_info.bytes_written
+            task.bytes_total = self._progress_info.bytes_total
+            task.progress_percent = self._progress_info.progress_percent
+            task.bytes_per_second = self._progress_info.bytes_rate
+
+        if self.state == "COMPLETED":
+            task.progress_percent = 100
+
+        return task
+
+    def _get_account_images(self):
+        account_images = []
+        self._log.debug("getting image list for account {}".format(self._account.name))
+        try:
+            account_images = self._account.get_image_list()
+        except rift.mano.cloud.CloudAccountCalError as e:
+            msg = "could not get image list for account {}".format(self._account.name)
+            self._log.error(msg)
+            raise ImageListError(msg) from e
+
+        return account_images
+
+    def _has_existing_image(self):
+        account = self._account
+
+        account_images = self._get_account_images()
+
+        matching_images = [i for i in account_images if i.name == self.image_name]
+
+        if self.image_checksum is not None:
+            matching_images = [i for i in matching_images if i.checksum == self.image_checksum]
+
+        if matching_images:
+            self._log.debug("found matching image with checksum in account %s",
+                            account.name)
+            return True
+
+        self._log.debug("did not find matching image with checksum in account %s",
+                        account.name)
+        return False
+
+    def _upload_image(self):
+        image = self._image_info
+        account = self._account
+
+        image.fileno = self._image_hdl.fileno()
+
+        self._log.debug("uploading to account {}: {}".format(account.name, image))
+        try:
+            image.id = account.create_image(image)
+        except rift.mano.cloud.CloudAccountCalError as e:
+            msg = "error when uploading image {} to cloud account: {}".format(image.name, str(e))
+            self._log.error(msg)
+            raise ImageUploadError(msg) from e
+
+        self._log.debug('uploaded image (id: {}) to account {}: {}'.format(
+                        image.id, account.name, image.name))
+
+        return image.id
+
+    @asyncio.coroutine
+    def _do_upload(self):
+        try:
+            self.state = "CHECK_IMAGE_EXISTS"
+            has_image = yield from asyncio.wait_for(
+                    self._loop.run_in_executor(None, self._has_existing_image),
+                    timeout=self._timeout_exists,
+                    loop=self._loop
+                    )
+            if has_image:
+                self.state = "COMPLETED"
+                self._detail = "Image already exists on destination"
+                return
+
+            self.state = "UPLOADING"
+            self._detail = "Uploading image"
+
+            # Note that if the upload times out, the upload thread may still
+            # stick around.  We'll need another method of cancelling the task
+            # through the VALA interface.
+            image_id = yield from asyncio.wait_for(
+                    self._loop.run_in_executor(None, self._upload_image),
+                    timeout=self._timeout_upload,
+                    loop=self._loop
+                    )
+
+        except asyncio.CancelledError as e:
+            self.state = "CANCELLED"
+            self._detail = "Image upload cancelled"
+
+        except ImageUploadTaskError as e:
+            self.state = "FAILED"
+            self._detail = str(e)
+
+        except asyncio.TimeoutError as e:
+            self.state = "FAILED"
+            self._detail = "Timed out during upload task: %s" % str(e)
+
+        else:
+            # If the user does not provide a checksum and performs a URL source
+            # upload with an incorrect URL, then Glance does not indicate a failure
+            # and the CAL cannot detect an incorrect upload.  In this case, use
+            # the bytes_written to detect a bad upload and mark the task as failed.
+            if self._progress_info and self._progress_info.bytes_written == 0:
+                self.state = "FAILED"
+                self._detail = "No bytes written.  Possible bad image source."
+                return
+
+            self.state = "COMPLETED"
+            self._detail = "Image successfully uploaded.  Image id: %s" % image_id
+
+        finally:
+            self._stop_time = time.time()
+            self._upload_future = None
+
+    @asyncio.coroutine
+    def wait(self):
+        """ Wait for the upload task to complete """
+        if self._upload_future is None:
+            raise ImageUploadError("Task not started")
+
+        yield from asyncio.wait_for(
+                self._upload_future,
+                self._timeout_upload, loop=self._loop
+                )
+
+    def start(self):
+        """ Start the upload task """
+        if self._state != "QUEUED":
+            raise ImageUploadError("Task already started")
+
+        self._log.info("Starting %s", self)
+
+        self._upload_future = self._loop.create_task(self._do_upload())
+
+        return self._upload_future
+
+    def stop(self):
+        """ Stop the upload task in progress """
+        if self._upload_future is None:
+            self._log.warning("Cannot cancel %s.  Not in progress.", self)
+            return
+
+        self.state = "CANCELLING"
+        self._detail = "Cancellation has been requested"
+
+        self._log.info("Cancelling %s", self)
+        self._upload_future.cancel()
+        if self._write_canceller is not None:
+            self._write_canceller.stop()
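+
+
+# Usage sketch (hedged): exercising the task state machine with a mocked CAL
+# account, assuming a logger `log`, an event loop `loop`, unittest.mock, and
+# RwcalYang for the image-info message.  The mock, the image name, and the
+# /dev/null handle are all hypothetical stand-ins.
+#
+#     account = unittest.mock.Mock()
+#     account.name = "mock-account"
+#     account.get_image_list.return_value = []       # no pre-existing image
+#     account.create_image.return_value = "image-1"  # pretend upload succeeds
+#
+#     image_info = RwcalYang.ImageInfoItem()
+#     image_info.name = "test.qcow2"
+#
+#     with open(os.devnull, "rb") as hdl:
+#         task = AccountImageUploadTask(log, loop, account, image_info, hdl)
+#         loop.run_until_complete(task.start())
+#     task.state_stack  # -> ['QUEUED', 'CHECK_IMAGE_EXISTS', 'UPLOADING', 'COMPLETED']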
diff --git a/rwlaunchpad/plugins/rwimagemgr/rwimagemgrtasklet.py b/rwlaunchpad/plugins/rwimagemgr/rwimagemgrtasklet.py
new file mode 100755 (executable)
index 0000000..9fa34d2
--- /dev/null
@@ -0,0 +1,28 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwimagemgr
+
+class Tasklet(rift.tasklets.rwimagemgr.ImageManagerTasklet):
+    pass
+
+# vim: sw=4
diff --git a/rwlaunchpad/plugins/rwimagemgr/test/CMakeLists.txt b/rwlaunchpad/plugins/rwimagemgr/test/CMakeLists.txt
new file mode 100644 (file)
index 0000000..4704724
--- /dev/null
@@ -0,0 +1,29 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 06/28/2016
+# 
+
+rift_py3test(utest_image_upload.py
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_image_upload.py
+  )
+
+rift_py3test(utest_dts_handlers.py
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_dts_handlers.py
+  )
+
diff --git a/rwlaunchpad/plugins/rwimagemgr/test/utest_dts_handlers.py b/rwlaunchpad/plugins/rwimagemgr/test/utest_dts_handlers.py
new file mode 100755 (executable)
index 0000000..7ba4f76
--- /dev/null
@@ -0,0 +1,172 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+
+import asyncio
+
+# asynctest looks for selectors under its own package, but in
+# Python 3.3 it exists under the asyncio package
+import sys
+sys.path.append(asyncio.__path__[0])
+import asynctest
+
+import logging
+import os
+import unittest
+import unittest.mock
+import xmlrunner
+
+import gi
+gi.require_version("RwDts", "1.0")
+gi.require_version("RwImageMgmtYang", "1.0")
+from gi.repository import (
+    RwDts,
+    RwImageMgmtYang,
+)
+
+import rift.tasklets
+import rift.test.dts
+
+from rift.tasklets.rwimagemgr import tasklet
+from rift.tasklets.rwimagemgr import upload
+
+from rift.test.dts import async_test
+
+import utest_image_upload
+
+
+def create_job_controller_mock():
+    jc_mock = unittest.mock.Mock(upload.ImageUploadJobController)
+
+    return jc_mock
+
+
+def create_upload_task_creator_mock():
+    creator_mock = asynctest.CoroutineMock(spec=["create_tasks_from_onboarded_create_rpc"])
+
+    return creator_mock
+
+
+class RwImageRPCTestCase(rift.test.dts.AbstractDTSTest):
+    @classmethod
+    def configure_schema(cls):
+        return RwImageMgmtYang.get_schema()
+
+    @classmethod
+    def configure_timeout(cls):
+        return 240
+
+    def configure_test(self, loop, test_id):
+        self.log.debug("STARTING - %s", self.id())
+        self.tinfo = self.new_tinfo(self.id())
+        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+
+        self.task_creator_mock = create_upload_task_creator_mock()
+        self.job_controller_mock = create_job_controller_mock()
+        self.rpc_handler = tasklet.ImageDTSRPCHandler(
+                self.log, self.loop, self.dts, {'mock', None}, object(), self.task_creator_mock,
+                self.job_controller_mock
+                )
+        self.show_handler = tasklet.ImageDTSShowHandler(
+                self.log, self.loop, self.dts, self.job_controller_mock
+                )
+
+        self.tinfo_c = self.new_tinfo(self.id() + "_client")
+        self.dts_c = rift.tasklets.DTS(self.tinfo_c, self.schema, self.loop)
+
+        self._upload_mixin = utest_image_upload.UploadTaskMixin(self.log, self.loop)
+        self._image_mock_mixin = utest_image_upload.ImageMockMixin(self)
+
+    @async_test
+    def test_create_job(self):
+        yield from self.rpc_handler.register()
+        yield from self.show_handler.register()
+
+        account = self._image_mock_mixin.account
+        with self._upload_mixin.create_upload_task(account) as upload_task:
+            self.task_creator_mock.create_tasks_from_onboarded_create_rpc.return_value = [upload_task]
+            self.job_controller_mock.create_job.return_value = 2
+            type(self.job_controller_mock).pb_msg = unittest.mock.PropertyMock(
+                    return_value=RwImageMgmtYang.UploadJobs.from_dict({
+                        "job": [
+                            {
+                                "id": 2,
+                                "upload_tasks": [upload_task.pb_msg],
+                                "status": "COMPLETED"
+                            }
+                        ]
+                    })
+                  )
+
+            create_job_msg = RwImageMgmtYang.CreateUploadJob.from_dict({
+                "cloud_account": [upload_task.cloud_account],
+                "onboarded_image": {
+                    "image_name": upload_task.image_name,
+                    "image_checksum": upload_task.image_checksum,
+                }
+            })
+
+            query_iter = yield from self.dts_c.query_rpc(
+                    "I,/rw-image-mgmt:create-upload-job",
+                    0,
+                    create_job_msg,
+                    )
+
+            for fut_resp in query_iter:
+                rpc_result = (yield from fut_resp).result
+
+            self.assertEqual(2, rpc_result.job_id)
+
+            self.assertTrue(
+                    self.task_creator_mock.create_tasks_from_onboarded_create_rpc.called
+                    )
+
+            query_iter = yield from self.dts_c.query_read(
+                    "D,/rw-image-mgmt:upload-jobs",
+                    )
+
+            for fut_resp in query_iter:
+                rpc_result = (yield from fut_resp).result
+                self.assertEqual(1, len(rpc_result.job))
+                self.assertEqual(2, rpc_result.job[0].id)
+                self.assertEqual(1, len(rpc_result.job[0].upload_tasks))
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='TEST %(message)s')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+
+    args, unknown = parser.parse_known_args(argv)
+    if args.no_runner:
+        runner = None
+
+    # Set the global logging level
+    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/plugins/rwimagemgr/test/utest_image_upload.py b/rwlaunchpad/plugins/rwimagemgr/test/utest_image_upload.py
new file mode 100755 (executable)
index 0000000..9d4464f
--- /dev/null
@@ -0,0 +1,512 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import asyncio
+import contextlib
+import io
+import logging
+import os
+import sys
+import tempfile
+import time
+import unittest
+import uuid
+import xmlrunner
+
+from rift.mano import cloud
+from rift.tasklets.rwimagemgr import upload
+from rift.package import checksums
+from rift.test.dts import async_test
+import rw_status
+
+import gi
+gi.require_version('RwCal', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwLog', '1.0')
+gi.require_version('RwTypes', '1.0')
+from gi.repository import (
+        RwCal,
+        RwCloudYang,
+        RwLog,
+        RwTypes,
+        RwcalYang,
+        )
+
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+
+class CreateImageMock(object):
+    def __init__(self, log):
+        self._log = log
+        self.image_name = None
+        self.image_checksum = None
+
+        self.do_fail = False
+        self.do_read_slow = False
+
+        self._image_msgs = []
+
+    def add_existing_image(self, image_msg):
+        self._log.debug("Appending existing image msg: %s", image_msg)
+        self._image_msgs.append(image_msg)
+
+    @rwstatus
+    def do_create_image(self, _, image):
+        if self.do_fail:
+            self._log.debug("Simulating failure")
+            raise ValueError("FAILED")
+
+        if not image.has_field("fileno"):
+            raise ValueError("Image must have fileno")
+
+        self.image_name = image.name
+
+        # Create a duplicate file descriptor to allow this code to own
+        # its own descriptor (and thus close it) and allow the client to
+        # own and close its own descriptor.
+        new_fileno = os.dup(image.fileno)
+        with os.fdopen(new_fileno, 'rb') as hdl:
+            bytes_hdl = io.BytesIO()
+            if self.do_read_slow:
+                self._log.debug("slow reading from mock cal")
+                try:
+                    num_bytes = 0
+                    while True:
+                        d = os.read(new_fileno, 1024)
+                        num_bytes += len(d)
+                        bytes_hdl.write(d)
+                        if not d:
+                            self._log.debug("read %s bytes", num_bytes)
+                            return
+
+                        time.sleep(.05)
+
+                except Exception as e:
+                    self._log.warning("caught exception when reading: %s",
+                                      str(e))
+                    raise
+
+            else:
+                bytes_hdl.write(hdl.read())
+
+            bytes_hdl.seek(0)
+            self.image_checksum = checksums.checksum(bytes_hdl)
+            bytes_hdl.close()
+
+        return str(uuid.uuid4())
+
+    @rwstatus
+    def do_get_image_list(self, account):
+        boxed_image_list = RwcalYang.VimResources()
+        for msg in self._image_msgs:
+            boxed_image_list.imageinfo_list.append(msg)
+
+        return boxed_image_list
+
+
+def create_random_image_file():
+    with open("/dev/urandom", "rb") as rand_hdl:
+        file_hdl = tempfile.NamedTemporaryFile("r+b")
+        file_hdl.write(rand_hdl.read(1 * 1024 * 1024))
+        file_hdl.flush()
+        file_hdl.seek(0)
+        return file_hdl
+
+
+def get_file_hdl_gen(file_hdl):
+    while True:
+        try:
+            d = file_hdl.read(64)
+        except ValueError:
+            return
+
+        if not d:
+            return
+
+        yield d
+
+
+def get_image_checksum(image_hdl):
+    image_checksum = checksums.checksum(image_hdl)
+    image_hdl.seek(0)
+    return image_checksum
+
+
+def create_image_info(image_name, image_checksum):
+    image = RwcalYang.ImageInfoItem()
+    image.name = image_name
+    image.checksum = image_checksum
+    image.disk_format = os.path.splitext(image_name)[1][1:]
+    image.container_format = "bare"
+
+    return image
+
+
+class UploadTaskMixin(object):
+    def __init__(self, log, loop):
+        self._log = log
+        self._loop = loop
+
+    def create_image_hdl(self):
+        image_hdl = create_random_image_file()
+
+        return image_hdl
+
+    @contextlib.contextmanager
+    def create_upload_task(self, account, image_name="test.qcow2",
+                           image_checksum=None, image_info=None):
+
+        with self.create_image_hdl() as image_hdl:
+
+            image_checksum = get_image_checksum(image_hdl) \
+                if image_checksum is None else image_checksum
+
+            image_info = create_image_info(image_name, image_checksum) \
+                if image_info is None else image_info
+
+            iter_hdl = get_file_hdl_gen(image_hdl)
+            pipe_gen = upload.GlanceImagePipeGen(self._log, self._loop, iter_hdl)
+
+            upload_task = upload.AccountImageUploadTask(
+                    self._log, self._loop, account, image_info, pipe_gen.read_hdl,
+                    write_canceller=pipe_gen
+                    )
+            pipe_gen.start()
+
+            yield upload_task
+
+
+class ImageMockMixin(object):
+    ACCOUNT_MSG = RwCloudYang.CloudAccount(
+        name="mock",
+        account_type="mock",
+        )
+
+    def __init__(self, log):
+        self._log = log
+        self._account = cloud.CloudAccount(
+                self._log,
+                RwLog.Ctx.new(__file__), ImageMockMixin.ACCOUNT_MSG
+                )
+
+        self._create_image_mock = CreateImageMock(self._log)
+
+        # Mock the create_image call
+        self._account.cal.create_image = self._create_image_mock.do_create_image
+        self._account.cal.get_image_list = self._create_image_mock.do_get_image_list
+
+    @property
+    def account(self):
+        return self._account
+
+    @property
+    def image_mock(self):
+        return self._create_image_mock
+
+
+class TestImageUploadTask(unittest.TestCase, UploadTaskMixin, ImageMockMixin):
+    def __init__(self, *args, **kwargs):
+        self._loop = asyncio.get_event_loop()
+        self._log = logging.getLogger(__file__)
+
+        ImageMockMixin.__init__(self, self._log)
+        UploadTaskMixin.__init__(self, self._log, self._loop)
+        unittest.TestCase.__init__(self, *args, **kwargs)
+
+    @async_test
+    def test_upload_image_task(self):
+        with self.create_upload_task(self.account) as upload_task:
+            yield from upload_task.start()
+
+        self.assertIn("QUEUED", upload_task.state_stack)
+        self.assertIn("CHECK_IMAGE_EXISTS", upload_task.state_stack)
+        self.assertIn("UPLOADING", upload_task.state_stack)
+        self.assertIn("COMPLETED", upload_task.state_stack)
+
+        self.assertEqual("COMPLETED", upload_task.state)
+
+        self.assertEqual(self.image_mock.image_name, upload_task.image_name)
+        self.assertEqual(self.image_mock.image_checksum, upload_task.image_checksum)
+
+        task_pb_msg = upload_task.pb_msg
+        self.assertEqual(upload_task.image_name, task_pb_msg.image_name)
+
+    @async_test
+    def test_cancel_image_task(self):
+        @asyncio.coroutine
+        def wait_for_task_state(upload_task, state, timeout=10):
+            start_time = time.time()
+            while (time.time() - start_time) < timeout:
+                if upload_task.state == state:
+                    return
+
+                yield from asyncio.sleep(.01)
+
+            raise asyncio.TimeoutError()
+
+        self.image_mock.do_read_slow = True
+
+        with self.create_upload_task(self.account) as upload_task:
+            upload_task.start()
+            yield from wait_for_task_state(upload_task, "UPLOADING")
+            upload_task.stop()
+            self.assertEqual("CANCELLING", upload_task.state)
+            yield from wait_for_task_state(upload_task, "CANCELLED")
+
+    @async_test
+    def test_create_image_failed(self):
+        self.image_mock.do_fail = True
+
+        with self.create_upload_task(self.account) as upload_task:
+            yield from upload_task.start()
+
+        self.assertEqual("FAILED", upload_task.state)
+
+    @async_test
+    def test_create_image_name_and_checksum_exists(self):
+        with self.create_upload_task(self.account) as upload_task:
+            image_entry = RwcalYang.ImageInfoItem(
+                    id="asdf",
+                    name=upload_task.image_name,
+                    checksum=upload_task.image_checksum
+                    )
+            self.image_mock.add_existing_image(image_entry)
+
+            yield from upload_task.start()
+
+        # No image should have been uploaded, since the name and checksum
+        # already matched an existing image in the image list
+        self.assertEqual(self.image_mock.image_checksum, None)
+
+        self.assertEqual("COMPLETED", upload_task.state)
+        self.assertTrue("UPLOADING" not in upload_task.state_stack)
+
+
+class TestUploadJob(unittest.TestCase, UploadTaskMixin, ImageMockMixin):
+    def __init__(self, *args, **kwargs):
+        self._loop = asyncio.get_event_loop()
+        self._log = logging.getLogger(__file__)
+
+        ImageMockMixin.__init__(self, self._log)
+        UploadTaskMixin.__init__(self, self._log, self._loop)
+        unittest.TestCase.__init__(self, *args, **kwargs)
+
+    @async_test
+    def test_single_task_upload_job(self):
+        with self.create_upload_task(self.account) as upload_task:
+            job = upload.ImageUploadJob(self._log, self._loop, [upload_task])
+            self.assertEqual("QUEUED", job.state)
+            yield from job.start()
+
+        self.assertIn("QUEUED", job.state_stack)
+        self.assertIn("IN_PROGRESS", job.state_stack)
+        self.assertIn("COMPLETED", job.state_stack)
+
+        self.assertEqual("COMPLETED", job.state)
+
+        job_pb_msg = job.pb_msg
+        self.assertEqual("COMPLETED", job_pb_msg.status)
+
+    @async_test
+    def test_multiple_tasks_upload_job(self):
+        with self.create_upload_task(self.account) as upload_task1:
+            with self.create_upload_task(self.account) as upload_task2:
+                job = upload.ImageUploadJob(
+                        self._log, self._loop, [upload_task1, upload_task2])
+                yield from job.start()
+
+        self.assertEqual("COMPLETED", job.state)
+
+    @async_test
+    def test_failed_task_in_job(self):
+        self.image_mock.do_fail = True
+
+        with self.create_upload_task(self.account) as upload_task:
+            job = upload.ImageUploadJob(
+                    self._log, self._loop, [upload_task])
+            yield from job.start()
+
+        self.assertEqual("FAILED", job.state)
+
+    @async_test
+    def test_cancel_job(self):
+        @asyncio.coroutine
+        def wait_for_job_state(upload_job, state, timeout=10):
+            start_time = time.time()
+            while (time.time() - start_time) < timeout:
+                if upload_job.state == state:
+                    return
+
+                yield from asyncio.sleep(.01)
+
+            raise asyncio.TimeoutError()
+
+        self.image_mock.do_read_slow = True
+
+        with self.create_upload_task(self.account) as upload_task:
+            job = upload.ImageUploadJob(
+                    self._log, self._loop, [upload_task])
+            job.start()
+            yield from wait_for_job_state(job, "IN_PROGRESS")
+            job.stop()
+            self.assertEqual("CANCELLING", job.state)
+            yield from wait_for_job_state(job, "CANCELLED")
+
+        self.assertEqual("CANCELLED", job.state)
+
+
+class TestUploadJobController(unittest.TestCase, UploadTaskMixin, ImageMockMixin):
+    def __init__(self, *args, **kwargs):
+        self._loop = asyncio.get_event_loop()
+        self._log = logging.getLogger(__file__)
+
+        ImageMockMixin.__init__(self, self._log)
+        unittest.TestCase.__init__(self, *args, **kwargs)
+
+    @async_test
+    def test_controller_single_task_job(self):
+        controller = upload.ImageUploadJobController(
+                self._log, self._loop
+                )
+
+        with self.create_upload_task(self.account) as upload_task:
+            job_id = controller.create_job([upload_task])
+            self.assertEqual(len(controller.active_jobs), 1)
+            self.assertEqual(len(controller.completed_jobs), 0)
+
+            job = controller.get_job(job_id)
+            yield from job.wait()
+
+            self.assertEqual(len(controller.active_jobs), 0)
+            self.assertEqual(len(controller.completed_jobs), 1)
+
+            upload_jobs_pb_msg = controller.pb_msg
+            self.assertEqual(len(upload_jobs_pb_msg.job), 1)
+
+    @async_test
+    def test_controller_multi_task_job(self):
+        controller = upload.ImageUploadJobController(
+                self._log, self._loop
+                )
+
+        with self.create_upload_task(self.account) as upload_task1:
+            with self.create_upload_task(self.account) as upload_task2:
+                job_id = controller.create_job([upload_task1, upload_task2])
+                self.assertEqual(len(controller.active_jobs), 1)
+                self.assertEqual(len(controller.completed_jobs), 0)
+
+                job = controller.get_job(job_id)
+                yield from job.wait()
+                self.assertEqual(len(controller.active_jobs), 0)
+                self.assertEqual(len(controller.completed_jobs), 1)
+
+    @async_test
+    def test_controller_multi_jobs(self):
+        controller = upload.ImageUploadJobController(
+                self._log, self._loop
+                )
+
+        with self.create_upload_task(self.account) as upload_task1:
+            with self.create_upload_task(self.account) as upload_task2:
+                job1_id = controller.create_job([upload_task1])
+                job2_id = controller.create_job([upload_task2])
+                self.assertEqual(len(controller.active_jobs), 2)
+                self.assertEqual(len(controller.completed_jobs), 0)
+
+                job1 = controller.get_job(job1_id)
+                job2 = controller.get_job(job2_id)
+
+                yield from asyncio.wait(
+                        [job1.wait(), job2.wait()],
+                        loop=self._loop
+                        )
+
+                self.assertEqual(len(controller.active_jobs), 0)
+                self.assertEqual(len(controller.completed_jobs), 2)
+
+
+class TestRateCalc(unittest.TestCase):
+    def test_no_smoothing(self):
+        calc = upload.ByteRateCalculator(1)
+        self.assertEqual(0, calc.rate)
+        calc.add_measurement(100, 1)
+        self.assertEqual(100, calc.rate)
+        calc.add_measurement(400, 2)
+        self.assertEqual(200, calc.rate)
+
+    def test_smoothing(self):
+        calc = upload.ByteRateCalculator(2)
+        calc.add_measurement(100, 1)
+        self.assertEqual(100, calc.rate)
+
+        calc.add_measurement(400, 2)
+        self.assertEqual(150, calc.rate)
+
+        calc.add_measurement(400, 2)
+        self.assertEqual(175, calc.rate)
+
+
+class TestUploadProgress(unittest.TestCase):
+    def setUp(self):
+        self._loop = asyncio.get_event_loop()
+        self._log = logging.getLogger(__file__)
+
+    def test_write_proxy(self):
+        mem_hdl = io.BytesIO()
+        proxy = upload.UploadProgressWriteProxy(self._log, self._loop, 1000, mem_hdl)
+
+        data = b'test_bytes'
+
+        proxy.write(data)
+        self.assertEqual(data, mem_hdl.getvalue())
+        self.assertEqual(len(data), proxy.bytes_written)
+        self.assertEqual(1000, proxy.bytes_total)
+        self.assertEqual(1, proxy.progress_percent)
+
+        proxy.close()
+        self.assertTrue(mem_hdl.closed)
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='TEST %(message)s')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+
+    args, unknown = parser.parse_known_args(argv)
+    if args.no_runner:
+        runner = None
+
+    # Set the global logging level
+    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
+
+if __name__ == '__main__':
+    main()
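The TestRateCalc cases above pin down the smoothing behaviour of upload.ByteRateCalculator: a smoothing factor of 1 tracks each sample directly, while a factor of N moves the rate 1/N of the way toward each new sample. A hypothetical reimplementation consistent with those expectations (a sketch, not the actual upload module source):

    class SmoothedByteRateCalculator:
        def __init__(self, factor):
            self._factor = factor    # smoothing factor; 1 disables smoothing
            self._rate = 0

        @property
        def rate(self):
            return self._rate

        def add_measurement(self, num_bytes, seconds):
            sample = num_bytes / seconds
            if self._rate == 0:
                self._rate = sample  # first sample seeds the rate
            else:
                self._rate += (sample - self._rate) / self._factor
            return self._rate

    calc = SmoothedByteRateCalculator(2)
    assert calc.add_measurement(100, 1) == 100   # seeded
    assert calc.add_measurement(400, 2) == 150   # (100 + 200) / 2
    assert calc.add_measurement(400, 2) == 175   # (150 + 200) / 2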
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/CMakeLists.txt b/rwlaunchpad/plugins/rwlaunchpadtasklet/CMakeLists.txt
new file mode 100644 (file)
index 0000000..b1f6a7f
--- /dev/null
@@ -0,0 +1,67 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 05/15/2015
+# 
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwlaunchpad)
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/${TASKLET_NAME}/__init__.py
+    rift/tasklets/${TASKLET_NAME}/convert_pkg.py
+    rift/tasklets/${TASKLET_NAME}/datacenters.py
+    rift/tasklets/${TASKLET_NAME}/export.py
+    rift/tasklets/${TASKLET_NAME}/extract.py
+    rift/tasklets/${TASKLET_NAME}/image.py
+    rift/tasklets/${TASKLET_NAME}/message.py
+    rift/tasklets/${TASKLET_NAME}/onboard.py
+    rift/tasklets/${TASKLET_NAME}/state.py
+    rift/tasklets/${TASKLET_NAME}/tasklet.py
+    rift/tasklets/${TASKLET_NAME}/tosca.py
+    rift/tasklets/${TASKLET_NAME}/uploader.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
+
+rift_python_install_tree(
+  FILES
+    rift/package/__init__.py
+    rift/package/archive.py
+    rift/package/charm.py
+    rift/package/checksums.py
+    rift/package/config.py
+    rift/package/convert.py
+    rift/package/icon.py
+    rift/package/image.py
+    rift/package/package.py
+    rift/package/script.py
+    rift/package/store.py
+    rift/package/cloud_init.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
+
+rift_add_subdirs(test scripts)
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/Makefile b/rwlaunchpad/plugins/rwlaunchpadtasklet/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/__init__.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/archive.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/archive.py
new file mode 100644 (file)
index 0000000..fffce99
--- /dev/null
@@ -0,0 +1,152 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import io
+import os
+import tarfile
+import time
+
+from . import package
+
+class ArchiveError(Exception):
+    pass
+
+
+def get_size(hdl):
+    """ Get number of bytes of content within file hdl
+    Set the file position to original position before returning
+
+    Returns:
+        Number of bytes in the hdl file object
+    """
+    old_pos = hdl.tell()
+    hdl.seek(0, os.SEEK_END)
+    size = hdl.tell()
+    hdl.seek(old_pos)
+
+    return size
+
+
+class TarPackageArchive(object):
+    """  This class represents a package stored within a tar.gz archive file """
+    def __init__(self, log, tar_file_hdl, mode="r"):
+        self._log = log
+        self._tar_filehdl = tar_file_hdl
+        self._tar_infos = {}
+
+        self._tarfile = tarfile.open(fileobj=tar_file_hdl, mode=mode)
+
+        self.load_archive()
+
+    @classmethod
+    def from_package(cls, log, pkg, tar_file_hdl):
+        """ Creates a TarPackageArchive from a existing Package
+
+        Arguments:
+            log - logger
+            pkg - a DescriptorPackage instance
+            tar_file_hdl - a writeable file handle to write tar archive data
+
+        Returns:
+            A TarPackageArchive instance
+        """
+
+        def set_common_tarinfo_fields(tar_info):
+            tar_info.uid = os.getuid()
+            tar_info.gid = os.getgid()
+            tar_info.mtime = time.time()
+            tar_info.uname = "rift"
+            tar_info.gname = "rift"
+
+        archive = TarPackageArchive(log, tar_file_hdl, mode='w:gz')
+        for pkg_file in pkg.files:
+            tar_info = tarfile.TarInfo(name=pkg_file)
+            tar_info.type = tarfile.REGTYPE
+            tar_info.mode = pkg.get_file_mode(pkg_file)
+            set_common_tarinfo_fields(tar_info)
+            with pkg.open(pkg_file) as pkg_file_hdl:
+                tar_info.size = get_size(pkg_file_hdl)
+                archive.tarfile.addfile(tar_info, pkg_file_hdl)
+
+        for pkg_dir in pkg.dirs:
+            tar_info = tarfile.TarInfo(name=pkg_dir)
+            tar_info.type = tarfile.DIRTYPE
+            tar_info.mode = 0o775
+            set_common_tarinfo_fields(tar_info)
+            archive.tarfile.addfile(tar_info)
+
+        archive.load_archive()
+        archive.close()
+
+        return archive
+
+    def __repr__(self):
+        return "TarPackageArchive(%s)" % self._tar_filehdl
+
+    def __del__(self):
+        self.close()
+
+    def close(self):
+        """ Close the opened tarfile"""
+        if self._tarfile is not None:
+            self._tarfile.close()
+            self._tarfile = None
+
+    def load_archive(self):
+        self._tar_infos = {info.name: info for info in self._tarfile.getmembers() if info.name}
+
+    @property
+    def tarfile(self):
+        return self._tarfile
+
+    @property
+    def filenames(self):
+        """ The list of file members within the tar file """
+        return [name for name in self._tar_infos if tarfile.TarInfo.isfile(self._tar_infos[name])]
+
+    def open_file(self, rel_file_path):
+        """ Opens a file within the archive as read-only, byte mode.
+
+        Arguments:
+            rel_file_path - The file path within the archive to open
+
+        Returns:
+            A file like object (see tarfile.extractfile())
+
+        Raises:
+            FileNotFoundError - The file could not be found within the archive.
+            ArchiveError - The file could not be opened for some generic reason.
+        """
+        if rel_file_path not in self._tar_infos:
+            raise FileNotFoundError("Could not find %s in tar file" % rel_file_path)
+
+        try:
+            return self._tarfile.extractfile(rel_file_path)
+        except tarfile.TarError as e:
+            msg = "Failed to read file {} from tarfile {}: {}".format(
+                  rel_file_path, self._tar_filehdl, str(e)
+                  )
+            self._log.error(msg)
+            raise ArchiveError(msg) from e
+
+    def create_package(self):
+        """  Creates a Descriptor package from the archive contents """
+        pkg = package.DescriptorPackage.from_package_files(self._log, self.open_file, self.filenames)
+        for pkg_file in self.filenames:
+            pkg.add_file(pkg_file, self._tar_infos[pkg_file].mode)
+
+        return pkg
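To see the archive class end to end, here is a hedged sketch that builds a tiny tar.gz in memory to stand in for an uploaded package and reads a file back out (hypothetical file names; assumes only the standard library plus the class above):

    import io
    import logging
    import tarfile

    log = logging.getLogger("archive-demo")

    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode="w:gz") as tar:
        data = b"name: demo\n"
        info = tarfile.TarInfo(name="demo_vnfd/vnfd.yaml")
        info.size = len(data)
        tar.addfile(info, io.BytesIO(data))
    buf.seek(0)

    archive = TarPackageArchive(log, buf)
    print(archive.filenames)                      # ['demo_vnfd/vnfd.yaml']
    with archive.open_file("demo_vnfd/vnfd.yaml") as hdl:
        print(hdl.read())                         # b'name: demo\n'
    archive.close()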
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/charm.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/charm.py
new file mode 100644 (file)
index 0000000..d907731
--- /dev/null
@@ -0,0 +1,94 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import re
+import os.path
+
+from . import package
+
+
+class CharmExtractionError(Exception):
+    pass
+
+
+class PackageCharmExtractor(object):
+    """ This class is reponsible for extracting charms to the correct directory
+
+    In order to remain compatible with the existing Jujuclient, we extract the charms
+    to a known location (RIFT-13282)
+    """
+    DEFAULT_INSTALL_DIR = os.path.join(
+            os.environ["RIFT_ARTIFACTS"],
+            "launchpad"
+            )
+
+    CHARM_REGEX = "{prefix}charms/(trusty/)?(?P<charm_name>[^/]+)$"
+
+    def __init__(self, log, install_dir=DEFAULT_INSTALL_DIR):
+        self._log = log
+        self._install_dir = install_dir
+
+    def _get_rel_dest_path(self, descriptor_id, charm_name):
+        dest_rel_path = "libs/{}/charms/trusty/{}".format(descriptor_id, charm_name)
+        dest_path = os.path.join(self._install_dir, dest_rel_path)
+        return dest_path
+
+    @classmethod
+    def charm_dir_map(cls, package):
+        charm_map = {}
+        regex = cls.CHARM_REGEX.format(prefix=package.prefix)
+
+        for dir_name in package.dirs:
+            match = re.match(regex, dir_name)
+            if match is None:
+                continue
+
+            charm_name = match.group("charm_name")
+            if charm_name == "trusty":
+                continue
+
+            charm_map[charm_name] = dir_name
+
+        return charm_map
+
+    def get_extracted_charm_dir(self, package_id, charm_name):
+        return os.path.join(
+                self._get_rel_dest_path(package_id, charm_name),
+                )
+
+    def extract_charms(self, pkg):
+        """ Extract charms contained within the DescriptorPackage
+        to the known charm directory.
+
+        Arguments:
+            pkg - The descriptor package that MAY contain charm directories
+
+        Raises:
+            CharmExtractionError - Charms in the package failed to get extracted
+        """
+        descriptor_id = pkg.descriptor_id
+        charm_dir_map = PackageCharmExtractor.charm_dir_map(pkg)
+
+        for charm_name, charm_dir in charm_dir_map.items():
+            dest_rel_path = self._get_rel_dest_path(descriptor_id, charm_name)
+            dest_path = os.path.join(self._install_dir, dest_rel_path)
+
+            self._log.debug("Extracting %s charm to %s", charm_name, dest_path)
+            try:
+                pkg.extract_dir(charm_dir, dest_path)
+            except package.ExtractError as e:
+                raise CharmExtractionError("Failed to extract charm %s" % charm_name) from e
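The CHARM_REGEX above accepts charm directories both with and without the intermediate trusty/ series directory. A quick illustration against hypothetical package paths (only the regex logic is exercised here):

    import re

    CHARM_REGEX = "{prefix}charms/(trusty/)?(?P<charm_name>[^/]+)$"
    prefix = "my_vnfd/"  # assumed package.prefix value

    for dir_name in ["my_vnfd/charms/trusty/pingpong",
                     "my_vnfd/charms/pingpong",
                     "my_vnfd/icons"]:
        match = re.match(CHARM_REGEX.format(prefix=prefix), dir_name)
        print(dir_name, "->", match.group("charm_name") if match else None)

    # my_vnfd/charms/trusty/pingpong -> pingpong
    # my_vnfd/charms/pingpong -> pingpong
    # my_vnfd/icons -> None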
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/checksums.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/checksums.py
new file mode 100644 (file)
index 0000000..975967e
--- /dev/null
@@ -0,0 +1,79 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import hashlib
+import re
+
+def checksum_string(s):
+    return hashlib.md5(s.encode('utf-8')).hexdigest()
+
+
+def checksum(fd):
+    """ Calculate a md5 checksum of fd file handle
+
+    Arguments:
+      fd: A file descriptor returned from an open() call
+
+    Returns:
+      An md5 checksum of the file
+
+    """
+    pos = fd.tell()
+    try:
+        current = hashlib.md5()
+        while True:
+            data = fd.read(2 ** 16)
+            if len(data) == 0:
+                return current.hexdigest()
+            current.update(data)
+    finally:
+        fd.seek(pos)
+
+
+class ArchiveChecksums(dict):
+    @classmethod
+    def from_file_desc(cls, fd):
+        checksum_pattern = re.compile(r"(\S+)\s+(\S+)")
+        checksums = dict()
+
+        pos = fd.tell()
+        try:
+            for line in (line.decode('utf-8').strip() for line in fd if line):
+
+                # Skip comments
+                if line.startswith('#'):
+                    continue
+
+                # Skip lines that do not contain the pattern we are looking for
+                result = checksum_pattern.search(line)
+                if result is None:
+                    continue
+
+                chksum, filepath = result.groups()
+                checksums[filepath] = chksum
+
+        finally:
+            fd.seek(pos)
+
+        return cls(checksums)
+
+    def to_string(self):
+        string = ""
+        for file_name, file_checksum in self.items():
+            string += "{}  {}\n".format(file_name, file_checksum)
+
+        return string
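A short usage sketch of the helpers above, mirroring the md5sum-style "checksum  path" line format that from_file_desc parses (in-memory handles and an assumed file name, for illustration only):

    import io

    payload = io.BytesIO(b"hello world")
    digest = checksum(payload)        # md5 hex digest; file position restored
    assert payload.tell() == 0

    listing = io.BytesIO(
        b"# comment lines are skipped\n" +
        (digest + "  icons/logo.png\n").encode("utf-8")
    )
    sums = ArchiveChecksums.from_file_desc(listing)
    assert sums["icons/logo.png"] == digest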
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/cloud_init.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/cloud_init.py
new file mode 100644 (file)
index 0000000..78c258c
--- /dev/null
@@ -0,0 +1,76 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import re
+import os.path
+
+from . import package
+
+
+class CloudInitExtractionError(Exception):
+    pass
+
+
+class PackageCloudInitExtractor(object):
+    """ This class is reponsible for extracting cloud_init scripts to the correct directory
+    """
+
+    SCRIPT_REGEX = "{prefix}/?cloud_init/(?P<script_name>[^/]+)$"
+
+    def __init__(self, log):
+        self._log = log
+
+    @classmethod
+    def package_script_files(cls, package):
+        script_file_map = {}
+
+        for file_name in package.files:
+            match = re.match(
+                    cls.SCRIPT_REGEX.format(prefix=package.prefix),
+                    file_name,
+                    )
+            if match is None:
+                continue
+
+            script_name = match.group("script_name")
+            script_file_map[script_name] = file_name
+
+        return script_file_map
+
+    def read_script(self, pkg, filename):
+        descriptor_id = pkg.descriptor_id
+        script_files = PackageCloudInitExtractor.package_script_files(pkg)
+
+        for script_name, script_file in script_files.items():
+            if script_name == filename:
+                self._log.debug("Found %s script file in package at %s", filename, script_file)
+
+                try:
+                    with pkg.open(script_file) as f:
+                        userdata = f.read()
+                        self._log.info("cloud_init read from file %s", userdata)
+                        # File contents are read in binary string, decode to regular string and return
+                        return userdata.decode()
+                except package.ExtractError as e:
+                    raise CloudInitExtractionError("Failed to extract script %s" % script_name) from e
+
+        # If we've reached this point without finding a matching cloud_init
+        # script, raise an exception, since a cloud_init file was expected
+        # to be present for the VDU
+        errmsg = "No cloud-init config file found in the descriptor package"
+        self._log.error(errmsg)
+        raise CloudInitExtractionError(errmsg)
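A hedged example of pulling a cloud-init script out of a package with the extractor above (hypothetical names; assumes pkg is a DescriptorPackage, loaded elsewhere, whose files include "<prefix>cloud_init/vdu_init.cfg"):

    import logging

    extractor = PackageCloudInitExtractor(logging.getLogger("demo"))
    try:
        userdata = extractor.read_script(pkg, "vdu_init.cfg")
    except CloudInitExtractionError:
        userdata = None   # the VDU referenced a script the package lacks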
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/config.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/config.py
new file mode 100644 (file)
index 0000000..9a06116
--- /dev/null
@@ -0,0 +1,91 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import re
+import os.path
+
+from . import package
+
+
+class ConfigExtractionError(Exception):
+    pass
+
+
+class PackageConfigExtractor(object):
+    """ This class is reponsible for extracting config data to the correct directory
+
+    In order to remain compatible with the existing ConfigManager, we extract the config
+    to a known location (RIFT-13282)
+    """
+    DEFAULT_INSTALL_DIR = os.path.join(
+            os.environ["RIFT_ARTIFACTS"],
+            "launchpad"
+            )
+
+    CONFIG_REGEX = "{prefix}(ns_config|vnf_config)/(?P<config_name>[^/]+.yaml)$"
+
+    def __init__(self, log, install_dir=DEFAULT_INSTALL_DIR):
+        self._log = log
+        self._install_dir = install_dir
+
+    def _get_rel_dest_path(self, descriptor_id, config_name):
+        dest_rel_path = "libs/{}/config/{}".format(descriptor_id, config_name)
+        dest_path = os.path.join(self._install_dir, dest_rel_path)
+        return dest_path
+
+    @classmethod
+    def package_config_files(cls, package):
+        config_map = {}
+        regex = cls.CONFIG_REGEX.format(prefix=package.prefix)
+
+        for file_name in package.files:
+            match = re.match(regex, file_name)
+            if match is None:
+                continue
+
+            config_name = match.group("config_name")
+
+            config_map[config_name] = file_name
+
+        return config_map
+
+    def get_extracted_config_path(self, package_id, config_name):
+        return os.path.join(
+                self._get_rel_dest_path(package_id, os.path.basename(config_name)),
+                )
+
+    def extract_configs(self, pkg):
+        """ Extract any configuration files from the DescriptorPackage
+
+        Arguments:
+            pkg - A DescriptorPackage
+
+        Raises:
+            ConfigExtractionError - The configuration could not be extracted
+        """
+        descriptor_id = pkg.descriptor_id
+
+        config_files = PackageConfigExtractor.package_config_files(pkg).items()
+        for config_name, config_file in config_files:
+            dest_rel_path = self._get_rel_dest_path(descriptor_id, config_name)
+            dest_path = os.path.join(self._install_dir, dest_rel_path)
+
+            self._log.debug("Extracting %s config to %s", config_name, dest_path)
+            try:
+                pkg.extract_file(config_file, dest_path)
+            except package.ExtractError as e:
+                raise ConfigExtractionError("Failed to extract config %s" % config_name) from e
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/convert.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/convert.py
new file mode 100644 (file)
index 0000000..7571c57
--- /dev/null
@@ -0,0 +1,283 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import json
+import os
+import tempfile
+
+import gi
+gi.require_version('RwNsdYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwYang', '1.0')
+from gi.repository import (
+        RwNsdYang,
+        RwVnfdYang,
+        NsdYang,
+        VnfdYang,
+        RwYang,
+        )
+
+
+class UnknownExtensionError(Exception):
+    pass
+
+
+class SerializationError(Exception):
+    pass
+
+
+def decode(desc_data):
+    if isinstance(desc_data, bytes):
+        desc_data = desc_data.decode()
+
+    return desc_data
+
+
+class ProtoMessageSerializer(object):
+    """(De)Serializer/deserializer fo a specific protobuf message into various formats"""
+    libncx_model = None
+
+    def __init__(self, yang_ns, yang_pb_cls):
+        """ Create a serializer for a specific protobuf message """
+        self._yang_ns = yang_ns
+        self._yang_pb_cls = yang_pb_cls
+
+    @classmethod
+    def _deserialize_extension_method_map(cls):
+        return {
+                ".xml": cls._from_xml_file_hdl,
+                ".yml": cls._from_yaml_file_hdl,
+                ".yaml": cls._from_yaml_file_hdl,
+                ".json": cls._from_json_file_hdl,
+                }
+
+    @classmethod
+    def _serialize_extension_method_map(cls):
+        return {
+                ".xml": cls.to_xml_string,
+                ".yml": cls.to_yaml_string,
+                ".yaml": cls.to_yaml_string,
+                ".json": cls.to_json_string,
+                }
+
+    @classmethod
+    def is_supported_file(cls, filename):
+        """Returns whether a file has a supported file extension
+
+        Arguments:
+            filename - A descriptor file
+
+        Returns:
+            True if file extension is supported, False otherwise
+
+        """
+        _, extension = os.path.splitext(filename)
+        extension_lc = extension.lower()
+
+        return extension_lc in cls._deserialize_extension_method_map()
+
+    @property
+    def yang_namespace(self):
+        """ The Protobuf's GI namespace class (e.g. RwVnfdYang) """
+        return self._yang_ns
+
+    @property
+    def yang_class(self):
+        """ The Protobuf's GI class (e.g. RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd) """
+        return self._yang_pb_cls
+
+    @property
+    def model(self):
+        cls = self.__class__
+
+        # Cache the libncx model for the serializer class
+        if cls.libncx_model is None:
+            cls.libncx_model = RwYang.model_create_libncx()
+            cls.libncx_model.load_schema_ypbc(self.yang_namespace.get_schema())
+
+        return cls.libncx_model
+
+    def _from_xml_file_hdl(self, file_hdl):
+        xml = file_hdl.read()
+
+        return self.yang_class.from_xml_v2(self.model, decode(xml), strict=False)
+
+    def _from_json_file_hdl(self, file_hdl):
+        json = file_hdl.read()
+
+        return self.yang_class.from_json(self.model, decode(json), strict=False)
+
+    def _from_yaml_file_hdl(self, file_hdl):
+        yaml = file_hdl.read()
+
+        return self.yang_class.from_yaml(self.model, decode(yaml), strict=False)
+
+    def to_json_string(self, pb_msg):
+        """ Serialize a protobuf message into JSON
+
+        Arguments:
+            pb_msg - A GI-protobuf object of type provided into constructor
+
+        Returns:
+            A JSON string representing the protobuf message
+
+        Raises:
+            SerializationError - Message could not be serialized
+            TypeError - Incorrect protobuf type provided
+        """
+        if not isinstance(pb_msg, self._yang_pb_cls):
+            raise TypeError("Invalid protobuf message type provided")
+
+        try:
+            json_str = pb_msg.to_json(self.model)
+
+        except Exception as e:
+            raise SerializationError(e)
+
+        return json_str
+
+    def to_yaml_string(self, pb_msg):
+        """ Serialize a protobuf message into YAML
+
+        Arguments:
+            pb_msg - A GI-protobuf object of type provided into constructor
+
+        Returns:
+            A YAML string representing the protobuf message
+
+        Raises:
+            SerializationError - Message could not be serialized
+            TypeError - Incorrect protobuf type provided
+        """
+        if not isinstance(pb_msg, self._yang_pb_cls):
+            raise TypeError("Invalid protobuf message type provided")
+
+        try:
+            yaml_str = pb_msg.to_yaml(self.model)
+
+        except Exception as e:
+            raise SerializationError(e)
+
+        return yaml_str
+
+    def to_xml_string(self, pb_msg):
+        """ Serialize a protobuf message into XML
+
+        Arguments:
+            pb_msg - A GI-protobuf object of type provided into constructor
+
+        Returns:
+            An XML string representing the protobuf message
+
+        Raises:
+            SerializationError - Message could not be serialized
+            TypeError - Incorrect protobuf type provided
+        """
+        if not isinstance(pb_msg, self._yang_pb_cls):
+            raise TypeError("Invalid protobuf message type provided")
+
+        try:
+            xml_str = pb_msg.to_xml_v2(self.model)
+
+        except Exception as e:
+            raise SerializationError(e)
+
+        return xml_str
+
+    def from_file_hdl(self, file_hdl, extension):
+        """ Returns the deserialized protobuf message from file contents
+
+        This function determines the serialization format based on file extension
+
+        Arguments:
+            file_hdl - The file hdl to deserialize (set at pos 0)
+            extension - Extension of the file format (second item of os.path.splitext())
+
+        Returns:
+            A GI-Proto message of type that was provided into the constructor
+
+        Raises:
+            UnknownExtensionError - File extension is not of a known serialization format
+            SerializationError - File failed to be deserialized into the protobuf message
+        """
+
+        extension_lc = extension.lower()
+        extension_map = self._deserialize_extension_method_map()
+
+        if extension_lc not in extension_map:
+            raise UnknownExtensionError("Cannot detect message format for %s extension" % extension_lc)
+
+        try:
+            msg = extension_map[extension_lc](self, file_hdl)
+        except Exception as e:
+            raise SerializationError(e)
+
+        return msg
+
+    def to_string(self, pb_msg, extension):
+        """ Returns the serialized protobuf message for a particular file extension
+
+        This function determines the serialization format based on file extension
+
+        Arguments:
+            pb_msg - A GI-protobuf object of type provided into constructor
+            extension - Extension of the file format (second item of os.path.splitext())
+
+        Returns:
+            The serialized string form of the protobuf message
+
+        Raises:
+            UnknownExtensionError - File extension is not of a known serialization format
+            SerializationError - Message failed to be serialized into the requested format
+        """
+
+        extension_lc = extension.lower()
+        extension_map = self._serialize_extension_method_map()
+
+        if extension_lc not in extension_map:
+            raise UnknownExtensionError("Cannot detect message format for %s extension" % extension_lc)
+
+        try:
+            msg = extension_map[extension_lc](self, pb_msg)
+        except Exception as e:
+            raise SerializationError(e)
+
+        return msg
+
+
+class VnfdSerializer(ProtoMessageSerializer):
+    """ Creates a serializer for the VNFD descriptor"""
+    def __init__(self):
+        super().__init__(VnfdYang, VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd)
+
+
+class NsdSerializer(ProtoMessageSerializer):
+    """ Creates a serializer for the NSD descriptor"""
+    def __init__(self):
+        super().__init__(NsdYang, NsdYang.YangData_Nsd_NsdCatalog_Nsd)
+
+
+class RwVnfdSerializer(ProtoMessageSerializer):
+    """ Creates a serializer for the VNFD descriptor"""
+    def __init__(self):
+        super().__init__(RwVnfdYang, RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd)
+
+
+class RwNsdSerializer(ProtoMessageSerializer):
+    """ Creates a serializer for the NSD descriptor"""
+    def __init__(self):
+        super().__init__(RwNsdYang, RwNsdYang.YangData_Nsd_NsdCatalog_Nsd)
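A sketch of how these serializers are intended to be driven, assuming the RIFT GI bindings are importable in the environment and "my_nsd.json" is a hypothetical descriptor file on disk:

    import os

    serializer = NsdSerializer()

    filename = "my_nsd.json"
    if ProtoMessageSerializer.is_supported_file(filename):
        _, extension = os.path.splitext(filename)
        with open(filename, "rb") as file_hdl:
            nsd_msg = serializer.from_file_hdl(file_hdl, extension)

        # Re-serialize the same message into another supported format
        yaml_str = serializer.to_string(nsd_msg, ".yaml")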
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/icon.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/icon.py
new file mode 100644 (file)
index 0000000..1c3d209
--- /dev/null
@@ -0,0 +1,96 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import re
+import os.path
+
+from . import package
+
+class IconExtractionError(Exception):
+    pass
+
+
+class PackageIconExtractor(object):
+    """ This class extracts icons to a known location for the UI to access """
+
+    DEFAULT_INSTALL_DIR = os.path.join(
+            os.environ["RIFT_INSTALL"],
+            "usr/share/rw.ui/skyquake/plugins/composer/public/assets/logos"
+            )
+
+    ICON_REGEX = "{prefix}/?icons/(?P<icon_name>[^/]+)$"
+
+    def __init__(self, log, install_dir=DEFAULT_INSTALL_DIR):
+        self._log = log
+        self._install_dir = install_dir
+
+    def _get_rel_dest_path(self, descriptor_type, descriptor_id, icon_name):
+        dest_path = os.path.join(self._install_dir, descriptor_type, descriptor_id, icon_name)
+        return dest_path
+
+    @classmethod
+    def package_icon_files(cls, package):
+        icon_file_map = {}
+
+        for file_name in package.files:
+            match = re.match(
+                    cls.ICON_REGEX.format(prefix=package.prefix),
+                    file_name,
+                    )
+            if match is None:
+                continue
+
+            icon_name = match.group("icon_name")
+
+            icon_file_map[icon_name] = file_name
+
+        return icon_file_map
+
+    def get_extracted_icon_path(self, descriptor_type, descriptor_id, icon_name):
+        return os.path.join(
+                self._get_rel_dest_path(descriptor_type, descriptor_id, icon_name),
+                )
+
+    def extract_icons(self, pkg):
+        """ Extract any icons in the package to the UI filesystem location
+
+        Arguments:
+            pkg - A DescriptorPackage
+        """
+        descriptor_id = pkg.descriptor_id
+        icon_files = PackageIconExtractor.package_icon_files(pkg)
+
+        for icon_name, icon_file in icon_files.items():
+            dest_rel_path = self._get_rel_dest_path(pkg.descriptor_type, descriptor_id, icon_name)
+            dest_path = os.path.join(self._install_dir, dest_rel_path)
+
+            dest_dir = os.path.dirname(dest_path)
+            try:
+                os.makedirs(dest_dir, exist_ok=True)
+            except OSError as e:
+                self._log.error("Failed to create icon directory %s: %s", dest_dir, str(e))
+                continue
+
+
+            self._log.debug("Extracting %s icon to %s", icon_name, dest_path)
+            try:
+                pkg.extract_file(icon_file, dest_path)
+            except package.ExtractError as e:
+                self._log.error("Failed to extact icon %s: %s", icon_name, str(e))
+                continue
+
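
As a usage sketch (the archive name is hypothetical; the default install directory requires RIFT_INSTALL to be set): icons shipped under a package's icons/ directory are copied into the composer UI's asset tree, keyed by descriptor type and id.

    import logging
    import rift.package.package as pkg_mod
    from rift.package.icon import PackageIconExtractor

    log = logging.getLogger("onboard")
    with open("ping_vnfd.tar.gz", "rb") as tar_hdl:
        pkg = pkg_mod.TarPackageArchive(log, tar_hdl).create_package()
        PackageIconExtractor(log).extract_icons(pkg)
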
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/image.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/image.py
new file mode 100644 (file)
index 0000000..9b9d17a
--- /dev/null
@@ -0,0 +1,55 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import re
+
+IMAGE_REGEX = r"{prefix}/?images/(?P<image_name>[^/]+.\.qcow2)$"
+
+
+def is_image_file(image_path):
+    match = re.match(
+            IMAGE_REGEX.format(prefix=".*"),
+            image_path,
+            )
+
+    return match is not None
+
+
+def get_package_image_files(package):
+    """ Return a image name/file map for images in the descriptor
+
+    Arguments:
+        package - A DescriptorPackage
+
+    Returns:
+        A dictionary mapping image names to the relative path within
+        the package.
+    """
+    image_file_map = {}
+
+    for file_name in package.files:
+        match = re.match(
+                IMAGE_REGEX.format(prefix=package.prefix),
+                file_name,
+                )
+        if match is None:
+            continue
+
+        image_name = match.group("image_name")
+        image_file_map[image_name] = file_name
+
+    return image_file_map
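
A short sketch of how these helpers behave (paths are hypothetical):

    from rift.package.image import is_image_file, get_package_image_files

    assert is_image_file("ping_vnfd/images/fedora-20.qcow2")
    assert not is_image_file("ping_vnfd/icons/logo.png")

    # Given a DescriptorPackage pkg containing the qcow2 above:
    #   get_package_image_files(pkg)
    #   -> {"fedora-20.qcow2": "ping_vnfd/images/fedora-20.qcow2"}
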
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/package.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/package.py
new file mode 100644 (file)
index 0000000..355b23b
--- /dev/null
@@ -0,0 +1,658 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import io
+import os
+import re
+import shutil
+import tarfile
+
+from . import checksums
+from . import convert
+from . import image
+
+
+class ArchiveError(Exception):
+    pass
+
+
+class ExtractError(Exception):
+    pass
+
+
+class PackageError(Exception):
+    pass
+
+
+class PackageValidationError(Exception):
+    pass
+
+
+class PackageFileChecksumError(PackageValidationError):
+    def __init__(self, filename):
+        self.filename = filename
+        super().__init__("Checksum mismatch for {}".format(filename))
+
+
+class DescriptorPackage(object):
+    """ This class provides an base class for a descriptor package representing
+
+    A descriptor package is a package which contains a single descriptor and any
+    associated files (logos, charms, scripts, etc).  This package representation
+    attempts to be agnostic as to where the package files are being stored
+    (in memory, on disk, etc).
+
+    The package provides a simple interface to interact with the files within the
+    package and access the contained descriptor.
+    """
+    DESCRIPTOR_REGEX = r"{prefix}({descriptor_type}/[^/]*|[^/]*{descriptor_type})\.(xml|yml|yaml|json)$"
+
+    def __init__(self, log, open_fn):
+        self._log = log
+        self._open_fn = open_fn
+
+        self._package_file_mode_map = {}
+        self._package_dirs = set()
+
+    @property
+    def prefix(self):
+        """ Return the leading parent directories shared by all files in the package
+
+        In order to remain flexible as to where tar was invoked to create the package,
+        the prefix represents the common parent directory path which all files in the
+        package have in common.
+        """
+        entries = list(self._package_file_mode_map) + list(self._package_dirs)
+
+        if len(entries) > 1:
+            prefix = os.path.commonprefix(entries)
+            if prefix and not prefix.endswith("/"):
+                prefix += "/"
+        elif len(entries) == 1:
+            entry = entries[0]
+            if "/" in entry:
+                prefix = os.path.dirname(entry) + "/"
+            else:
+                prefix = ""
+        else:
+            prefix = ""
+
+        return prefix
+
+    @property
+    def files(self):
+        """ Return all files (with the prefix) in the package """
+        return list(self._package_file_mode_map)
+
+    @property
+    def dirs(self):
+        """ Return all directories in the package """
+        return list(self._package_dirs)
+
+    @property
+    def descriptor_type(self):
+        """ A shorthand name for the type of descriptor (e.g. nsd)"""
+        raise NotImplementedError("Subclass must implement this property")
+
+    @property
+    def serializer(self):
+        """ An instance of convert.ProtoMessageSerializer """
+        raise NotImplementedError("Subclass must implement this property")
+
+    @property
+    def descriptor_file(self):
+        """ The descriptor file name (with prefix) """
+        regex = self.__class__.DESCRIPTOR_REGEX.format(
+                descriptor_type=self.descriptor_type,
+                prefix=self.prefix,
+                )
+        desc_file = None
+        for filename in self.files:
+            if re.match(regex, filename):
+                if desc_file is not None:
+                    raise PackageError("Package contains more than one descriptor")
+                desc_file = filename
+
+        if desc_file is None:
+            raise PackageError("Could not find descriptor file in package")
+
+        return desc_file
+
+    @property
+    def descriptor_msg(self):
+        """ The proto-GI descriptor message """
+        filename = self.descriptor_file
+        with self.open(filename) as hdl:
+            _, ext = os.path.splitext(filename)
+            desc_msg = self.serializer.from_file_hdl(hdl, ext)
+            return desc_msg
+
+    @property
+    def json_descriptor(self):
+        """  The JSON serialized descriptor message"""
+        desc_msg = self.descriptor_msg
+        return self.serializer.to_json_string(desc_msg)
+
+    @property
+    def descriptor_id(self):
+        """  The descriptor id which uniquely identifies this descriptor in the system """
+        if not self.descriptor_msg.has_field("id"):
+            msg = "Descriptor must have an id field"
+            self._log.error(msg)
+            raise PackageError(msg)
+
+        return self.descriptor_msg.id
+
+    @classmethod
+    def get_descriptor_patterns(cls):
+        """ Returns a tuple of descriptor regex and Package Types  """
+        package_types = (VnfdPackage, NsdPackage)
+        patterns = []
+
+        for pkg_cls in package_types:
+            regex = cls.DESCRIPTOR_REGEX.format(
+                    descriptor_type=pkg_cls.DESCRIPTOR_TYPE,
+                    prefix=".*"
+                    )
+
+            patterns.append((regex, pkg_cls))
+
+        return patterns
+
+    @classmethod
+    def from_package_files(cls, log, open_fn, files):
+        """ Creates a new DescriptorPackage subclass instance from a list of files
+
+        This classmethod detects the Package type from the package contents
+        and returns a new Package instance.
+
+        This will NOT subsequently add the files to the package so that must
+        be done by the client
+
+        Arguments:
+            log - A logger
+            open_fn - A function which can take a file name and mode and return
+                      a file handle.
+            files - A list of files which would be added to the package after
+                    instantiation
+
+        Returns:
+            A new DescriptorPackage subclass of the correct type for the descriptor
+
+        Raises:
+            PackageError - Package type could not be determined from the list of files.
+        """
+        patterns = cls.get_descriptor_patterns()
+        pkg_cls = None
+        regexes = set()
+        for name in files:
+            for regex, package_cls in patterns:
+                regexes.add(regex)
+                if re.match(regex, name) is not None:
+                    pkg_cls = package_cls
+                    break
+
+        if pkg_cls is None:
+            log.error("No file in archive matched known descriptor formats: %s", regexes)
+            raise PackageError("Could not determine package type from contents")
+
+        package = pkg_cls(log, open_fn)
+        return package
+
+    @classmethod
+    def from_descriptor_file_hdl(cls, log, file_hdl):
+        """ Creates a new DescriptorPackage from a descriptor file handle
+
+        The descriptor file is added to the package before returning.
+
+        Arguments:
+            log - A logger
+            file_hdl - A file handle whose name attribute can be recognized as
+                       particular descriptor type.
+
+        Returns:
+            A new DescriptorPackage subclass of the correct type for the descriptor
+
+        Raises:
+            PackageError - Package type could not be determined from the list of files.
+            ValueError - file_hdl did not have a name attribute provided
+        """
+
+        package_types = (VnfdPackage, NsdPackage)
+        filename_patterns = []
+        for package_cls in package_types:
+            filename_patterns.append(
+                    (r".*{}.*".format(package_cls.DESCRIPTOR_TYPE), package_cls)
+                    )
+
+        if not hasattr(file_hdl, 'name'):
+            raise ValueError("File descriptor must have a name attribute to create a descriptor package")
+
+        # Iterate through the recognized patterns and assign files accordingly
+        package_cls = None
+        for pattern, pkg_cls in filename_patterns:
+            if re.match(pattern, file_hdl.name):
+                package_cls = pkg_cls
+                break
+
+        if not package_cls:
+            raise PackageError("Could not determine package type from file name: %s" % file_hdl.name)
+
+        _, ext = os.path.splitext(file_hdl.name)
+        try:
+            package_cls.SERIALIZER.from_file_hdl(file_hdl, ext)
+        except convert.SerializationError as e:
+            raise PackageError("Could not deserialize descriptor %s" % file_hdl.name) from e
+
+        # Create a new file handle for each open call to prevent independent clients
+        # from affecting each other
+        file_hdl.seek(0)
+        new_hdl = io.BytesIO(file_hdl.read())
+
+        def do_open(file_path):
+            assert file_path == file_hdl.name
+            hdl = io.BytesIO(new_hdl.getvalue())
+            return hdl
+
+        desc_pkg = package_cls(log, do_open)
+        desc_pkg.add_file(file_hdl.name)
+
+        return desc_pkg
+
+    def get_file_mode(self, pkg_file):
+        """ Returns the file mode for the package file
+
+        Arguments:
+            pkg_file - A file name in the package
+
+        Returns:
+            The permission mode
+
+        Raises:
+            PackageError - The file does not exist in the package
+        """
+        try:
+            return self._package_file_mode_map[pkg_file]
+        except KeyError as e:
+            msg = "Could not find package_file: %s" % pkg_file
+            self._log.error(msg)
+            raise PackageError(msg) from e
+
+    def extract_dir(self, src_dir, dest_root_dir, extract_images=False):
+        """ Extract a specific directory contents to dest_root_dir
+
+        Arguments:
+            src_dir - A directory within the package (None means all files/directories)
+            dest_root_dir - A directory to extract directory contents to
+            extract_images - A flag indicating whether we want to extract images
+
+        Raises:
+            ExtractError - Directory contents could not be extracted
+        """
+        if src_dir is not None and src_dir not in self._package_dirs:
+            raise ExtractError("Could not find source dir: %s" % src_dir)
+
+        for filename in self.files:
+            if not extract_images and image.is_image_file(filename):
+                continue
+
+            if src_dir is not None and not filename.startswith(src_dir):
+                continue
+
+            # Copy the contents of the file to the correct path
+            dest_file_path = os.path.join(dest_root_dir, filename)
+            dest_dir_path = os.path.dirname(dest_file_path)
+            if dest_dir_path:
+                os.makedirs(dest_dir_path, exist_ok=True)
+
+            with open(dest_file_path, 'wb') as dst_hdl:
+                with self.open(filename) as src_hdl:
+                    shutil.copyfileobj(src_hdl, dst_hdl, 10 * 1024 * 1024)
+
+                    # Set the file mode to original
+                    os.chmod(dest_file_path, self._package_file_mode_map[filename])
+
+    def extract_file(self, src_file, dest_file):
+        """ Extract a specific package file to dest_file
+
+        The destination directory will be created if it does not exist.
+
+        Arguments:
+            src_file - A file within the package
+            dest_file - A file path to extract file contents to
+
+        Raises:
+            ExtractError - Directory contents could not be extracted
+        """
+        if src_file not in self._package_file_mode_map:
+            msg = "Could not find source file %s" % src_file
+            self._log.error(msg)
+            raise ExtractError(msg)
+
+        # Copy the contents of the file to the correct path
+        dest_dir_path = os.path.dirname(dest_file)
+        if dest_dir_path:
+            os.makedirs(dest_dir_path, exist_ok=True)
+
+        with open(dest_file, 'wb') as dst_hdl:
+            with self.open(src_file) as src_hdl:
+                shutil.copyfileobj(src_hdl, dst_hdl, 10 * 1024 * 1024)
+
+                # Set the file mode to original
+                os.chmod(dest_file, self._package_file_mode_map[src_file])
+
+    def extract(self, dest_root_dir, extract_images=False):
+        """ Extract all package contents to a destination directory
+
+        Arguments:
+            dest_root_dir - The directory to extract package contents to
+            extract_images - A flag indicating whether images should also be extracted
+
+        Raises:
+            NotADirectoryError - dest_root_dir is not a directory
+        """
+        if not os.path.isdir(dest_root_dir):
+            raise NotADirectoryError(dest_root_dir)
+
+        self.extract_dir(None, dest_root_dir, extract_images)
+
+    def open(self, rel_path):
+        """ Open a file contained in the package in read-only, binary mode.
+
+        Arguments:
+            rel_path - The file path within the package
+
+        Returns:
+            A file-like object opened in read-only mode.
+
+        Raises:
+            PackageError - The file could not be opened
+        """
+        try:
+            return self._open_fn(rel_path)
+        except Exception as e:
+            msg = "Could not open file from package: %s" % rel_path
+            self._log.warning(msg)
+            raise PackageError(msg) from e
+
+    def add_file(self, rel_path, mode=0o777):
+        """ Add a file to the package.
+
+        The file should be specified as a relative path to the package
+        root.  The open_fn provided in the constructor must be able to
+        take the relative path and open the actual source file from
+        wherever the file actually is stored.
+
+        If the file's parent directories do not yet exist, add them to
+        the package.
+
+        Arguments:
+            rel_path - The file path relative to the top of the package.
+            mode - The permission mode the file should be stored with so
+                   it can be extracted with the correct permissions.
+
+        Raises:
+            PackageError - The file could not be added to the package
+        """
+        if not rel_path:
+            raise PackageError("Empty file name added")
+
+        if rel_path in self._package_file_mode_map:
+            raise PackageError("File %s already exists in package" % rel_path)
+
+        # If the file's directory is not in the package add it.
+        rel_dir = os.path.dirname(rel_path)
+        while rel_dir:
+            self._package_dirs.add(rel_dir)
+            rel_dir = os.path.dirname(rel_dir)
+
+        self._package_file_mode_map[rel_path] = mode
+
+    def add_dir(self, rel_path):
+        """ Add a directory to the package
+
+        Arguments:
+            rel_path - The directory's relative path.
+
+        Raises:
+            PackageError - A file already exists in the package with the same name.
+        """
+        if rel_path in self._package_file_mode_map:
+            raise PackageError("File already exists with the same name: %s", rel_path)
+
+        if rel_path in self._package_dirs:
+            self._log.warning("%s directory already exists", rel_path)
+            return
+
+        self._package_dirs.add(rel_path)
+
+
+class NsdPackage(DescriptorPackage):
+    DESCRIPTOR_TYPE = "nsd"
+    SERIALIZER = convert.RwNsdSerializer()
+
+    @property
+    def descriptor_type(self):
+        return "nsd"
+
+    @property
+    def serializer(self):
+        return NsdPackage.SERIALIZER
+
+
+class VnfdPackage(DescriptorPackage):
+    DESCRIPTOR_TYPE = "vnfd"
+    SERIALIZER = convert.RwVnfdSerializer()
+
+    @property
+    def descriptor_type(self):
+        return "vnfd"
+
+    @property
+    def serializer(self):
+        return VnfdPackage.SERIALIZER
+
+
+class PackageChecksumValidator(object):
+    """  This class uses the checksums.txt file in the package
+    and validates that all files in the package match the checksum that exists within
+    the file.
+    """
+    CHECKSUM_FILE = "{prefix}checksums.txt"
+
+    def __init__(self, log):
+        self._log = log
+
+    @classmethod
+    def get_package_checksum_file(cls, package):
+        checksum_file = cls.CHECKSUM_FILE.format(prefix=package.prefix)
+        if checksum_file not in package.files:
+            raise FileNotFoundError("%s does not exist in archive" % checksum_file)
+
+        return checksum_file
+
+    def validate(self, package):
+        """ Validate file checksums match that in the checksums.txt
+
+        Arguments:
+            package - The DescriptorPackage which possibly contains checksums.txt
+
+        Returns: A dictionary of files that were validated by the checksums.txt
+                 along with their checksums
+
+        Raises:
+            PackageValidationError - The package validation failed for some
+              generic reason.
+            PackageFileChecksumError - A file within the package did not match the
+              checksum within checksums.txt
+        """
+        validated_file_checksums = {}
+
+        try:
+            checksum_file = PackageChecksumValidator.get_package_checksum_file(package)
+            with package.open(checksum_file) as checksum_hdl:
+                archive_checksums = checksums.ArchiveChecksums.from_file_desc(checksum_hdl)
+        except (FileNotFoundError, PackageError) as e:
+            self._log.warning("Could not open package checksum file (%s). Not validating checksums.", str(e))
+            return validated_file_checksums
+
+        for pkg_file in package.files:
+            if pkg_file == checksum_file:
+                continue
+
+            pkg_file_no_prefix = pkg_file.replace(package.prefix, "", 1)
+            if pkg_file_no_prefix not in archive_checksums:
+                self._log.warning("File %s not found in checksum file %s",
+                                  pkg_file, checksum_file)
+                continue
+
+            try:
+                with package.open(pkg_file) as pkg_file_hdl:
+                    file_checksum = checksums.checksum(pkg_file_hdl)
+            except PackageError as e:
+                msg = "Could not read package file {} for checksum validation: {}".format(
+                      pkg_file, str(e))
+                self._log.error(msg)
+                raise PackageValidationError(msg) from e
+
+            if archive_checksums[pkg_file_no_prefix] != file_checksum:
+                msg = "{} checksum ({}) did match expected checksum ({})".format(
+                        pkg_file, file_checksum, archive_checksums[pkg_file_no_prefix]
+                        )
+                self._log.error(msg)
+                raise PackageFileChecksumError(pkg_file)
+
+            validated_file_checksums[pkg_file] = file_checksum
+
+        return validated_file_checksums
+
+
+class TarPackageArchive(object):
+    """  This class represents a package stored within a tar.gz archive file """
+    def __init__(self, log, tar_file_hdl, mode="r"):
+        self._log = log
+        self._tar_filepath = tar_file_hdl
+        self._tar_infos = {}
+
+        self._tarfile = tarfile.open(fileobj=tar_file_hdl, mode=mode)
+
+        self._load_archive()
+
+    def __repr__(self):
+        return "TarPackageArchive(%s)" % self._tar_filepath
+
+    def _get_members(self):
+        return self._tarfile.getmembers()
+
+    def _load_archive(self):
+        self._tar_infos = {info.name: info for info in self._get_members() if info.name}
+
+    def __del__(self):
+        self.close()
+
+    def close(self):
+        """ Close the opened tarfile"""
+        if self._tarfile is not None:
+            self._tarfile.close()
+            self._tarfile = None
+
+    @property
+    def filenames(self):
+        """ The list of file members within the tar file """
+        return [name for name in self._tar_infos if self._tar_infos[name].isfile()]
+
+    def open_file(self, rel_file_path):
+        """ Opens a file within the archive as read-only, byte mode.
+
+        Arguments:
+            rel_file_path - The file path within the archive to open
+
+        Returns:
+            A file like object (see tarfile.extractfile())
+
+        Raises:
+            ArchiveError - The file could not be opened for some generic reason.
+        """
+        if rel_file_path not in self._tar_infos:
+            raise ArchiveError("Could not find %s in tar file", rel_file_path)
+
+        try:
+            return self._tarfile.extractfile(rel_file_path)
+        except tarfile.TarError as e:
+            msg = "Failed to read file {} from tarfile {}: {}".format(
+                  rel_file_path, self._tar_filepath, str(e)
+                  )
+            self._log.error(msg)
+            raise ArchiveError(msg) from e
+
+    def create_package(self):
+        """  Creates a Descriptor package from the archive contents
+
+        Returns:
+            A DescriptorPackage of the correct descriptor type
+        """
+        package = DescriptorPackage.from_package_files(self._log, self.open_file, self.filenames)
+        for pkg_file in self.filenames:
+            package.add_file(pkg_file, self._tar_infos[pkg_file].mode)
+
+        return package
+
+
+class TemporaryPackage(object):
+    """  This class is a container for a temporary file-backed package
+
+    This class contains a DescriptorPackage and can be used in place of one.
+    Provides a useful context manager which will close and destroy the file
+    that is backing the DescriptorPackage on exit.
+    """
+    def __init__(self, log, package, file_hdl):
+        self._log = log
+        self._package = package
+        self._file_hdl = file_hdl
+
+        if not hasattr(self._file_hdl, "name"):
+            raise ValueError("File handle must have a name attribute")
+
+    def __getattr__(self, attr):
+        return getattr(self._package, attr)
+
+    def __enter__(self):
+        return self._package
+
+    def __exit__(self, type, value, tb):
+        self.close()
+
+    def filename(self):
+        """ Returns the filepath with is backing the Package """
+        return self._file_hdl.name
+
+    def package(self):
+        """ The contained DescriptorPackage instance """
+        return self._package
+
+    def close(self):
+        """ Close and remove the backed file """
+        filename = self._file_hdl.name
+
+        try:
+            self._file_hdl.close()
+        except OSError as e:
+            self._log.warning("Failed to close package file: %s", str(e))
+
+        try:
+            os.remove(filename)
+        except OSError as e:
+            self._log.warning("Failed to remove package file: %s", str(e))
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/script.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/script.py
new file mode 100644 (file)
index 0000000..01f66b0
--- /dev/null
@@ -0,0 +1,84 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import re
+import os.path
+
+from . import package
+
+
+class ScriptExtractionError(Exception):
+    pass
+
+
+class PackageScriptExtractor(object):
+    """ This class is reponsible for extracting scripts to the correct directory
+
+    In order to remain compatible with the existing config manager, we extract the scripts
+    to a known location (RIFT-13282)
+    """
+    DEFAULT_INSTALL_DIR = os.path.join(
+            os.environ["RIFT_INSTALL"],
+            "usr/bin"
+            )
+
+    SCRIPT_REGEX = "{prefix}/?scripts/(?P<script_name>[^/]+)$"
+
+    def __init__(self, log, install_dir=DEFAULT_INSTALL_DIR):
+        self._log = log
+        self._install_dir = install_dir
+
+    def _get_rel_dest_path(self, descriptor_id, script_name):
+        dest_path = os.path.join(self._install_dir, script_name)
+        return dest_path
+
+    @classmethod
+    def package_script_files(cls, package):
+        script_file_map = {}
+
+        for file_name in package.files:
+            match = re.match(
+                    cls.SCRIPT_REGEX.format(prefix=package.prefix),
+                    file_name,
+                    )
+            if match is None:
+                continue
+
+            script_name = match.group("script_name")
+
+            script_file_map[script_name] = file_name
+
+        return script_file_map
+
+    def get_extracted_script_path(self, package_id, script_name):
+        return self._get_rel_dest_path(package_id, script_name)
+
+    def extract_scripts(self, pkg):
+        descriptor_id = pkg.descriptor_id
+        script_files = PackageScriptExtractor.package_script_files(pkg)
+
+        for script_name, script_file in script_files.items():
+            dest_path = self._get_rel_dest_path(descriptor_id, script_name)
+
+            self._log.debug("Extracting %s script to %s", script_name, dest_path)
+            try:
+                pkg.extract_file(script_file, dest_path)
+            except package.ExtractError as e:
+                raise ScriptExtractionError("Failed to extract script %s" % script_name) from e
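
A usage sketch mirroring the icon extractor above (archive name hypothetical; the default install directory requires RIFT_INSTALL):

    import logging
    import rift.package.package as pkg_mod
    from rift.package.script import PackageScriptExtractor

    log = logging.getLogger("onboard")
    with open("ping_vnfd.tar.gz", "rb") as tar_hdl:
        pkg = pkg_mod.TarPackageArchive(log, tar_hdl).create_package()
        PackageScriptExtractor(log).extract_scripts(pkg)
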
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/store.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/package/store.py
new file mode 100644 (file)
index 0000000..454546d
--- /dev/null
@@ -0,0 +1,211 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import os
+import shutil
+
+from . import package
+
+
+class PackageStoreError(Exception):
+    pass
+
+
+class PackageExistsError(PackageStoreError):
+    pass
+
+
+class PackageNotFoundError(PackageStoreError):
+    pass
+
+
+class PackageFilesystemStore(object):
+    """ This class is able to store/retreive/delete DescriptorPackages on disk
+
+    To insulate components from having to deal with accessing the filesystem directly
+    to deal with onboarded packages, this class provides a convenient interface for
+    storing, retrieving, and deleting packages stored on disk.  The interfaces deal directly
+    with DescriptorPackages so clients are abstracted from the actual location on disk.
+    """
+
+    def __init__(self, log, root_dir):
+        self._log = log
+        self._root_dir = root_dir
+        self._package_dirs = {}
+
+        self.refresh()
+
+    def _get_package_dir(self, package_id):
+        return os.path.join(self._root_dir, package_id)
+
+    def _get_package_files(self, package_id):
+        package_files = {}
+
+        package_dir = self._get_package_dir(package_id)
+
+        for dirpath, dirnames, filenames in os.walk(package_dir):
+            for name in filenames:
+                file_path = os.path.join(dirpath, name)
+                file_rel_path = os.path.relpath(file_path, package_dir)
+                package_files[file_rel_path] = file_path
+
+        return package_files
+
+    def refresh(self):
+        """ Refresh the package index from disk  """
+        if not os.path.exists(self._root_dir):
+            self._package_dirs = {}
+            return
+
+        package_dirs = {}
+        for package_id_dir in os.listdir(self._root_dir):
+            try:
+                package_dir_path = os.path.join(self._root_dir, package_id_dir)
+                if not os.path.isdir(package_dir_path):
+                    self._log.warning("Unknown file in package store: %s", package_dir_path)
+                    continue
+
+                files = os.listdir(package_dir_path)
+                if len(files) == 0:
+                    self._log.warning("Package directory %s is empty", package_dir_path)
+                    continue
+
+                package_id = os.path.basename(package_id_dir)
+                package_dirs[package_id] = package_id_dir
+
+            except OSError as e:
+                self._log.warning("Failed to read packages from %s: %s",
+                                  package_dir_path, str(e))
+
+        self._package_dirs = package_dirs
+
+    def get_package(self, package_id):
+        """ Get a DescriptorPackage on disk from the package descriptor id
+
+        Arguments:
+            package_id - The DescriptorPackage.descriptor_id
+
+        Returns:
+            A DescriptorPackage instance of the correct type
+
+        Raises:
+            PackageNotFoundError - The package could not be found
+            PackageStoreError - The package could not be retrieved
+        """
+        if package_id not in self._package_dirs:
+            msg = "Package %s not found in %s" % (package_id, self._root_dir)
+            raise PackageNotFoundError(msg)
+
+        package_files = self._get_package_files(package_id)
+        package_dir = self._get_package_dir(package_id)
+
+        def do_open(pkg_file):
+            pkg_path = os.path.join(package_dir, pkg_file)
+            return open(pkg_path, "rb")
+
+        pkg = package.DescriptorPackage.from_package_files(self._log, do_open, package_files)
+        for pkg_file in package_files:
+            pkg.add_file(pkg_file)
+
+        return pkg
+
+    def store_package(self, pkg):
+        """ Store a DescriptorPackage to disk
+
+        Arguments:
+            pkg - A DescriptorPackage
+
+        Raises:
+            PackageStoreError - The package could not be stored
+        """
+        if pkg.descriptor_id in self._package_dirs:
+            raise PackageExistsError("Package %s already exists" % pkg.descriptor_id)
+
+        package_dir = self._get_package_dir(pkg.descriptor_id)
+
+        try:
+            os.makedirs(package_dir, exist_ok=True)
+        except OSError as e:
+            raise PackageStoreError("Failed to create package dir: %s", package_dir) from e
+
+        try:
+            self._log.debug("Storing package in dir %s", package_dir)
+            pkg.extract(package_dir)
+            self._log.debug("Package stored in dir %s", package_dir)
+        except package.PackageError as e:
+            raise PackageStoreError("Failed to extract package to package store") from e
+
+        self._package_dirs[pkg.descriptor_id] = package_dir
+
+    def delete_package(self, descriptor_id):
+        """ Delete a stored DescriptorPackage
+
+        Arguments:
+            descriptor_id - The DescriptorPackage.descriptor_id
+
+        Raises:
+            PackageNotFoundError - The package could not be found
+            PackageStoreError - The package could not be deleted
+        """
+
+        if descriptor_id not in self._package_dirs:
+            raise PackageNotFoundError("Package %s does not exists", descriptor_id)
+
+        package_dir = self._get_package_dir(descriptor_id)
+        try:
+            if os.path.exists(package_dir):
+                self._log.debug("Removing stored package directory: %s", package_dir)
+                shutil.rmtree(package_dir)
+        except OSError as e:
+            raise PackageStoreError(
+                    "Failed to remove stored package directory: %s" % package_dir
+                    ) from e
+
+        del self._package_dirs[descriptor_id]
+
+    def update_package(self, pkg):
+        """ Update a stored DescriptorPackage
+
+        Arguments:
+            pkg - A DescriptorPackage
+
+        Raises:
+            PackageNotFoundError - The package could not be found
+            PackageStoreError - The package could not be updated
+        """
+        self.delete_package(pkg.descriptor_id)
+        self.store_package(pkg)
+
+
+class NsdPackageFilesystemStore(PackageFilesystemStore):
+    DEFAULT_ROOT_DIR = os.path.join(
+            os.environ["RIFT_ARTIFACTS"],
+            "launchpad", "packages", "nsd"
+            )
+
+    def __init__(self, log, root_dir=DEFAULT_ROOT_DIR):
+        super().__init__(log, root_dir)
+
+
+class VnfdPackageFilesystemStore(PackageFilesystemStore):
+    DEFAULT_ROOT_DIR = os.path.join(
+            os.environ["RIFT_ARTIFACTS"],
+            "launchpad", "packages", "vnfd"
+            )
+
+    def __init__(self, log, root_dir=DEFAULT_ROOT_DIR):
+        super().__init__(log, root_dir)
+
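
A hedged sketch of the store lifecycle (assumes RIFT_ARTIFACTS is set and pkg is a DescriptorPackage built as in the package.py sketch above):

    import logging
    from rift.package.store import VnfdPackageFilesystemStore

    log = logging.getLogger("store")
    store = VnfdPackageFilesystemStore(log)    # $RIFT_ARTIFACTS/launchpad/packages/vnfd

    store.store_package(pkg)                   # extract to <root>/<descriptor_id>/
    restored = store.get_package(pkg.descriptor_id)
    store.delete_package(pkg.descriptor_id)
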
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/__init__.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/__init__.py
new file mode 100644 (file)
index 0000000..4c17a07
--- /dev/null
@@ -0,0 +1 @@
+from .tasklet import LaunchpadTasklet
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/convert_pkg.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/convert_pkg.py
new file mode 100644 (file)
index 0000000..ba04e8f
--- /dev/null
@@ -0,0 +1,102 @@
+############################################################################
+# Copyright 2016 RIFT.IO Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
+
+
+import os
+import shutil
+import tempfile
+
+from .tosca import ImportTosca
+
+
+class ConvertPackageError(Exception):
+    pass
+
+
+class ConvertPackage(object):
+    """Convert a package to our YANG model
+
+    Currently only TOSCA to our model is supported
+    """
+
+    def __init__(self, log, filename, pkgfile):
+        self._log = log
+        self._filename = filename
+        self._pkgfile = pkgfile
+        self._tempdir = None
+
+    def convert(self, delete=False):
+        """Convert package to our YANG model
+
+        Arguments:
+          delete: If the pkgfile is to be deleted after converting
+
+        Returns:
+          List of converted descriptor files. If the package is not a
+          supported format, the original package file is returned
+          unchanged as the only entry.
+
+        Note:
+          This will create a temporary directory and the converted
+          files will be in that. The converted files and directory
+          need to be deleted after use.
+        """
+
+        # Create a temporary directory to store the converted packages
+        tempdir = tempfile.mkdtemp()
+
+        out_files = []
+        converted = False
+        # Check if this is a tosca archive
+        if ImportTosca.is_tosca_package(self._pkgfile):
+            self._log.debug("Uploaded file {} is a TOSCA archive".
+                            format(self._filename))
+            try:
+                tosca = ImportTosca(self._log, self._pkgfile, out_dir=tempdir)
+                out_files = tosca.translate()
+                converted = True
+
+            except Exception as e:
+                self._log.error("Exception converting package from TOSCA {}: {}".
+                                format(self._filename, e))
+
+                # Remove the tempdir
+                try:
+                    shutil.rmtree(tempdir)
+                except OSError as e:
+                    self._log.warning("Unable to remove temporary directory {}: {}".
+                                      format(tempdir, e))
+
+                raise
+
+        # Delete the input file, if converted and deletion was requested
+        if converted:
+            self._tempdir = tempdir
+            if delete:
+                try:
+                    os.remove(self._pkgfile)
+                except OSError as e:
+                    self._log.warning("Failed to remove package file: %s", str(e))
+        else:
+            # Remove the temp dir
+            shutil.rmtree(tempdir, ignore_errors=True)
+
+            # Return the input file unchanged
+            out_files.append(self._pkgfile)
+
+
+        # Return the converted files
+        self._log.debug("Converted package files: {}".format(out_files))
+        return out_files
+
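
A minimal sketch of driving the converter (file paths hypothetical; behavior for TOSCA inputs depends on ImportTosca, which lives outside this file):

    import logging
    from rift.tasklets.rwlaunchpad.convert_pkg import ConvertPackage

    log = logging.getLogger("upload")
    converter = ConvertPackage(log, "uploaded.zip", "/tmp/uploads/uploaded.zip")
    out_files = converter.convert()
    # Non-TOSCA input: out_files is ["/tmp/uploads/uploaded.zip"] unchanged.
    # TOSCA input: out_files are translated packages in a temp dir the
    # caller must clean up.
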
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/datacenters.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/datacenters.py
new file mode 100644 (file)
index 0000000..84fddb6
--- /dev/null
@@ -0,0 +1,132 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+
+from gi.repository import (
+    RwDts,
+    RwLaunchpadYang,
+)
+
+import rift.openmano.openmano_client as openmano_client
+import rift.tasklets
+
+
+class DataCenterPublisher(object):
+    """
+    This class is responsible for exposing the data centers associated with an
+    openmano cloud account.
+    """
+
+    XPATH = "D,/rw-launchpad:datacenters"
+
+    def __init__(self, tasklet):
+        """Creates an instance of a DataCenterPublisher
+
+        Arguments:
+            tasklet - the tasklet that this publisher is registered for
+
+        """
+        self.tasklet = tasklet
+        self.reg = None
+
+    @property
+    def dts(self):
+        """The DTS instance used by this tasklet"""
+        return self.tasklet.dts
+
+    @property
+    def log(self):
+        """The logger used by this tasklet"""
+        return self.tasklet.log
+
+    @property
+    def loop(self):
+        """The event loop used by this tasklet"""
+        return self.tasklet.loop
+
+    @property
+    def accounts(self):
+        """The known openmano cloud accounts"""
+        accounts = list()
+        for acc in self.tasklet.cloud_accounts:
+            if acc.account_type == "openmano":
+                accounts.append(acc.account_msg)
+
+        return accounts
+
+    @asyncio.coroutine
+    def register(self):
+        """Registers the publisher with DTS"""
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            try:
+                # Create a datacenters instance to hold all of the cloud
+                # account data.
+                datacenters = RwLaunchpadYang.DataCenters()
+
+                # Iterate over the known openmano accounts and populate cloud
+                # account instances with the corresponding data center info
+                for account in self.accounts:
+                    try:
+                        cloud_account = RwLaunchpadYang.CloudAccount()
+                        cloud_account.name = account.name
+
+                        # Create a client for this cloud account to query for
+                        # the associated data centers
+                        client = openmano_client.OpenmanoCliAPI(
+                                self.log,
+                                account.openmano.host,
+                                account.openmano.port,
+                                account.openmano.tenant_id,
+                                )
+
+                        # Populate the cloud account with the data center info
+                        for uuid, name in client.datacenter_list():
+                            cloud_account.datacenters.append(
+                                    RwLaunchpadYang.DataCenter(
+                                        uuid=uuid,
+                                        name=name,
+                                        )
+                                    )
+
+                        datacenters.cloud_accounts.append(cloud_account)
+
+                    except Exception as e:
+                        self.log.exception(e)
+
+                xact_info.respond_xpath(
+                        RwDts.XactRspCode.MORE,
+                        'D,/rw-launchpad:datacenters',
+                        datacenters,
+                        )
+
+                xact_info.respond_xpath(RwDts.XactRspCode.ACK)
+
+            except Exception as e:
+                self.log.exception(e)
+                raise
+
+        handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
+
+        with self.dts.group_create() as group:
+            self.reg = group.register(
+                    xpath=DataCenterPublisher.XPATH,
+                    handler=handler,
+                    flags=RwDts.Flag.PUBLISHER,
+                    )
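
In use, this publisher would be created and registered from the launchpad tasklet's startup coroutine, roughly (a sketch; self is the owning tasklet):

    # inside a rift.tasklets coroutine on the tasklet
    self.datacenter_publisher = DataCenterPublisher(self)
    yield from self.datacenter_publisher.register()
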
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/export.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/export.py
new file mode 100644 (file)
index 0000000..4256765
--- /dev/null
@@ -0,0 +1,414 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import io
+import os.path
+import stat
+import time
+import uuid
+
+import tornado.web
+
+import rift.package.archive
+import rift.package.checksums
+import rift.package.convert
+import rift.package.package
+import rift.package.store
+import rift.package.image
+
+from . import state
+from . import message
+from . import tosca
+
+import gi
+gi.require_version('NsdYang', '1.0')
+gi.require_version('VnfdYang', '1.0')
+
+from gi.repository import (
+        NsdYang,
+        VnfdYang,
+        )
+
+
+class ExportStart(message.StatusMessage):
+    def __init__(self):
+        super().__init__("export-started", "export process started")
+
+
+class ExportSuccess(message.StatusMessage):
+    def __init__(self):
+        super().__init__("export-success", "export process successfully completed")
+
+
+class ExportFailure(message.StatusMessage):
+    def __init__(self):
+        super().__init__("export-failure", "export process failed")
+
+
+class ExportError(message.ErrorMessage):
+    def __init__(self, msg):
+        super().__init__("update-error", msg)
+
+
+class ExportSingleDescriptorOnlyError(ExportError):
+    def __init__(self):
+        super().__init__("Only a single descriptor can be exported")
+
+
+class ArchiveExportError(Exception):
+    pass
+
+
+class DescriptorPackageArchiveExporter(object):
+    def __init__(self, log):
+        self._log = log
+
+    def _create_archive_from_package(self, archive_hdl, package, open_fn):
+        orig_open = package.open
+        try:
+            package.open = open_fn
+            archive = rift.package.archive.TarPackageArchive.from_package(
+                    self._log, package, archive_hdl
+                    )
+            return archive
+        finally:
+            package.open = orig_open
+
+    def create_archive(self, archive_hdl, package, desc_json_str, serializer):
+        """ Create a package archive from an existing package, descriptor messages,
+            and a destination serializer.
+
+        In order to stay flexible with the package directory structure and
+        descriptor format, attempt to "augment" the onboarded package with the
+        updated descriptor in the original format.  If the original package
+        contained a checksum file, then recalculate the descriptor checksum.
+
+        Arguments:
+            archive_hdl - An open file handle with 'wb' permissions
+            package - A DescriptorPackage instance
+            desc_json_str - A descriptor (e.g. nsd, vnfd) as a JSON string
+            serializer - A destination serializer (e.g. VnfdSerializer)
+
+        Returns:
+            A TarPackageArchive
+
+        Raises:
+            ArchiveExportError - Failed to create the exported archive
+
+        """
+        new_desc_msg = serializer.from_file_hdl(io.BytesIO(desc_json_str.encode()), ".json")
+        _, dest_ext = os.path.splitext(package.descriptor_file)
+        new_desc_hdl = io.BytesIO(serializer.to_string(new_desc_msg, dest_ext).encode())
+        descriptor_checksum = rift.package.checksums.checksum(new_desc_hdl)
+
+        checksum_file = None
+        try:
+            checksum_file = rift.package.package.PackageChecksumValidator.get_package_checksum_file(
+                    package
+                    )
+
+        except FileNotFoundError:
+            pass
+
+        # Since we're going to intercept the open function to rewrite the descriptor
+        # and checksum, save a handle to use below
+        open_fn = package.open
+
+        def create_checksum_file_hdl():
+            with open_fn(checksum_file) as checksum_hdl:
+                archive_checksums = rift.package.checksums.ArchiveChecksums.from_file_desc(
+                        checksum_hdl
+                        )
+
+            archive_checksums[package.descriptor_file] = descriptor_checksum
+
+            checksum_hdl = io.BytesIO(archive_checksums.to_string().encode())
+            return checksum_hdl
+
+        def open_wrapper(rel_path):
+            """ Wraps the package open in order to rewrite the descriptor file and checksum """
+            if rel_path == package.descriptor_file:
+                return new_desc_hdl
+
+            elif rel_path == checksum_file:
+                return create_checksum_file_hdl()
+
+            return open_fn(rel_path)
+
+        archive = self._create_archive_from_package(archive_hdl, package, open_wrapper)
+
+        return archive
+
+    def export_package(self, package, export_dir, file_id, json_desc_str, dest_serializer):
+        """ Export package as an archive to the export directory
+
+        Arguments:
+            package - A DescriptorPackage instance
+            export_dir - The directory to export the package archive to
+            file_id - A unique file id to name the archive as (i.e. <file_id>.tar.gz)
+            json_desc_str - A descriptor (e.g. nsd, vnfd) json message string
+            dest_serializer - A destination serializer (e.g. VnfdSerializer)
+
+        Returns:
+            The created archive path
+
+        Raises:
+            ArchiveExportError - Failed to create the exported archive
+        """
+        os.makedirs(export_dir, exist_ok=True)
+
+        archive_path = os.path.join(export_dir, file_id + ".tar.gz")
+        with open(archive_path, 'wb') as archive_hdl:
+            try:
+                self.create_archive(
+                    archive_hdl, package, json_desc_str, dest_serializer
+                    )
+            except Exception as e:
+                os.remove(archive_path)
+                msg = "Failed to create exported archive"
+                self._log.error(msg)
+                raise ArchiveExportError(msg) from e
+
+        return archive_path
+
+
+class ExportHandler(tornado.web.RequestHandler):
+    def options(self, *args, **kargs):
+        pass
+
+    def set_default_headers(self):
+        self.set_header('Access-Control-Allow-Origin', '*')
+        self.set_header('Access-Control-Allow-Headers',
+                        'Content-Type, Cache-Control, Accept, X-Requested-With, Authorization')
+        self.set_header('Access-Control-Allow-Methods', 'POST, GET, PUT, DELETE')
+
+    def initialize(self, log, loop, store_map, exporter, catalog_map):
+        self.loop = loop
+        self.transaction_id = str(uuid.uuid4())
+        self.log = message.Logger(
+                log,
+                self.application.messages[self.transaction_id],
+                )
+        self.store_map = store_map
+        self.exporter = exporter
+        self.catalog_map = catalog_map
+
+    def get(self, desc_type):
+        if desc_type not in self.catalog_map:
+            raise tornado.web.HTTPError(400, "unknown descriptor type: {}".format(desc_type))
+
+        self.log.message(ExportStart())
+
+        # Parse the IDs
+        ids_query = self.get_query_argument("ids")
+        ids = [id.strip() for id in ids_query.split(',')]
+        if len(ids) != 1:
+            raise message.MessageException(ExportSingleDescriptorOnlyError)
+        desc_id = ids[0]
+
+        catalog = self.catalog_map[desc_type]
+
+        if desc_id not in catalog:
+            raise tornado.web.HTTPError(400, "unknown descriptor id: {}".format(desc_id))
+
+        desc_msg = catalog[desc_id]
+
+        # Get the schema for exporting
+        schema = self.get_argument("schema", default="rift")
+
+        # Get the grammar for exporting
+        grammar = self.get_argument("grammar", default="osm")
+
+        # Get the format for exporting
+        format_ = self.get_argument("format", default="yaml")
+
+        filename = None
+
+        if grammar == 'tosca':
+            filename = "{}.zip".format(self.transaction_id)
+            self.export_tosca(format_, schema, desc_type, desc_id, desc_msg)
+            self.log.message(message.FilenameMessage(filename))
+        else:
+            filename = "{}.tar.gz".format(self.transaction_id)
+            self.export_rift(schema, format_, desc_type, desc_id, desc_msg)
+            self.log.message(message.FilenameMessage(filename))
+
+        self.log.message(ExportSuccess())
+
+        if filename is not None:
+            self.write(tornado.escape.json_encode({
+                "transaction_id": self.transaction_id,
+                "filename": filename,
+            }))
+        else:
+            self.write(tornado.escape.json_encode({
+                "transaction_id": self.transaction_id,
+            }))
+
+    def export_rift(self, schema, format_, desc_type, desc_id, desc_msg):
+        convert = rift.package.convert
+        schema_serializer_map = {
+                "rift": {
+                    "vnfd": convert.RwVnfdSerializer,
+                    "nsd": convert.RwNsdSerializer,
+                    },
+                "mano": {
+                    "vnfd": convert.VnfdSerializer,
+                    "nsd": convert.NsdSerializer,
+                    }
+                }
+
+        if schema not in schema_serializer_map:
+            raise tornado.web.HTTPError(400, "unknown schema: {}".format(schema))
+
+        if format_ != "yaml":
+            self.log.warn("Only yaml format supported for export")
+
+        if desc_type not in schema_serializer_map[schema]:
+            raise tornado.web.HTTPError(400, "unknown descriptor type: {}".format(desc_type))
+
+        # Use the rift superset schema as the source
+        src_serializer = schema_serializer_map["rift"][desc_type]()
+
+        dest_serializer = schema_serializer_map[schema][desc_type]()
+
+        package_store = self.store_map[desc_type]
+
+        # Attempt to get the package from the package store
+        # If that fails, create a temporary package using the descriptor only
+        try:
+            package = package_store.get_package(desc_id)
+        except rift.package.store.PackageNotFoundError:
+            self.log.debug("stored package not found.  creating package from descriptor config")
+
+            desc_yaml_str = src_serializer.to_yaml_string(desc_msg)
+            with io.BytesIO(desc_yaml_str.encode()) as hdl:
+                hdl.name = "{}__{}.yaml".format(desc_msg.id, desc_type)
+                package = rift.package.package.DescriptorPackage.from_descriptor_file_hdl(
+                    self.log, hdl
+                    )
+
+        self.exporter.export_package(
+                package=package,
+                export_dir=self.application.export_dir,
+                file_id=self.transaction_id,
+                json_desc_str=src_serializer.to_json_string(desc_msg),
+                dest_serializer=dest_serializer,
+                )
+
+    def export_tosca(self, format_, schema, desc_type, desc_id, desc_msg):
+        if format_ != "yaml":
+            self.log.warn("Only yaml format supported for TOSCA export")
+
+        if desc_type != "nsd":
+            raise tornado.web.HTTPError(
+                400,
+                "NSD need to passed to generate TOSCA: {}".format(desc_type))
+
+        def get_pkg_from_store(id_, type_):
+            package = None
+            # Attempt to get the package from the package store
+            try:
+                package_store = self.store_map[type_]
+                package = package_store.get_package(id_)
+
+            except rift.package.store.PackageNotFoundError:
+                self.log.debug("stored package not found for {}.".format(id_))
+            except rift.package.store.PackageStoreError:
+                self.log.debug("stored package error for {}.".format(id_))
+
+            return package
+
+        pkg = tosca.ExportTosca()
+
+        # Add NSD and related descriptors for exporting
+        nsd_id = pkg.add_nsd(desc_msg, get_pkg_from_store(desc_id, "nsd"))
+
+        catalog = self.catalog_map["vnfd"]
+        for const_vnfd in desc_msg.constituent_vnfd:
+            vnfd_id = const_vnfd.vnfd_id_ref
+            if vnfd_id in catalog:
+                pkg.add_vnfd(nsd_id,
+                             catalog[vnfd_id],
+                             get_pkg_from_store(vnfd_id, "vnfd"))
+            else:
+                raise tornado.web.HTTPError(
+                    400,
+                    "Unknown VNFD descriptor {} for NSD {}".
+                    format(vnfd_id, nsd_id))
+
+        # Create the archive.
+        pkg.create_archive(self.transaction_id,
+                           dest=self.application.export_dir)
+
+
+class ExportStateHandler(state.StateHandler):
+    STARTED = ExportStart
+    SUCCESS = ExportSuccess
+    FAILURE = ExportFailure
+
+
+@asyncio.coroutine
+def periodic_export_cleanup(log, loop, export_dir, period_secs=10 * 60, min_age_secs=30 * 60):
+    """ Periodically cleanup old exported archives (.tar.gz files) in export_dir
+
+    Arguments:
+        log - A Logger instance
+        loop - An asyncio event loop
+        export_dir - The directory to cleanup old archives in
+        period_secs - The number of seconds between clean ups
+        min_age_secs - The minimum age of an archive to be eligible for cleanup
+
+    """
+    log.debug("Starting periodic export cleaning for export directory: %s", export_dir)
+
+    # Create export dir if not created yet
+    if not os.path.exists(export_dir):
+        os.makedirs(export_dir)
+
+    while True:
+        yield from asyncio.sleep(period_secs, loop=loop)
+
+        if not os.path.exists(export_dir):
+            continue
+
+        for file_name in os.listdir(export_dir):
+            if not file_name.endswith(".tar.gz"):
+                continue
+
+            file_path = os.path.join(export_dir, file_name)
+
+            try:
+                file_stat = os.stat(file_path)
+            except OSError as e:
+                log.warning("Could not stat old exported archive: %s", str(e))
+                continue
+
+            file_age = time.time() - file_stat[stat.ST_MTIME]
+
+            if file_age < min_age_secs:
+                continue
+
+            log.debug("Cleaning up old exported archive: %s", file_path)
+
+            try:
+                os.remove(file_path)
+            except OSError as e:
+                log.warning("Failed to remove old exported archive: %s", str(e))
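+
+# A minimal scheduling sketch (hypothetical caller; 'log', 'loop' and
+# 'export_dir' are assumed to exist):
+#
+#     cleanup_task = loop.create_task(
+#         periodic_export_cleanup(log, loop, export_dir))
+#
+# Cancelling cleanup_task stops the periodic sweep.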
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/extract.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/extract.py
new file mode 100644 (file)
index 0000000..7c0eab8
--- /dev/null
@@ -0,0 +1,166 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import io
+import os
+import shutil
+import tarfile
+import tempfile
+import tornado.httputil
+
+import rift.package.package
+import rift.package.convert
+import rift.package.image
+import rift.package.checksums
+
+from .convert_pkg import ConvertPackage
+
+
+class ExtractError(Exception):
+    pass
+
+
+class UnreadableHeadersError(ExtractError):
+    pass
+
+
+class MissingTerminalBoundary(ExtractError):
+    pass
+
+
+class UnreadableDescriptorError(ExtractError):
+    pass
+
+
+class UnreadablePackageError(ExtractError):
+    pass
+
+
+class PackageImage(object):
+    def __init__(self, log, image_name, image_hdl, checksum=None):
+        self.name = image_name
+        self.image_hdl = image_hdl
+
+        if checksum is None:
+            log.debug("Image %s checksum not provided, calculating checksum...")
+            checksum = rift.package.checksums.checksum(self.image_hdl)
+            log.debug("Image %s checksum: %s", self.name, checksum)
+
+        self.checksum = checksum
+
+
+class UploadPackageExtractor(object):
+    def __init__(self, log):
+        self._log = log
+
+    def create_packages_from_upload(self, uploaded_file, extracted_pkgfile):
+        def create_package_from_descriptor_file(desc_hdl):
+            # Uploaded package was a plain descriptor file
+            bytes_hdl = io.BytesIO(desc_hdl.read())
+            bytes_hdl.name = uploaded_file
+            try:
+                package = rift.package.package.DescriptorPackage.from_descriptor_file_hdl(
+                        self._log, bytes_hdl
+                        )
+            except rift.package.package.PackageError as e:
+                msg = "Could not create descriptor package from descriptor: %s" % str(e)
+                self._log.error(msg)
+                raise UnreadableDescriptorError(msg) from e
+
+            return package
+
+        def create_package_from_tar_file(tar_hdl):
+            # Uploaded package was in a .tar.gz format
+            tar_archive = rift.package.package.TarPackageArchive(
+                    self._log, tar_hdl,
+                    )
+            try:
+                package = tar_archive.create_package()
+            except rift.package.package.PackageError as e:
+                msg = "Could not create package from tar archive: %s" % str(e)
+                self._log.error(msg)
+                raise UnreadablePackageError(msg) from e
+
+            return package
+
+        self._log.info("creating package from uploaded descriptor file/package")
+        tmp_pkgs = []
+        upload_hdl = None
+        try:
+            # This file handle will be passed to TemporaryPackage to be closed
+            # and the underlying file removed.
+            upload_hdl = open(extracted_pkgfile, "r+b")
+
+            # Process the package archive
+            if tarfile.is_tarfile(extracted_pkgfile):
+                package = create_package_from_tar_file(upload_hdl)
+                tmp_pkgs.append(rift.package.package.TemporaryPackage(self._log,
+                                                                      package,
+                                                                      upload_hdl))
+
+            # Check if this is just a descriptor file
+            elif rift.package.convert.ProtoMessageSerializer.is_supported_file(uploaded_file):
+                package = create_package_from_descriptor_file(upload_hdl)
+                tmp_pkgs.append(rift.package.package.TemporaryPackage(self._log,
+                                                                      package,
+                                                                      upload_hdl))
+
+            else:
+                # See if the package can be converted
+                files = ConvertPackage(self._log,
+                                       uploaded_file,
+                                       extracted_pkgfile).convert(delete=True)
+
+                if files is None or not len(files):
+                    # Not converted successfully
+                    msg = "Uploaded file was neither a tar.gz or descriptor file"
+                    self._log.error(msg)
+                    raise UnreadablePackageError(msg)
+
+                # Close the open file handle as this file is not used anymore
+                upload_hdl.close()
+
+                for f in files:
+                    self._log.debug("Upload converted file: {}".format(f))
+                    upload_hdl = open(f, "r+b")
+                    package = create_package_from_tar_file(upload_hdl)
+                    tmp_pkgs.append(rift.package.package.TemporaryPackage(self._log,
+                                                                          package,
+                                                                          upload_hdl))
+
+        except Exception as e:
+            # Cleanup any TemporaryPackage instances created
+            for t in tmp_pkgs:
+                t.close()
+
+            # Close the handle if not already closed
+            # Use distinct names for the cleanup exceptions so the original
+            # exception 'e' survives for the final 'raise e' below (an
+            # 'except ... as e' clause deletes its target on exit).
+            try:
+                if upload_hdl is not None:
+                    upload_hdl.close()
+            except OSError as close_err:
+                self._log.warning("Failed to close file handle: %s", str(close_err))
+
+            try:
+                self._log.debug("Removing extracted package file: %s", extracted_pkgfile)
+                os.remove(extracted_pkgfile)
+            except OSError as rm_err:
+                self._log.warning("Failed to remove extracted package file: %s", str(rm_err))
+
+            raise e
+
+        return tmp_pkgs
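+
+# A usage sketch (file paths and the consume() helper are illustrative only):
+#
+#     extractor = UploadPackageExtractor(log)
+#     tmp_pkgs = extractor.create_packages_from_upload(
+#             "my_vnfd.tar.gz", "/tmp/uploads/extracted_pkgfile")
+#     try:
+#         for tmp_pkg in tmp_pkgs:
+#             consume(tmp_pkg.package)
+#     finally:
+#         for tmp_pkg in tmp_pkgs:
+#             tmp_pkg.close()  # closes the handle and removes the temp file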
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/image.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/image.py
new file mode 100644 (file)
index 0000000..ce30981
--- /dev/null
@@ -0,0 +1,74 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import itertools
+import glanceclient
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+
+from rift.imagemgr import client
+
+
+class ImageUploadError(Exception):
+    pass
+
+
+class ImageUploader(object):
+    """ This class is responsible for uploading package images to cloud accounts """
+    def __init__(self, log, loop, dts):
+        """ Create an instance of ImageUploader
+
+        Arguments:
+            log  - A logger
+            loop - An asyncio event loop
+            dts  - A DTS handle, used to create the image upload job client
+        """
+        self._log = log
+        self._loop = loop
+        self._dts = dts
+
+        self._client = client.UploadJobClient(self._log, self._loop, self._dts)
+
+    def upload_image(self, image_name, image_checksum, image_hdl):
+        endpoint = "http://127.0.0.1:9999"
+        glance_client = glanceclient.Client('1', endpoint, token="asdf")
+
+        try:
+            for image in itertools.chain(
+                    glance_client.images.list(is_public=False),
+                    glance_client.images.list(is_public=True),
+                    ):
+                if image.name == image_name and image.checksum == image_checksum:
+                    self._log.debug("Found existing image in catalog, not re-uploading")
+                    return
+
+            self._log.debug('Uploading image to catalog: {}'.format(image_name))
+
+            image = glance_client.images.create(name=image_name, data=image_hdl, is_public="False",
+                                                disk_format="qcow2", container_format="bare",
+                                                checksum=image_checksum)
+            self._log.debug('Image upload complete: %s', image)
+        except Exception as e:
+            raise ImageUploadError("Failed to upload image to catalog: %s" % str(e)) from e
+
+    def upload_image_to_cloud_accounts(self, image_name, image_checksum, cloud_accounts=None):
+        self._log.debug("uploading image %s to all cloud accounts", image_name)
+        upload_job = self._client.create_job_threadsafe(image_name, image_checksum, cloud_accounts)
+        try:
+            upload_job.wait_until_complete_threadsafe()
+        except client.UploadJobError as e:
+            raise ImageUploadError("Failed to upload image (image_name) to cloud accounts") from e
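+
+# A usage sketch (the checksum value and image handle are assumed to come
+# from the uploaded package, e.g. via rift.package.checksums):
+#
+#     uploader = ImageUploader(log, loop, dts)
+#     with open("/tmp/images/fedora.qcow2", "rb") as image_hdl:
+#         uploader.upload_image("fedora.qcow2", checksum, image_hdl)
+#     uploader.upload_image_to_cloud_accounts("fedora.qcow2", checksum)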
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/message.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/message.py
new file mode 100644 (file)
index 0000000..a1827eb
--- /dev/null
@@ -0,0 +1,360 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import logging
+import time
+
+
+class MessageException(Exception):
+    def __init__(self, msg):
+        if not isinstance(msg, Message):
+            raise ValueError("{} is not a message".format(msg.__class__.__name__))
+
+        self.msg = msg
+
+
+class Message(object):
+    """
+    Messages are events that describe the stages of the onboarding process,
+    as well as any other events that may occur while it runs.
+    """
+
+    def __init__(self, level, name, text):
+        self._level = level
+        self._name = name
+        self._text = text
+        self._timestamp = time.time()
+
+    def __repr__(self):
+        return "{} {}:{}:{}".format(
+                self.timestamp,
+                logging.getLevelName(self.level),
+                self.name,
+                self.text,
+                )
+
+    @property
+    def level(self):
+        return self._level
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    def text(self):
+        return self._text
+
+    @property
+    def timestamp(self):
+        return self._timestamp
+
+    def log(self, logger):
+        logger.log(self.level, self.text)
+
+
+class WarningMessage(Message):
+    """
+    A warning is a message about an event that does not prevent the
+    onboarding process from continuing, but that may not match the user's
+    intent when they initiated the process.
+    """
+
+    def __init__(self, name, text):
+        super().__init__(logging.WARNING, name, text)
+
+
+class ErrorMessage(Message):
+    """
+    An error message alerts the user to an event that prevents the continuation
+    of the onboarding process.
+    """
+
+    def __init__(self, name, text):
+        super().__init__(logging.ERROR, name, text)
+
+
+class StatusMessage(Message):
+    """
+    A status message informs the user of an expected stage in the onboarding
+    process.
+    """
+
+    def __init__(self, name, text):
+        super().__init__(logging.INFO, name, text)
+
+
+class FilenameMessage(Message):
+    """
+    A status message that informs the user of a file available for download.
+    """
+
+    def __init__(self, filename):
+        super().__init__(logging.INFO, 'filename', filename)
+
+
+class Logger(object):
+    """
+    This class is used to augment a python logger class so that messages can be
+    passed to it. Messages are recorded so that the uploader application can
+    provide this information to the client, and the messages are also recorded
+    on the server via the standard logging facilities.
+    """
+
+    def __init__(self, logger, messages):
+        self._rift_logger = logger
+        self._messages = messages
+
+    @property
+    def messages(self):
+        return self._messages
+
+    def message(self, msg):
+        msg.log(self._rift_logger)
+        self._messages.append(msg)
+
+    def __getattr__(self, name):
+        """ Return the rift logger attribute
+
+        By returning the rift logger attribute back to the client,
+        the line logged by rwlogger corresponds to the actual file/line
+        logged by the application instead of one in this class.  This makes
+        debugging easier and prevents rwlogd from inadvertently triggering
+        dup detection (which uses event & line information).
+        """
+        return getattr(self._rift_logger, name)
+
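+# A usage sketch of the Logger wrapper (assumes a standard python logger):
+#
+#     messages = []
+#     log = Logger(logging.getLogger("rw-mano-log"), messages)
+#     log.message(OnboardStart())   # recorded in 'messages' and logged
+#     log.debug("plain log line")   # delegated via __getattr__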
+
+
+class OnboardError(ErrorMessage):
+    def __init__(self, msg):
+        super().__init__("onboard-error", msg)
+
+
+class OnboardWarning(WarningMessage):
+    def __init__(self, msg):
+        super().__init__("onboard-warning", msg)
+
+
+class OnboardDescriptorValidation(StatusMessage):
+    def __init__(self):
+        super().__init__("onboard-dsc-validation", "descriptor validation")
+
+
+class OnboardDescriptorTimeout(OnboardError):
+    def __init__(self):
+        super().__init__("descriptor timeout")
+
+
+class OnboardDescriptorError(OnboardError):
+    def __init__(self, filename):
+        super().__init__("unable to onboard {}".format(filename))
+
+
+class OnboardDescriptorFormatError(OnboardError):
+    def __init__(self, filename):
+        super().__init__("{} has unrecognized format".format(filename))
+
+
+class OnboardMissingContentType(OnboardError):
+    def __init__(self):
+        super().__init__("missing content-type header")
+
+
+class OnboardUnsupportedMediaType(OnboardError):
+    def __init__(self):
+        super().__init__("multipart/form-data required")
+
+
+class OnboardMissingContentBoundary(OnboardError):
+    def __init__(self):
+        super().__init__("missing content boundary")
+
+
+class OnboardMissingTerminalBoundary(OnboardError):
+    def __init__(self):
+        super().__init__("Unable to find terminal content boundary")
+
+
+class OnboardUnreadableHeaders(OnboardError):
+    def __init__(self):
+        super().__init__("Unable to read message headers")
+
+
+class OnboardUnreadablePackage(OnboardError):
+    def __init__(self):
+        super().__init__("Unable to read package")
+
+
+class OnboardExtractionError(OnboardError):
+    def __init__(self):
+        super().__init__("Unable to extract package contents")
+
+
+class OnboardImageUploadError(OnboardError):
+    def __init__(self, message=""):
+        super().__init__("Unable to upload images: %s" % message)
+
+
+class OnboardMissingChecksumsFile(OnboardError):
+    def __init__(self):
+        super().__init__("Package does not contain checksums.txt")
+
+
+class OnboardChecksumMismatch(OnboardError):
+    def __init__(self, filename):
+        super().__init__("checksum mismatch for {}".format(filename))
+
+
+class OnboardDescriptorExistsError(OnboardError):
+    def __init__(self, descriptor_id):
+        super().__init__("descriptor id {} already onboarded".format(descriptor_id))
+
+
+
+class OnboardStart(StatusMessage):
+    def __init__(self):
+        super().__init__("onboard-started", "onboarding process started")
+
+
+class OnboardDescriptorOnboard(StatusMessage):
+    def __init__(self):
+        super().__init__("onboard-dsc-onboard", "onboarding descriptors")
+
+
+class OnboardSuccess(StatusMessage):
+    def __init__(self):
+        super().__init__("onboard-success", "onboarding process successfully completed")
+
+
+class OnboardFailure(StatusMessage):
+    def __init__(self):
+        super().__init__("onboard-failure", "onboarding process failed")
+
+
+class OnboardPackageUpload(StatusMessage):
+    def __init__(self):
+        super().__init__("onboard-pkg-upload", "uploading package")
+
+
+class OnboardImageUpload(StatusMessage):
+    def __init__(self):
+        super().__init__("onboard-img-upload", "uploading image")
+
+
+class OnboardPackageValidation(StatusMessage):
+    def __init__(self):
+        super().__init__("onboard-pkg-validation", "package contents validation")
+
+
+
+class UpdateError(ErrorMessage):
+    def __init__(self, msg):
+        super().__init__("update-error", msg)
+
+
+class UpdateMissingContentType(UpdateError):
+    def __init__(self):
+        super().__init__("missing content-type header")
+
+
+class UpdateUnsupportedMediaType(UpdateError):
+    def __init__(self):
+        super().__init__("multipart/form-data required")
+
+
+class UpdateMissingContentBoundary(UpdateError):
+    def __init__(self):
+        super().__init__("missing content boundary")
+
+
+class UpdateDescriptorError(UpdateError):
+    def __init__(self, filename):
+        super().__init__("unable to update {}".format(filename))
+
+
+class UpdatePackageNotFoundError(UpdateError):
+    def __init__(self, descriptor_id):
+        super().__init__("package {} not found".format(descriptor_id))
+
+
+class UpdateDescriptorFormatError(UpdateError):
+    def __init__(self, filename):
+        super().__init__("{} has unrecognized format".format(filename))
+
+
+class UpdateExtractionError(UpdateError):
+    def __init__(self):
+        super().__init__("Unable to extract package contents")
+
+
+class UpdateDescriptorTimeout(UpdateError):
+    def __init__(self):
+        super().__init__("descriptor timeout")
+
+
+class UpdateUnreadableHeaders(UpdateError):
+    def __init__(self):
+        super().__init__("Unable to read message headers")
+
+
+class UpdateUnreadablePackage(UpdateError):
+    def __init__(self):
+        super().__init__("Unable to read package")
+
+
+class UpdateChecksumMismatch(UpdateError):
+    def __init__(self, filename):
+        super().__init__("checksum mismatch for {}".format(filename))
+
+
+class UpdateImageUploadError(UpdateError):
+    def __init__(self):
+        super().__init__("Unable to upload images")
+
+
+class UpdateStart(StatusMessage):
+    def __init__(self):
+        super().__init__("update-started", "update process started")
+
+
+class UpdateSuccess(StatusMessage):
+    def __init__(self):
+        super().__init__("update-success", "updating process successfully completed")
+
+
+class UpdateFailure(StatusMessage):
+    def __init__(self):
+        super().__init__("update-failure", "updating process failed")
+
+
+class UpdatePackageUpload(StatusMessage):
+    def __init__(self):
+        super().__init__("update-pkg-upload", "uploading package")
+
+
+class UpdateDescriptorUpdate(StatusMessage):
+    def __init__(self):
+        super().__init__("update-dsc-onboard", "updating descriptors")
+
+
+class UpdateDescriptorUpdated(StatusMessage):
+    def __init__(self):
+        super().__init__("update-dsc-updated", "updated descriptors")
+
+
+
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/onboard.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/onboard.py
new file mode 100644 (file)
index 0000000..f777c97
--- /dev/null
@@ -0,0 +1,164 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import requests
+
+from rift.package import convert
+from gi.repository import (
+    NsdYang,
+    RwNsdYang,
+    VnfdYang,
+    RwVnfdYang,
+)
+
+
+class OnboardError(Exception):
+    pass
+
+
+class UpdateError(Exception):
+    pass
+
+
+class DescriptorOnboarder(object):
+    """ This class is responsible for onboarding descriptors using Restconf"""
+    DESC_ENDPOINT_MAP = {
+            NsdYang.YangData_Nsd_NsdCatalog_Nsd: "nsd-catalog/nsd",
+            RwNsdYang.YangData_Nsd_NsdCatalog_Nsd: "nsd-catalog/nsd",
+            VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: "vnfd-catalog/vnfd",
+            RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: "vnfd-catalog/vnfd",
+            }
+
+    DESC_SERIALIZER_MAP = {
+            NsdYang.YangData_Nsd_NsdCatalog_Nsd: convert.NsdSerializer(),
+            RwNsdYang.YangData_Nsd_NsdCatalog_Nsd: convert.RwNsdSerializer(),
+            VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: convert.VnfdSerializer(),
+            RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: convert.RwVnfdSerializer(),
+            }
+
+    HEADERS = {"content-type": "application/vnd.yang.data+json"}
+    TIMEOUT_SECS = 5
+    AUTH = ('admin', 'admin')
+
+    def __init__(self, log, host="127.0.0.1", port=8008, use_ssl=False, ssl_cert=None, ssl_key=None):
+        self._log = log
+        self._host = host
+        self.port = port
+        self._use_ssl = use_ssl
+        self._ssl_cert = ssl_cert
+        self._ssl_key = ssl_key
+
+        self.timeout = DescriptorOnboarder.TIMEOUT_SECS
+
+    @classmethod
+    def _get_headers(cls, auth):
+        headers = cls.HEADERS.copy()
+        if auth is not None:
+            headers['authorization'] = auth
+
+        return headers
+
+    def _get_url(self, descriptor_msg):
+        if type(descriptor_msg) not in DescriptorOnboarder.DESC_SERIALIZER_MAP:
+            raise TypeError("Invalid descriptor message type")
+
+        endpoint = DescriptorOnboarder.DESC_ENDPOINT_MAP[type(descriptor_msg)]
+
+        url = "{}://{}:{}/api/config/{}".format(
+                "https" if self._use_ssl else "http",
+                self._host,
+                self.port,
+                endpoint,
+                )
+
+        return url
+
+    def _make_request_args(self, descriptor_msg, auth=None):
+        if type(descriptor_msg) not in DescriptorOnboarder.DESC_SERIALIZER_MAP:
+            raise TypeError("Invalid descriptor message type")
+
+        serializer = DescriptorOnboarder.DESC_SERIALIZER_MAP[type(descriptor_msg)]
+        json_data = serializer.to_json_string(descriptor_msg)
+        url = self._get_url(descriptor_msg)
+
+        request_args = dict(
+            url=url,
+            data=json_data,
+            headers=self._get_headers(auth),
+            auth=DescriptorOnboarder.AUTH,
+            verify=False,
+            cert=(self._ssl_cert, self._ssl_key) if self._use_ssl else None,
+            timeout=self.timeout,
+        )
+
+        return request_args
+
+    def update(self, descriptor_msg, auth=None):
+        """ Update the descriptor config
+
+        Arguments:
+            descriptor_msg - A descriptor proto-gi msg
+            auth - the authorization header
+
+        Raises:
+            UpdateError - The descriptor config update failed
+        """
+        request_args = self._make_request_args(descriptor_msg, auth)
+        try:
+            response = requests.put(**request_args)
+            response.raise_for_status()
+        except requests.exceptions.ConnectionError as e:
+            msg = "Could not connect to restconf endpoint: %s" % str(e)
+            self._log.error(msg)
+            raise UpdateError(msg) from e
+        except requests.exceptions.HTTPError as e:
+            msg = "PUT request to %s error: %s" % (request_args["url"], response.text)
+            self._log.error(msg)
+            raise UpdateError(msg) from e
+        except requests.exceptions.Timeout as e:
+            msg = "Timed out connecting to restconf endpoint: %s", str(e)
+            self._log.error(msg)
+            raise UpdateError(msg) from e
+
+    def onboard(self, descriptor_msg, auth=None):
+        """ Onboard the descriptor config
+
+        Arguments:
+            descriptor_msg - A descriptor proto-gi msg
+            auth - the authorization header
+
+        Raises:
+            OnboardError - The descriptor config onboard failed
+        """
+
+        request_args = self._make_request_args(descriptor_msg, auth)
+        try:
+            response = requests.post(**request_args)
+            response.raise_for_status()
+        except requests.exceptions.ConnectionError as e:
+            msg = "Could not connect to restconf endpoint: %s" % str(e)
+            self._log.error(msg)
+            raise OnboardError(msg) from e
+        except requests.exceptions.HTTPError as e:
+            msg = "POST request to %s error: %s" % (request_args["url"], response.text)
+            self._log.error(msg)
+            raise OnboardError(msg) from e
+        except requests.exceptions.Timeout as e:
+            msg = "Timed out connecting to restconf endpoint: %s", str(e)
+            self._log.error(msg)
+            raise OnboardError(msg) from e
+
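+# A usage sketch (the descriptor message contents are illustrative):
+#
+#     onboarder = DescriptorOnboarder(log, host="127.0.0.1", port=8008)
+#     vnfd = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd(name="demo-vnf")
+#     onboarder.onboard(vnfd)   # POST to /api/config/vnfd-catalog/vnfd
+#     onboarder.update(vnfd)    # PUT to the same endpoint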
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/state.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/state.py
new file mode 100644 (file)
index 0000000..0028c12
--- /dev/null
@@ -0,0 +1,107 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import tornado.web
+
+from . import message
+
+
+class StateHandler(tornado.web.RequestHandler):
+    def options(self, *args, **kargs):
+        pass
+
+    def set_default_headers(self):
+        self.set_header('Access-Control-Allow-Origin', '*')
+        self.set_header('Access-Control-Allow-Headers',
+                        'Content-Type, Cache-Control, Accept, X-Requested-With, Authorization')
+        self.set_header('Access-Control-Allow-Methods', 'POST, GET, PUT, DELETE')
+
+    def initialize(self, log, loop):
+        self.log = log
+        self.loop = loop
+
+    def success(self, messages):
+        success = self.__class__.SUCCESS
+        return any(isinstance(msg, success) for msg in messages)
+
+    def failure(self, messages):
+        failure = self.__class__.FAILURE
+        return any(isinstance(msg, failure) for msg in messages)
+
+    def started(self, messages):
+        started = self.__class__.STARTED
+        return any(isinstance(msg, started) for msg in messages)
+
+    def status(self, messages):
+        if self.failure(messages):
+            return "failure"
+        elif self.success(messages):
+            return "success"
+        return "pending"
+
+    def notifications(self, messages):
+        notifications = {
+                "errors": list(),
+                "events": list(),
+                "warnings": list(),
+                }
+
+        for msg in messages:
+            if isinstance(msg, message.StatusMessage):
+                notifications["events"].append({
+                    'value': msg.name,
+                    'text': msg.text,
+                    'timestamp': msg.timestamp,
+                    })
+                continue
+
+            elif isinstance(msg, message.WarningMessage):
+                notifications["warnings"].append({
+                    'value': msg.text,
+                    'timestamp': msg.timestamp,
+                    })
+                continue
+
+            elif isinstance(msg, message.ErrorMessage):
+                notifications["errors"].append({
+                    'value': msg.text,
+                    'timestamp': msg.timestamp,
+                    })
+                continue
+
+            elif isinstance(msg, message.FilenameMessage):
+                notifications["filename"] = msg.text
+                continue
+
+            self.log.warning('unrecognized message: {}'.format(msg))
+
+        return notifications
+
+    def get(self, transaction_id):
+        if transaction_id not in self.application.messages:
+            raise tornado.web.HTTPError(404, "unrecognized transaction ID")
+
+        messages = self.application.messages[transaction_id]
+        messages.sort(key=lambda m: m.timestamp)
+
+        if not self.started(messages):
+            raise tornado.web.HTTPError(404, "unrecognized transaction ID")
+
+        notifications = self.notifications(messages)
+        notifications["status"] = self.status(messages)
+
+        self.write(tornado.escape.json_encode(notifications))
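+
+# For reference, the JSON document written above is expected to take roughly
+# this shape (illustrative values):
+#
+#     {
+#         "status": "pending",    # or "success" / "failure"
+#         "errors":   [{"value": "...", "timestamp": 1473286575.1}],
+#         "events":   [{"value": "onboard-started",
+#                       "text": "onboarding process started",
+#                       "timestamp": 1473286570.2}],
+#         "warnings": [],
+#         "filename": "..."       # present only if a FilenameMessage was seen
+#     }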
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tasklet.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tasklet.py
new file mode 100644 (file)
index 0000000..ca09d33
--- /dev/null
@@ -0,0 +1,457 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+
+import tornado
+import tornado.httputil
+import tornado.httpserver
+import tornado.platform.asyncio
+
+import tornadostreamform.multipart_streamer as multipart_streamer
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+
+from gi.repository import (
+    RwDts as rwdts,
+    RwLaunchpadYang as rwlaunchpad,
+    RwcalYang as rwcal,
+    RwTypes,
+)
+
+import rift.tasklets
+import rift.mano.cloud
+import rift.mano.config_agent
+from rift.package import store
+
+from . import uploader
+from . import datacenters
+
+MB = 1024 * 1024
+GB = 1024 * MB
+TB = 1024 * GB
+
+MAX_BUFFER_SIZE = 1 * MB  # Max. size of the in-memory read buffer
+MAX_BODY_SIZE = 1 * MB  # Max. size of a request body loaded into memory
+
+
+def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
+    # Unfortunately, it is currently difficult to figure out exactly what has
+    # changed in this xact without Pbdelta support (RIFT-4916)
+    # As a workaround, we can fetch the pre and post xact elements and
+    # perform a comparison to figure out adds/deletes/updates
+    xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
+    curr_cfgs = list(dts_member_reg.elements)
+
+    xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
+    curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
+
+    # Find Adds
+    added_keys = set(xact_key_map) - set(curr_key_map)
+    added_cfgs = [xact_key_map[key] for key in added_keys]
+
+    # Find Deletes
+    deleted_keys = set(curr_key_map) - set(xact_key_map)
+    deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
+
+    # Find Updates
+    updated_keys = set(curr_key_map) & set(xact_key_map)
+    updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
+
+    return added_cfgs, deleted_cfgs, updated_cfgs
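+
+# A worked example with hypothetical keys: if the xact holds configs keyed
+# {a, b} and the current registration holds {b, c}, the function returns
+# adds=[a], deletes=[c], and updates=[b] only when b's xact and current
+# messages differ.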
+
+
+class CatalogDtsHandler(object):
+    def __init__(self, tasklet, app):
+        self.app = app
+        self.reg = None
+        self.tasklet = tasklet
+
+    @property
+    def log(self):
+        return self.tasklet.log
+
+    @property
+    def dts(self):
+        return self.tasklet.dts
+
+
+class NsdCatalogDtsHandler(CatalogDtsHandler):
+    XPATH = "C,/nsd:nsd-catalog/nsd:nsd"
+
+    def add_nsd(self, nsd):
+        self.log.debug('nsd-catalog-handler:add:{}'.format(nsd.id))
+        if nsd.id not in self.tasklet.nsd_catalog:
+            self.tasklet.nsd_catalog[nsd.id] = nsd
+        else:
+            self.log.error("nsd already in catalog: {}".format(nsd.id))
+
+    def update_nsd(self, nsd):
+        self.log.debug('nsd-catalog-handler:update:{}'.format(nsd.id))
+        if nsd.id in self.tasklet.nsd_catalog:
+            self.tasklet.nsd_catalog[nsd.id] = nsd
+        else:
+            self.log.error("unrecognized NSD: {}".format(nsd.id))
+
+    def delete_nsd(self, nsd_id):
+        self.log.debug('nsd-catalog-handler:delete:{}'.format(nsd_id))
+        if nsd_id in self.tasklet.nsd_catalog:
+            del self.tasklet.nsd_catalog[nsd_id]
+        else:
+            self.log.error("unrecognized NSD: {}".format(nsd_id))
+
+        try:
+            self.tasklet.nsd_package_store.delete_package(nsd_id)
+        except store.PackageStoreError as e:
+            self.log.warning("could not delete package from store: %s", str(e))
+
+    @asyncio.coroutine
+    def register(self):
+        def apply_config(dts, acg, xact, action, _):
+            if xact.xact is None:
+                # When RIFT first comes up, an INSTALL is called with the current config
+                # Since confd doesn't actually persist data this never has any data so
+                # skip this for now.
+                self.log.debug("No xact handle.  Skipping apply config")
+                return
+
+            add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
+                    dts_member_reg=self.reg,
+                    xact=xact,
+                    key_name="id",
+                    )
+
+            # Handle Deletes
+            for cfg in delete_cfgs:
+                self.delete_nsd(cfg.id)
+
+            # Handle Adds
+            for cfg in add_cfgs:
+                self.add_nsd(cfg)
+
+            # Handle Updates
+            for cfg in update_cfgs:
+                self.update_nsd(cfg)
+
+        self.log.debug("Registering for NSD catalog")
+
+        acg_handler = rift.tasklets.AppConfGroup.Handler(
+                        on_apply=apply_config,
+                        )
+
+        with self.dts.appconf_group_create(acg_handler) as acg:
+            self.reg = acg.register(
+                    xpath=NsdCatalogDtsHandler.XPATH,
+                    flags=rwdts.Flag.SUBSCRIBER,
+                    )
+
+
+class VnfdCatalogDtsHandler(CatalogDtsHandler):
+    XPATH = "C,/vnfd:vnfd-catalog/vnfd:vnfd"
+
+    def add_vnfd(self, vnfd):
+        self.log.debug('vnfd-catalog-handler:add:{}'.format(vnfd.id))
+        if vnfd.id not in self.tasklet.vnfd_catalog:
+            self.tasklet.vnfd_catalog[vnfd.id] = vnfd
+
+        else:
+            self.log.error("VNFD already in catalog: {}".format(vnfd.id))
+
+    def update_vnfd(self, vnfd):
+        self.log.debug('vnfd-catalog-handler:update:{}'.format(vnfd.id))
+        if vnfd.id in self.tasklet.vnfd_catalog:
+            self.tasklet.vnfd_catalog[vnfd.id] = vnfd
+
+        else:
+            self.log.error("unrecognized VNFD: {}".format(vnfd.id))
+
+    def delete_vnfd(self, vnfd_id):
+        self.log.debug('vnfd-catalog-handler:delete:{}'.format(vnfd_id))
+        if vnfd_id in self.tasklet.vnfd_catalog:
+            del self.tasklet.vnfd_catalog[vnfd_id]
+        else:
+            self.log.error("unrecognized VNFD: {}".format(vnfd_id))
+
+        try:
+            self.tasklet.vnfd_package_store.delete_package(vnfd_id)
+        except store.PackageStoreError as e:
+            self.log.warning("could not delete package from store: %s", str(e))
+
+    @asyncio.coroutine
+    def register(self):
+        def apply_config(dts, acg, xact, action, _):
+            if xact.xact is None:
+                # When RIFT first comes up, an INSTALL is called with the current config
+                # Since confd doesn't actually persist data this never has any data so
+                # skip this for now.
+                self.log.debug("No xact handle.  Skipping apply config")
+                return
+
+            add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
+                    dts_member_reg=self.reg,
+                    xact=xact,
+                    key_name="id",
+                    )
+
+            # Handle Deletes
+            for cfg in delete_cfgs:
+                self.delete_vnfd(cfg.id)
+
+            # Handle Adds
+            for cfg in add_cfgs:
+                self.add_vnfd(cfg)
+
+            # Handle Updates
+            for cfg in update_cfgs:
+                self.update_vnfd(cfg)
+
+        self.log.debug("Registering for VNFD catalog")
+
+        acg_handler = rift.tasklets.AppConfGroup.Handler(
+                        on_apply=apply_config,
+                        )
+
+        with self.dts.appconf_group_create(acg_handler) as acg:
+            self.reg = acg.register(
+                    xpath=VnfdCatalogDtsHandler.XPATH,
+                    flags=rwdts.Flag.SUBSCRIBER,
+                    )
+
+
+
+class CfgAgentAccountHandlers(object):
+    def __init__(self, dts, log, log_hdl, loop):
+        self._dts = dts
+        self._log = log
+        self._log_hdl = log_hdl
+        self._loop = loop
+
+        self._log.debug("creating config agent account config handler")
+        self.cfg_agent_cfg_handler = rift.mano.config_agent.ConfigAgentSubscriber(
+            self._dts, self._log,
+            rift.mano.config_agent.ConfigAgentCallbacks(
+                on_add_apply=self.on_cfg_agent_account_added,
+                on_delete_apply=self.on_cfg_agent_account_deleted,
+            )
+        )
+
+        self._log.debug("creating config agent account opdata handler")
+        self.cfg_agent_operdata_handler = rift.mano.config_agent.CfgAgentDtsOperdataHandler(
+            self._dts, self._log, self._loop,
+        )
+
+    def on_cfg_agent_account_deleted(self, account):
+        self._log.debug("config agent account deleted")
+        self.cfg_agent_operdata_handler.delete_cfg_agent_account(account.name)
+
+    def on_cfg_agent_account_added(self, account):
+        self._log.debug("config agent account added")
+        self.cfg_agent_operdata_handler.add_cfg_agent_account(account)
+
+    @asyncio.coroutine
+    def register(self):
+        self.cfg_agent_cfg_handler.register()
+        yield from self.cfg_agent_operdata_handler.register()
+
+
+
+class CloudAccountHandlers(object):
+    def __init__(self, dts, log, log_hdl, loop, app):
+        self._log = log
+        self._log_hdl = log_hdl
+        self._dts = dts
+        self._loop = loop
+        self._app = app
+
+        self._log.debug("creating cloud account config handler")
+        self.cloud_cfg_handler = rift.mano.cloud.CloudAccountConfigSubscriber(
+            self._dts, self._log, self._log_hdl,
+            rift.mano.cloud.CloudAccountConfigCallbacks(
+                on_add_apply=self.on_cloud_account_added,
+                on_delete_apply=self.on_cloud_account_deleted,
+            )
+        )
+
+        self._log.debug("creating cloud account opdata handler")
+        self.cloud_operdata_handler = rift.mano.cloud.CloudAccountDtsOperdataHandler(
+            self._dts, self._log, self._loop,
+        )
+
+    def on_cloud_account_deleted(self, account_name):
+        self._log.debug("cloud account deleted")
+        self._app.accounts.clear()
+        self._app.accounts.extend(list(self.cloud_cfg_handler.accounts.values()))
+        self.cloud_operdata_handler.delete_cloud_account(account_name)
+
+    def on_cloud_account_added(self, account):
+        self._log.debug("cloud account added")
+        self._app.accounts.clear()
+        self._app.accounts.extend(list(self.cloud_cfg_handler.accounts.values()))
+        self._log.debug("accounts: %s", self._app.accounts)
+        self.cloud_operdata_handler.add_cloud_account(account)
+
+    @asyncio.coroutine
+    def register(self):
+        self.cloud_cfg_handler.register()
+        yield from self.cloud_operdata_handler.register()
+
+
+class LaunchpadTasklet(rift.tasklets.Tasklet):
+    UPLOAD_MAX_BODY_SIZE = MAX_BODY_SIZE
+    UPLOAD_MAX_BUFFER_SIZE = MAX_BUFFER_SIZE
+    UPLOAD_PORT = "4567"
+
+    def __init__(self, *args, **kwargs):
+        super(LaunchpadTasklet, self).__init__(*args, **kwargs)
+        self.rwlog.set_category("rw-mano-log")
+        self.rwlog.set_subcategory("launchpad")
+
+        self.app = None
+        self.server = None
+
+        self.account_handler = None
+        self.config_handler = None
+        self.nsd_catalog_handler = None
+        self.vld_catalog_handler = None
+        self.vnfd_catalog_handler = None
+        self.cloud_handler = None
+        self.datacenter_handler = None
+        self.lp_config_handler = None
+
+        self.vnfd_package_store = store.VnfdPackageFilesystemStore(self.log)
+        self.nsd_package_store = store.NsdPackageFilesystemStore(self.log)
+
+        self.nsd_catalog = dict()
+        self.vld_catalog = dict()
+        self.vnfd_catalog = dict()
+
+    @property
+    def cloud_accounts(self):
+        if self.cloud_handler is None:
+            return list()
+
+        return list(self.cloud_handler.cloud_cfg_handler.accounts.values())
+
+    def start(self):
+        super(LaunchpadTasklet, self).start()
+        self.log.info("Starting LaunchpadTasklet")
+
+        self.log.debug("Registering with dts")
+        self.dts = rift.tasklets.DTS(
+                self.tasklet_info,
+                rwlaunchpad.get_schema(),
+                self.loop,
+                self.on_dts_state_change
+                )
+
+        self.log.debug("Created DTS Api GI Object: %s", self.dts)
+
+    def stop(self):
+        try:
+            self.server.stop()
+            self.dts.deinit()
+        except Exception:
+            self.log.exception("Caught Exception in LP stop")
+            raise
+
+    @asyncio.coroutine
+    def init(self):
+        io_loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
+        self.app = uploader.UploaderApplication(self)
+
+        manifest = self.tasklet_info.get_pb_manifest()
+        ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
+        ssl_key = manifest.bootstrap_phase.rwsecurity.key
+        ssl_options = {
+                "certfile": ssl_cert,
+                "keyfile": ssl_key,
+                }
+
+        if manifest.bootstrap_phase.rwsecurity.use_ssl:
+            self.server = tornado.httpserver.HTTPServer(
+                self.app,
+                max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE,
+                io_loop=io_loop,
+                ssl_options=ssl_options,
+            )
+
+        else:
+            self.server = tornado.httpserver.HTTPServer(
+                self.app,
+                max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE,
+                io_loop=io_loop,
+            )
+
+        self.log.debug("creating NSD catalog handler")
+        self.nsd_catalog_handler = NsdCatalogDtsHandler(self, self.app)
+        yield from self.nsd_catalog_handler.register()
+
+        self.log.debug("creating VNFD catalog handler")
+        self.vnfd_catalog_handler = VnfdCatalogDtsHandler(self, self.app)
+        yield from self.vnfd_catalog_handler.register()
+
+        self.log.debug("creating datacenter handler")
+        self.datacenter_handler = datacenters.DataCenterPublisher(self)
+        yield from self.datacenter_handler.register()
+
+        self.log.debug("creating cloud account handler")
+        self.cloud_handler = CloudAccountHandlers(
+                self.dts, self.log, self.log_hdl, self.loop, self.app
+                )
+        yield from self.cloud_handler.register()
+
+        self.log.debug("creating config agent handler")
+        self.config_handler = CfgAgentAccountHandlers(self.dts, self.log, self.log_hdl, self.loop)
+        yield from self.config_handler.register()
+
+    @asyncio.coroutine
+    def run(self):
+        self.server.listen(LaunchpadTasklet.UPLOAD_PORT)
+
+    def on_instance_started(self):
+        self.log.debug("Got instance started callback")
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Handle DTS state change
+
+        Take action according to current DTS state to transition application
+        into the corresponding application state
+
+        Arguments
+            state - current dts state
+
+        """
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self.dts.handle.set_state(next_state)
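+
+# Taken together, the two tables above implement this lifecycle (sketch):
+#
+#     DTS state  action taken             next DTS state
+#     INIT       init()  (registrations)  REGN_COMPLETE
+#     CONFIG     (none)                   RUN
+#     RUN        run()   (start server)   (none)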
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tosca.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/tosca.py
new file mode 100644 (file)
index 0000000..8ccc899
--- /dev/null
@@ -0,0 +1,240 @@
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import logging
+import os
+import shutil
+import subprocess
+import tempfile
+import uuid
+import zipfile
+
+from rift.mano.tosca_translator.shell import TranslatorShell
+from rift.mano.yang_translator.rwmano.yang_translator import YangTranslator
+
+
+class ToscaPackageError(Exception):
+    pass
+
+
+class ToscaPackageReadError(Exception):
+    pass
+
+
+class InvalidToscaPackageError(ToscaPackageError):
+    pass
+
+
+class ToscaTranslateError(ToscaPackageError):
+    pass
+
+
+class YangTranslateError(Exception):
+    pass
+
+
+class ToscaArchiveCreateError(YangTranslateError):
+    pass
+
+
+class YangTranslateNsdError(YangTranslateError):
+    pass
+
+
+class ExportTosca(object):
+    def __init__(self, log=None):
+        if log is None:
+            self.log = logging.getLogger("rw-mano-log")
+        else:
+            self.log = log
+        self.nsds = {}
+        self.csars = list()
+        self.images = {}  # images added via add_image(), keyed by image name
+
+    def add_image(self, nsd_id, image, chksum=None):
+        if image.name not in self.images:
+            self.images[image.name] = image
+
+    def add_vld(self, nsd_id, vld, pkg=None):
+        if 'vlds' not in self.nsds[nsd_id]:
+            self.nsds[nsd_id]['vlds'] = []
+        self.nsds[nsd_id]['vlds'].append(vld)
+        if pkg:
+            self.nsds[nsd_id]['pkgs'].append(pkg)
+
+    def add_vnfd(self, nsd_id, vnfd, pkg=None):
+        if 'vnfds' not in self.nsds[nsd_id]:
+            self.nsds[nsd_id]['vnfds'] = []
+        self.nsds[nsd_id]['vnfds'].append(vnfd)
+        if pkg:
+            self.nsds[nsd_id]['pkgs'].append(pkg)
+
+    def add_nsd(self, nsd, pkg=None):
+        nsd_id = str(uuid.uuid4())
+        self.nsds[nsd_id] = {'nsd': nsd}
+        self.nsds[nsd_id]['pkgs'] = []
+        if pkg:
+            self.nsds[nsd_id]['pkgs'].append(pkg)
+        return nsd_id
+
+    def create_csar(self, nsd_id, dest=None):
+        if dest is None:
+            dest = tempfile.mkdtemp()
+
+        # Convert YANG to dict
+        yangs = {}
+        yangs['vnfd'] = []
+        for vnfd in self.nsds[nsd_id]['vnfds']:
+            yangs['vnfd'].append(vnfd.as_dict())
+            self.log.debug("Translate VNFD: {}".format(vnfd.as_dict()))
+        yangs['nsd'] = []
+        yangs['nsd'].append(self.nsds[nsd_id]['nsd'].as_dict())
+        self.log.debug("Translate NSD : {}".format(yangs['nsd']))
+
+        # Translate YANG model to TOSCA template
+        translator = YangTranslator(self.log,
+                                    yangs=yangs,
+                                    packages=self.nsds[nsd_id]['pkgs'])
+        output = translator.translate()
+        self.csars.extend(translator.write_output(output,
+                                                  output_dir=dest,
+                                                  archive=True))
+        self.log.debug("Created CSAR archive {}".format(self.csars[-1]))
+
+    def create_archive(self, archive_name, dest=None):
+        if not len(self.nsds):
+            self.log.error("Did not find any NSDs to export")
+            return
+
+        if dest is None:
+            dest = tempfile.mkdtemp()
+
+        prevdir = os.getcwd()
+
+        if not os.path.exists(dest):
+            os.makedirs(dest)
+
+        try:
+            # Convert each NSD to a TOSCA template
+            for nsd_id in self.nsds:
+                # Not passing the dest dir to prevent clash in case
+                # multiple export of the same desc happens
+                self.create_csar(nsd_id)
+
+        except Exception as e:
+            msg = "Exception converting NSD {}: {}".format(nsd_id, e)
+            self.log.exception(e)
+            raise YangTranslateNsdError(msg)
+
+        os.chdir(dest)
+
+        try:
+            if archive_name.endswith(".zip"):
+                archive_name = archive_name[:-4]
+
+            archive_path = os.path.join(dest, archive_name)
+
+            # Construct a zip of the csar archives
+            zip_name = '{}.zip'.format(archive_path)
+
+            if len(self.csars) == 1:
+                # Only 1 TOSCA template, just rename csar if required
+                if self.csars[0] != zip_name:
+                    mv_cmd = "mv {} {}".format(self.csars[0], zip_name)
+                    subprocess.check_call(mv_cmd, shell=True, stdout=subprocess.DEVNULL)
+                    # Remove the temporary directory created
+                    shutil.rmtree(os.path.dirname(self.csars[0]))
+
+            else:
+                with zipfile.ZipFile('{}.partial'.format(zip_name), 'w') as zf:
+                    for csar in self.csars:
+                        # Move file to the current dest dir
+                        if os.path.dirname(csar) != dest:
+                            file_mv = "mv {} {}".format(csar, dest)
+                            subprocess.check_call(file_mv,
+                                                  shell=True,
+                                                  stdout=subprocess.DEVNULL)
+                            # Remove the temporary directory created
+                            shutil.rmtree(os.path.dirname(csar))
+
+                        csar_f = os.path.basename(csar)
+                        # Now add to the archive
+                        zf.write(csar_f)
+                        # Remove the csar file
+                        os.remove(csar_f)
+
+                    # Rename archive to final name
+                    mv_cmd = "mv {0}.partial {0}".format(zip_name)
+                    subprocess.check_call(mv_cmd, shell=True, stdout=subprocess.DEVNULL)
+
+            return zip_name
+
+        except Exception as e:
+            msg = "Creating CSAR archive failed: {0}".format(e)
+            self.log.exception(e)
+            raise YangTranslateError(msg)
+
+        finally:
+            os.chdir(prevdir)
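+
+# A usage sketch (descriptor messages and packages are assumed inputs):
+#
+#     exporter = ExportTosca(log)
+#     nsd_key = exporter.add_nsd(nsd_msg, pkg=nsd_pkg)
+#     exporter.add_vnfd(nsd_key, vnfd_msg, pkg=vnfd_pkg)
+#     archive = exporter.create_archive("transaction-id", dest="/tmp/exports")
+#     # 'archive' is the path to a .zip containing one or more CSARs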
+
+class ImportTosca(object):
+
+    def __init__(self, log, in_file, out_dir=None):
+        if log is None:
+            self.log = logging.getLogger("rw-mano-log")
+        else:
+            self.log = log
+        self.in_file = in_file
+        self.out_dir = out_dir
+
+    def translate(self):
+        # Check if the input file is a zip file
+        if not zipfile.is_zipfile(self.in_file):
+            err_msg = "{} is not a zip file.".format(self.in_file)
+            self.log.error(err_msg)
+            raise InvalidToscaPackageError(err_msg)
+
+        try:
+            # Store the current working directory
+            prevdir = os.getcwd()
+
+            # See if we need to create a output directory
+            out_dir = self.out_dir
+            if out_dir is None:
+                out_dir = tempfile.mkdtemp()
+
+            # Call the TOSCA translator
+            self.log.debug("Calling tosca-translator for {}".
+                           format(self.in_file))
+            return TranslatorShell(self.log).translate(self.in_file,
+                                                       out_dir,
+                                                       archive=True)
+
+        except Exception as e:
+            self.log.exception(e)
+            raise ToscaTranslateError("Error translating TOSCA package {}: {}".
+                                      format(self.in_file, e))
+
+        finally:
+            os.chdir(prevdir)
+
+    @staticmethod
+    def is_tosca_package(in_file):
+        return zipfile.is_zipfile(in_file)
+
+
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/uploader.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rift/tasklets/rwlaunchpad/uploader.py
new file mode 100644 (file)
index 0000000..081c1f5
--- /dev/null
@@ -0,0 +1,881 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import os
+import threading
+import uuid
+import zlib
+
+import tornado
+import tornado.escape
+import tornado.ioloop
+import tornado.web
+import tornado.httputil
+import tornadostreamform.multipart_streamer as multipart_streamer
+
+import requests
+
+# disable warnings about unverified HTTPS certificates
+from requests.packages.urllib3.exceptions import InsecureRequestWarning
+requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+
+import gi
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('NsdYang', '1.0')
+gi.require_version('VnfdYang', '1.0')
+
+from gi.repository import (
+        NsdYang,
+        VnfdYang,
+        )
+import rift.mano.cloud
+
+import rift.package.charm
+import rift.package.checksums
+import rift.package.config
+import rift.package.convert
+import rift.package.icon
+import rift.package.image
+import rift.package.package
+import rift.package.script
+import rift.package.store
+
+from . import (
+        export,
+        extract,
+        image,
+        message,
+        onboard,
+        state,
+        )
+
+from .message import (
+        MessageException,
+
+        # Onboard Error Messages
+        OnboardChecksumMismatch,
+        OnboardDescriptorError,
+        OnboardDescriptorExistsError,
+        OnboardDescriptorFormatError,
+        OnboardError,
+        OnboardExtractionError,
+        OnboardImageUploadError,
+        OnboardMissingContentBoundary,
+        OnboardMissingContentType,
+        OnboardMissingTerminalBoundary,
+        OnboardUnreadableHeaders,
+        OnboardUnreadablePackage,
+        OnboardUnsupportedMediaType,
+
+        # Onboard Status Messages
+        OnboardDescriptorOnboard,
+        OnboardFailure,
+        OnboardImageUpload,
+        OnboardPackageUpload,
+        OnboardPackageValidation,
+        OnboardStart,
+        OnboardSuccess,
+
+
+        # Update Error Messages
+        UpdateChecksumMismatch,
+        UpdateDescriptorError,
+        UpdateDescriptorFormatError,
+        UpdateError,
+        UpdateExtractionError,
+        UpdateImageUploadError,
+        UpdateMissingContentBoundary,
+        UpdateMissingContentType,
+        UpdatePackageNotFoundError,
+        UpdateUnreadableHeaders,
+        UpdateUnreadablePackage,
+        UpdateUnsupportedMediaType,
+
+        # Update Status Messages
+        UpdateDescriptorUpdate,
+        UpdateDescriptorUpdated,
+        UpdatePackageUpload,
+        UpdateStart,
+        UpdateSuccess,
+        UpdateFailure,
+        )
+
+from .tosca import ExportTosca
+
+MB = 1024 * 1024
+GB = 1024 * MB
+
+MAX_STREAMED_SIZE = 5 * GB
+
+
+class HttpMessageError(Exception):
+    def __init__(self, code, msg):
+        self.code = code
+        self.msg = msg
+
+
+class GzipTemporaryFileStreamedPart(multipart_streamer.TemporaryFileStreamedPart):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        # Create a decompressor for gzip data to decompress on the fly during upload
+        # http://stackoverflow.com/questions/2423866/python-decompressing-gzip-chunk-by-chunk
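+        # (a wbits value of 16 + MAX_WBITS tells zlib to expect a gzip header
+        # and trailer rather than a raw zlib stream)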
+        self._decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
+
+    def feed(self, data):
+        decompressed_data = self._decompressor.decompress(data)
+        if decompressed_data:
+            super().feed(decompressed_data)
+
+    def finalize(self):
+        # All data has arrived, flush the decompressor to get any last decompressed data
+        decompressed_data = self._decompressor.flush()
+        super().feed(decompressed_data)
+        super().finalize()
+
+
+class GzipMultiPartStreamer(multipart_streamer.MultiPartStreamer):
+    """ This Multipart Streamer decompresses gzip files on the fly during multipart upload """
+
+    @staticmethod
+    def _get_descriptor_name_from_headers(headers):
+        descriptor_filename = None
+
+        for entry in headers:
+            if entry["value"] != "form-data":
+                continue
+
+            form_data_params = entry["params"]
+            if "name" in form_data_params:
+                if form_data_params["name"] != "descriptor":
+                    continue
+
+                if "filename" not in form_data_params:
+                    continue
+
+                descriptor_filename = form_data_params["filename"]
+
+        return descriptor_filename
+
+    def create_part(self, headers):
+        """ Create the StreamedPart subclass depending on the descriptor filename
+
+        For gzipped descriptor packages, create a GzipTemporaryFileStreamedPart which
+        can decompress the gzip while it's being streamed into the launchpad directely
+        into a file.
+
+        Returns:
+            The descriptor filename
+        """
+        filename = GzipMultiPartStreamer._get_descriptor_name_from_headers(headers)
+        if filename is None or not filename.endswith(".gz"):
+            return multipart_streamer.TemporaryFileStreamedPart(self, headers)
+
+        return GzipTemporaryFileStreamedPart(self, headers)
+
+
+class RequestHandler(tornado.web.RequestHandler):
+    def options(self, *args, **kargs):
+        pass
+
+    def set_default_headers(self):
+        self.set_header('Access-Control-Allow-Origin', '*')
+        self.set_header('Access-Control-Allow-Headers',
+                        'Content-Type, Cache-Control, Accept, X-Requested-With, Authorization')
+        self.set_header('Access-Control-Allow-Methods', 'POST, GET, PUT, DELETE')
+
+
+@tornado.web.stream_request_body
+class StreamingUploadHandler(RequestHandler):
+    def initialize(self, log, loop):
+        """Initialize the handler
+
+        Arguments:
+            log  - the logger that this handler should use
+            loop - the tasklet's ioloop
+
+        """
+        self.transaction_id = str(uuid.uuid4())
+
+        self.loop = loop
+        self.log = self.application.get_logger(self.transaction_id)
+
+        self.part_streamer = None
+
+        self.log.debug('created handler (transaction_id = {})'.format(self.transaction_id))
+
+    def msg_missing_content_type(self):
+        raise NotImplementedError()
+
+    def msg_unsupported_media_type(self):
+        raise NotImplementedError()
+
+    def msg_missing_content_boundary(self):
+        raise NotImplementedError()
+
+    def msg_start(self):
+        raise NotImplementedError()
+
+    def msg_success(self):
+        raise NotImplementedError()
+
+    def msg_failure(self):
+        raise NotImplementedError()
+
+    def msg_package_upload(self):
+        raise NotImplementedError()
+
+    @tornado.gen.coroutine
+    def prepare(self):
+        """Prepare the handler for a request
+
+        The prepare function is the first part of a request transaction. It
+        creates a temporary file that uploaded data can be written to.
+
+        """
+        if self.request.method != "POST":
+            return
+
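+        # Raise tornado's default body-size limit so packages up to
+        # MAX_STREAMED_SIZE can be streamed in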
+        self.request.connection.set_max_body_size(MAX_STREAMED_SIZE)
+
+        self.log.message(self.msg_start())
+
+        try:
+            # Retrieve the content type and parameters from the request
+            content_type = self.request.headers.get('content-type', None)
+            if content_type is None:
+                raise HttpMessageError(400, self.msg_missing_content_type())
+
+            content_type, params = tornado.httputil._parse_header(content_type)
+
+            if "multipart/form-data" != content_type.lower():
+                raise HttpMessageError(415, self.msg_unsupported_media_type())
+
+            if "boundary" not in params:
+                raise HttpMessageError(400, self.msg_missing_content_boundary())
+
+            # The total request size, when present, comes from the Content-Length header.
+            try:
+                total = int(self.request.headers.get("Content-Length", "0"))
+            except ValueError:
+                self.log.warning("Content-Length header was not an integer")
+                # For any well formed browser request, Content-Length should have a value.
+                total = 0
+
+            # Create the streamer that will accept the incoming multipart data
+            self.part_streamer = GzipMultiPartStreamer(total)
+
+        except HttpMessageError as e:
+            self.log.message(e.msg)
+            self.log.message(self.msg_failure())
+
+            raise tornado.web.HTTPError(e.code, e.msg.name)
+
+        except Exception as e:
+            self.log.exception(e)
+            self.log.message(self.msg_failure())
+
+    @tornado.gen.coroutine
+    def data_received(self, chunk):
+        """Write data to the current file
+
+        Arguments:
+            data - a chunk of data to write to file
+
+        """
+
+        """When a chunk of data is received, we forward it to the multipart streamer."""
+        self.part_streamer.data_received(chunk)
+
+    def post(self):
+        """Handle a post request
+
+        The function is called after any data associated with the body of the
+        request has been received.
+
+        """
+        # You MUST call this to close the incoming stream.
+        self.part_streamer.data_complete()
+
+        desc_parts = self.part_streamer.get_parts_by_name("descriptor")
+        if len(desc_parts) != 1:
+            raise HttpMessageError(400, OnboardError("Descriptor option not found"))
+
+        self.log.message(self.msg_package_upload())
+
+
+class UploadHandler(StreamingUploadHandler):
+    """
+    This handler is used to upload archives that contain VNFDs, NSDs, and PNFDs
+    to the launchpad. This is a streaming handler that writes uploaded archives
+    to disk without loading them all into memory.
+    """
+
+    def msg_missing_content_type(self):
+        return OnboardMissingContentType()
+
+    def msg_unsupported_media_type(self):
+        return OnboardUnsupportedMediaType()
+
+    def msg_missing_content_boundary(self):
+        return OnboardMissingContentBoundary()
+
+    def msg_start(self):
+        return OnboardStart()
+
+    def msg_success(self):
+        return OnboardSuccess()
+
+    def msg_failure(self):
+        return OnboardFailure()
+
+    def msg_package_upload(self):
+        return OnboardPackageUpload()
+
+    def post(self):
+        """Handle a post request
+
+        The function is called after any data associated with the body of the
+        request has been received.
+
+        """
+        try:
+            super().post()
+            self.application.onboard(
+                    self.part_streamer,
+                    self.transaction_id,
+                    auth=self.request.headers.get('authorization', None),
+                    )
+
+            self.set_status(200)
+            self.write(tornado.escape.json_encode({
+                "transaction_id": self.transaction_id,
+                }))
+
+        except Exception:
+            self.log.exception("Upload POST failed")
+            self.part_streamer.release_parts()
+            raise
+
+
+class UpdateHandler(StreamingUploadHandler):
+    def msg_missing_content_type(self):
+        return UpdateMissingContentType()
+
+    def msg_unsupported_media_type(self):
+        return UpdateUnsupportedMediaType()
+
+    def msg_missing_content_boundary(self):
+        return UpdateMissingContentBoundary()
+
+    def msg_start(self):
+        return UpdateStart()
+
+    def msg_success(self):
+        return UpdateSuccess()
+
+    def msg_failure(self):
+        return UpdateFailure()
+
+    def msg_package_upload(self):
+        return UpdatePackageUpload()
+
+    def post(self):
+        """Handle a post request
+
+        The function is called after any data associated with the body of the
+        request has been received.
+
+        """
+        try:
+            super().post()
+
+            self.application.update(
+                    self.part_streamer,
+                    self.transaction_id,
+                    auth=self.request.headers.get('authorization', None),
+                    )
+
+            self.set_status(200)
+            self.write(tornado.escape.json_encode({
+                "transaction_id": self.transaction_id,
+                }))
+        except Exception:
+            self.log.exception("Upload POST failed")
+            self.part_streamer.release_parts()
+            raise
+
+
+class UploadStateHandler(state.StateHandler):
+    STARTED = OnboardStart
+    SUCCESS = OnboardSuccess
+    FAILURE = OnboardFailure
+
+
+class UpdateStateHandler(state.StateHandler):
+    STARTED = UpdateStart
+    SUCCESS = UpdateSuccess
+    FAILURE = UpdateFailure
+
+
+class UpdatePackage(threading.Thread):
+    def __init__(self, log, loop, part_streamer, auth,
+                 onboarder, uploader, package_store_map):
+        super().__init__()
+        self.log = log
+        self.loop = loop
+        self.part_streamer = part_streamer
+        self.auth = auth
+        self.onboarder = onboarder
+        self.uploader = uploader
+        self.package_store_map = package_store_map
+
+        self.io_loop = tornado.ioloop.IOLoop.current()
+
+    def _update_package(self):
+        # Extract package could return multiple packages if
+        # the package is converted
+        for pkg in self.extract_package():
+            with pkg as temp_package:
+                package_checksums = self.validate_package(temp_package)
+                stored_package = self.update_package(temp_package)
+
+                try:
+                    self.extract_charms(temp_package)
+                    self.extract_scripts(temp_package)
+                    self.extract_configs(temp_package)
+                    self.extract_icons(temp_package)
+
+                    self.update_descriptors(temp_package)
+
+                except Exception:
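+                    # Roll back: delete the freshly stored package rather than
+                    # leave behind a package whose descriptors failed to update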
+                    self.delete_stored_package(stored_package)
+                    raise
+
+                else:
+                    self.upload_images(temp_package, package_checksums)
+
+    def run(self):
+        try:
+            self._update_package()
+            self.log.message(UpdateSuccess())
+
+        except MessageException as e:
+            self.log.message(e.msg)
+            self.log.message(UpdateFailure())
+
+        except Exception as e:
+            self.log.exception(e)
+            if str(e):
+                self.log.message(UpdateError(str(e)))
+            self.log.message(UpdateFailure())
+
+        finally:
+            self.part_streamer.release_parts()
+
+    def extract_package(self):
+        """Extract multipart message from tarball"""
+        desc_part = self.part_streamer.get_parts_by_name("descriptor")[0]
+
+        # Invoke the move API to prevent the part streamer from attempting
+        # to clean up (the file backed package will do that itself)
+        desc_part.move(desc_part.f_out.name)
+
+        package_name = desc_part.get_filename()
+        package_path = desc_part.f_out.name
+
+        extractor = extract.UploadPackageExtractor(self.log)
+        file_backed_packages = extractor.create_packages_from_upload(
+                package_name, package_path
+                )
+
+        return file_backed_packages
+
+    def get_package_store(self, package):
+        return self.package_store_map[package.descriptor_type]
+
+    def update_package(self, package):
+        store = self.get_package_store(package)
+
+        try:
+            store.update_package(package)
+        except rift.package.store.PackageNotFoundError as e:
+            # If the package doesn't exist, then it is possible the descriptor was onboarded
+            # out of band.  In that case, just store the package as is
+            self.log.warning("Package not found, storing new package instead.")
+            store.store_package(package)
+
+        stored_package = store.get_package(package.descriptor_id)
+
+        return stored_package
+
+    def delete_stored_package(self, package):
+        self.log.info("Deleting stored package: %s", package)
+        store = self.get_package_store(package)
+        try:
+            store.delete_package(package.descriptor_id)
+        except Exception as e:
+            self.log.warning("Failed to delete package from store: %s", str(e))
+
+    def upload_images(self, package, package_checksums):
+        image_file_map = rift.package.image.get_package_image_files(package)
+        if not image_file_map:
+            return
+
+        # Open every image up front so all handles can be closed in the finally block below
+        name_hdl_map = {name: package.open(image_file_map[name]) for name in image_file_map}
+
+        try:
+            for image_name, image_hdl in name_hdl_map.items():
+                image_file = image_file_map[image_name]
+                if image_file in package_checksums:
+                    image_checksum = package_checksums[image_file]
+                else:
+                    self.log.warning("checksum not provided for image %s.  Calculating checksum",
+                                     image_file)
+                    image_checksum = rift.package.checksums.checksum(
+                            package.open(image_file_map[image_name])
+                            )
+                try:
+                    self.uploader.upload_image(image_name, image_checksum, image_hdl)
+                    self.uploader.upload_image_to_cloud_accounts(image_name, image_checksum)
+
+                except image.ImageUploadError as e:
+                    self.log.exception("Failed to upload image: %s", image_name)
+                    raise MessageException(UpdateImageUploadError(str(e))) from e
+
+        finally:
+            for image_hdl in name_hdl_map.values():
+                image_hdl.close()
+
+
+    def extract_charms(self, package):
+        try:
+            charm_extractor = rift.package.charm.PackageCharmExtractor(self.log)
+            charm_extractor.extract_charms(package)
+        except rift.package.charm.CharmExtractionError as e:
+            raise MessageException(UpdateExtractionError()) from e
+
+    def extract_scripts(self, package):
+        try:
+            script_extractor = rift.package.script.PackageScriptExtractor(self.log)
+            script_extractor.extract_scripts(package)
+        except rift.package.script.ScriptExtractionError as e:
+            raise MessageException(UpdateExtractionError()) from e
+
+    def extract_configs(self, package):
+        try:
+            config_extractor = rift.package.config.PackageConfigExtractor(self.log)
+            config_extractor.extract_configs(package)
+        except rift.package.config.ConfigExtractionError as e:
+            raise MessageException(UpdateExtractionError()) from e
+
+    def extract_icons(self, package):
+        try:
+            icon_extractor = rift.package.icon.PackageIconExtractor(self.log)
+            icon_extractor.extract_icons(package)
+        except rift.package.icon.IconExtractionError as e:
+            raise MessageException(UpdateExtractionError()) from e
+
+    def validate_package(self, package):
+        checksum_validator = rift.package.package.PackageChecksumValidator(self.log)
+
+        try:
+            file_checksums = checksum_validator.validate(package)
+        except rift.package.package.PackageFileChecksumError as e:
+            raise MessageException(UpdateChecksumMismatch(e.filename)) from e
+        except rift.package.package.PackageValidationError as e:
+            raise MessageException(UpdateUnreadablePackage()) from e
+
+        return file_checksums
+
+    def update_descriptors(self, package):
+        descriptor_msg = package.descriptor_msg
+
+        self.log.message(UpdateDescriptorUpdate())
+
+        try:
+            self.onboarder.update(descriptor_msg)
+        except onboard.UpdateError as e:
+            raise MessageException(UpdateDescriptorError(package.descriptor_file)) from e
+
+
+class OnboardPackage(threading.Thread):
+    def __init__(self, log, loop, part_streamer, auth,
+                 onboarder, uploader, package_store_map):
+        super().__init__()
+        self.log = log
+        self.loop = loop
+        self.part_streamer = part_streamer
+        self.auth = auth
+        self.onboarder = onboarder
+        self.uploader = uploader
+        self.package_store_map = package_store_map
+
+        self.io_loop = tornado.ioloop.IOLoop.current()
+
+    def _onboard_package(self):
+        # Extract package could return multiple packages if
+        # the package is converted
+        for pkg in self.extract_package():
+            with pkg as temp_package:
+                package_checksums = self.validate_package(temp_package)
+                stored_package = self.store_package(temp_package)
+
+                try:
+                    self.extract_charms(temp_package)
+                    self.extract_scripts(temp_package)
+                    self.extract_configs(temp_package)
+                    self.extract_icons(temp_package)
+
+                    self.onboard_descriptors(temp_package)
+
+                except Exception:
+                    self.delete_stored_package(stored_package)
+                    raise
+
+                else:
+                    self.upload_images(temp_package, package_checksums)
+
+    def run(self):
+        try:
+            self._onboard_package()
+            self.log.message(OnboardSuccess())
+
+        except MessageException as e:
+            self.log.message(e.msg)
+            self.log.message(OnboardFailure())
+
+        except Exception as e:
+            self.log.exception(e)
+            if str(e):
+                self.log.message(OnboardError(str(e)))
+            self.log.message(OnboardFailure())
+
+        finally:
+            self.part_streamer.release_parts()
+
+    def extract_package(self):
+        """Extract multipart message from tarball"""
+        desc_part = self.part_streamer.get_parts_by_name("descriptor")[0]
+
+        # Invoke the move API to prevent the part streamer from attempting
+        # to clean up (the file backed package will do that itself)
+        desc_part.move(desc_part.f_out.name)
+
+        package_name = desc_part.get_filename()
+        package_path = desc_part.f_out.name
+
+        extractor = extract.UploadPackageExtractor(self.log)
+        file_backed_packages = extractor.create_packages_from_upload(
+                package_name, package_path
+                )
+
+        return file_backed_packages
+
+    def get_package_store(self, package):
+        return self.package_store_map[package.descriptor_type]
+
+    def store_package(self, package):
+        store = self.get_package_store(package)
+
+        try:
+            store.store_package(package)
+        except rift.package.store.PackageExistsError as e:
+            store.update_package(package)
+
+        stored_package = store.get_package(package.descriptor_id)
+
+        return stored_package
+
+    def delete_stored_package(self, package):
+        self.log.info("Deleting stored package: %s", package)
+        store = self.get_package_store(package)
+        try:
+            store.delete_package(package.descriptor_id)
+        except Exception as e:
+            self.log.warning("Failed to delete package from store: %s", str(e))
+
+    def upload_images(self, package, package_checksums):
+        image_file_map = rift.package.image.get_package_image_files(package)
+        if not image_file_map:
+            return
+
+        name_hdl_map = {name: package.open(image_file_map[name]) for name in image_file_map}
+        try:
+            for image_name, image_hdl in name_hdl_map.items():
+                image_file = image_file_map[image_name]
+                if image_file in package_checksums:
+                    image_checksum = package_checksums[image_file]
+                else:
+                    self.log.warning("checksum not provided for image %s.  Calculating checksum",
+                                     image_file)
+                    image_checksum = rift.package.checksums.checksum(
+                            package.open(image_file_map[image_name])
+                            )
+                try:
+                    self.uploader.upload_image(image_name, image_checksum, image_hdl)
+                    self.uploader.upload_image_to_cloud_accounts(image_name, image_checksum)
+
+                except image.ImageUploadError as e:
+                    raise MessageException(OnboardImageUploadError()) from e
+
+        finally:
+            for image_hdl in name_hdl_map.values():
+                image_hdl.close()
+
+    def extract_charms(self, package):
+        try:
+            charm_extractor = rift.package.charm.PackageCharmExtractor(self.log)
+            charm_extractor.extract_charms(package)
+        except rift.package.charm.CharmExtractionError as e:
+            raise MessageException(OnboardExtractionError()) from e
+
+    def extract_scripts(self, package):
+        try:
+            script_extractor = rift.package.script.PackageScriptExtractor(self.log)
+            script_extractor.extract_scripts(package)
+        except rift.package.script.ScriptExtractionError as e:
+            raise MessageException(OnboardExtractionError()) from e
+
+    def extract_configs(self, package):
+        try:
+            config_extractor = rift.package.config.PackageConfigExtractor(self.log)
+            config_extractor.extract_configs(package)
+        except rift.package.config.ConfigExtractionError as e:
+            raise MessageException(OnboardExtractionError()) from e
+
+    def extract_icons(self, package):
+        try:
+            icon_extractor = rift.package.icon.PackageIconExtractor(self.log)
+            icon_extractor.extract_icons(package)
+        except rift.package.icon.IconExtractionError as e:
+            raise MessageException(OnboardExtractionError()) from e
+
+    def validate_package(self, package):
+        checksum_validator = rift.package.package.PackageChecksumValidator(self.log)
+
+        try:
+            file_checksums = checksum_validator.validate(package)
+        except rift.package.package.PackageFileChecksumError as e:
+            raise MessageException(OnboardChecksumMismatch(e.filename)) from e
+        except rift.package.package.PackageValidationError as e:
+            raise MessageException(OnboardUnreadablePackage()) from e
+
+        return file_checksums
+
+    def onboard_descriptors(self, package):
+        descriptor_msg = package.descriptor_msg
+
+        self.log.message(OnboardDescriptorOnboard())
+
+        try:
+            self.onboarder.onboard(descriptor_msg)
+        except onboard.OnboardError as e:
+            raise MessageException(OnboardDescriptorError(package.descriptor_file)) from e
+
+
+class UploaderApplication(tornado.web.Application):
+    def __init__(self, tasklet):
+        self.tasklet = tasklet
+        self.accounts = []
+        self.messages = collections.defaultdict(list)
+        self.export_dir = os.path.join(os.environ['RIFT_ARTIFACTS'], 'launchpad/exports')
+
+        manifest = tasklet.tasklet_info.get_pb_manifest()
+        self.use_ssl = manifest.bootstrap_phase.rwsecurity.use_ssl
+        self.ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
+        self.ssl_key = manifest.bootstrap_phase.rwsecurity.key
+
+        self.uploader = image.ImageUploader(self.log, self.loop, tasklet.dts)
+        self.onboarder = onboard.DescriptorOnboarder(
+                self.log, "127.0.0.1", 8008, self.use_ssl, self.ssl_cert, self.ssl_key
+                )
+        self.package_store_map = {
+                "vnfd": self.tasklet.vnfd_package_store,
+                "nsd": self.tasklet.nsd_package_store,
+                }
+
+        self.exporter = export.DescriptorPackageArchiveExporter(self.log)
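+        # Background task that periodically removes stale archives from the export directory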
+        self.loop.create_task(export.periodic_export_cleanup(self.log, self.loop, self.export_dir))
+
+        attrs = dict(log=self.log, loop=self.loop)
+
+        export_attrs = attrs.copy()
+        export_attrs.update({
+            "store_map": self.package_store_map,
+            "exporter": self.exporter,
+            "catalog_map": {
+                "vnfd": self.vnfd_catalog,
+                "nsd": self.nsd_catalog
+                }
+            })
+
+        super(UploaderApplication, self).__init__([
+            (r"/api/update", UpdateHandler, attrs),
+            (r"/api/upload", UploadHandler, attrs),
+
+            (r"/api/upload/([^/]+)/state", UploadStateHandler, attrs),
+            (r"/api/update/([^/]+)/state", UpdateStateHandler, attrs),
+            (r"/api/export/([^/]+)/state", export.ExportStateHandler, attrs),
+
+            (r"/api/export/(nsd|vnfd)$", export.ExportHandler, export_attrs),
+            (r"/api/export/([^/]+.tar.gz)", tornado.web.StaticFileHandler, {
+                "path": self.export_dir,
+                }),
+            (r"/api/export/([^/]+.zip)", tornado.web.StaticFileHandler, {
+                "path": self.export_dir,
+                }),
+            ])
+
+    @property
+    def log(self):
+        return self.tasklet.log
+
+    @property
+    def loop(self):
+        return self.tasklet.loop
+
+    def get_logger(self, transaction_id):
+        return message.Logger(self.log, self.messages[transaction_id])
+
+    def onboard(self, part_streamer, transaction_id, auth=None):
+        log = message.Logger(self.log, self.messages[transaction_id])
+
+        OnboardPackage(
+                log,
+                self.loop,
+                part_streamer,
+                auth,
+                self.onboarder,
+                self.uploader,
+                self.package_store_map,
+                ).start()
+
+    def update(self, part_streamer, transaction_id, auth=None):
+        log = message.Logger(self.log, self.messages[transaction_id])
+
+        UpdatePackage(
+                log,
+                self.loop,
+                part_streamer,
+                auth,
+                self.onboarder,
+                self.uploader,
+                self.package_store_map,
+                ).start()
+
+    @property
+    def vnfd_catalog(self):
+        return self.tasklet.vnfd_catalog
+
+    @property
+    def nsd_catalog(self):
+        return self.tasklet.nsd_catalog
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/rwlaunchpad.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/rwlaunchpad.py
new file mode 100755 (executable)
index 0000000..3dcd549
--- /dev/null
@@ -0,0 +1,28 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwlaunchpad
+
+class Tasklet(rift.tasklets.rwlaunchpad.LaunchpadTasklet):
+    pass
+
+# vim: sw=4
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/CMakeLists.txt b/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/CMakeLists.txt
new file mode 100644 (file)
index 0000000..549af43
--- /dev/null
@@ -0,0 +1,25 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 05/15/2015
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+install(PROGRAMS rwlaunchpad
+        DESTINATION usr/bin
+        COMPONENT ${INSTALL_COMPONENT}
+        )
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/Makefile b/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
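+
+##
+# Example: $(call find_upward,Makefile.top) expands to the path of the first
+# Makefile.top found while walking up from the current directory
+##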
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/rwlaunchpad b/rwlaunchpad/plugins/rwlaunchpadtasklet/scripts/rwlaunchpad
new file mode 100755 (executable)
index 0000000..21a06b7
--- /dev/null
@@ -0,0 +1,144 @@
+#!/bin/bash
+
+# Script details
+SCRIPTNAME=`basename $0`
+SCRIPT=$0
+SCRIPT_ARGS=${@}
+
+# Initialize some of the variables
+if [ "$RIFT_LP_ADDR" = "" ]; then
+  RIFT_LP_ADDR="localhost"
+fi
+DATE=`date +%Y%m%d-%H%M%S`
+PKGS=()
+INSTANTIATE=0
+DESC_ID=""
+NS_NAME=""
+LOGGING=0
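+# REST endpoint on the launchpad used by upload_package() below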
+RIFT_LP_PKG_UPLOAD_URL="https://${RIFT_LP_ADDR}:4567/api/upload"
+
+######################################################################
+#  Function:usage                                                    #
+#           Prints usage                                             #
+######################################################################
+function usage() {
+  cat <<EOF
+  usage $SCRIPTNAME [-h] [-r launchpad-ip][-u upload-package][-i ns-service-name [-n descriptor-id]][-l]
+       -h : show this message
+       -r : launchpad ip address  -  defaults to RIFT_LP_ADDR environment variable
+       -u : upload package with the package name specified
+       -i : Instantiate a network service with network service name
+       -n : Instantiate a network service with the specified descriptor id
+       -l : Log to file
+EOF
+}
+
+######################################################################
+#  Function:validate_args                                            #
+#           Validates the passed arguments                           #
+######################################################################
+function validate_args () {
+  if [ "$RIFT_LP_ADDR" = "" ]; then
+    echo "RIFT LP address must be specified - set RIFT_LP_ADDR or specify -l option"
+    usage
+    exit 1
+  fi
+  if [ "${#PKGS[@]}" -eq 0 -a "${INSTANTIATE}" -eq 0 ]; then
+    echo "One of -u or -i option must be specified"
+    usage
+    exit 1
+  fi
+  if [ "${INSTANTIATE}" -eq 1 ]; then
+    if [ "${NS_NAME}" = "" -o "${DESC_ID}" = "" ]; then
+      echo "Must specify both descriptor id and ns service name when -i is specified"
+      usage
+      exit 1
+    fi
+  fi
+}
+
+######################################################################
+#  Function:upload_package                                           #
+#           Uploads a package with the passed argument               #
+######################################################################
+function upload_package() {
+  if [ -z "$1" ]; then
+    echo "upload_package: package name should be passed in as an argument"
+    usage
+    exit 1
+  fi
+  PACKAGE=$1
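+  # Post the package as the multipart/form-data "descriptor" field; --insecure
+  # skips certificate verification (the launchpad typically uses a self-signed cert)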
+  curl --insecure -F "descriptor=@${PACKAGE}" ${RIFT_LP_PKG_UPLOAD_URL}
+}
+
+######################################################################
+#  Function:instantiate_ns                                           #
+#           Instantiates a network service                           #
+######################################################################
+function instantiate_ns() {
+  echo "instantiate_ns need implementation"
+}
+
+
+while getopts ":hl:r:u:i:n:" OPTION
+do
+    case $OPTION in
+        h)
+            usage
+            exit 1
+            ;;
+        r)
+            RIFT_LP_ADDR=$OPTARG
+            RIFT_LP_PKG_UPLOAD_URL="https://${RIFT_LP_ADDR}:4567/api/upload"
+            ;;
+        u)
+            PKGS+=($OPTARG)
+            ;;
+        i)
+            INSTANTIATE=1
+            NS_NAME=$OPTARG
+            ;;
+        n)
+            DESC_ID=$OPTARG
+            ;;
+        l)
+            LOGGING=1
+            ;;
+        *)
+            usage
+            exit 1
+            ;;
+    esac
+done
+
+shift $((OPTIND-1))
+
+validate_args
+
+if [ $LOGGING -eq 1 ]; then
+    LOGDIR="/tmp"
+    LOGFILE="$LOGDIR/$SCRIPTNAME-$DATE.log"
+    echo "Logging to file $LOGFILE"
+
+    # Redirect stdout ( > ) and stderr to file
+    # and store the STDOUT and STDERR for later use
+    exec 3>&1
+    exec 4>&2
+    exec >$LOGFILE
+    exec 2>&1
+fi
+
+echo "Started at $DATE"
+
+# Iterate through the packages and upload them
+for PKG in "${PKGS[@]}"
+do
+  echo "Uploading package $PKG"
+  upload_package $PKG
+  echo ""
+done
+
+if [ "${INSTANTIATE}" -eq 1 ]; then
+  instantiate_ns $DESC_ID
+fi
+
+echo "Ended at $DATE"
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/CMakeLists.txt b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/CMakeLists.txt
new file mode 100644 (file)
index 0000000..8f2e904
--- /dev/null
@@ -0,0 +1,38 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 04/22/2016
+# 
+
+rift_py3test(utest_serializer
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_serializer.py
+  )
+
+rift_py3test(utest_export
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_export.py
+  )
+
+rift_py3test(utest_onboard
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_onboard.py
+  )
+
+rift_py3test(utest_package
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_package.py
+  )
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/run_tests.sh b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/run_tests.sh
new file mode 100755 (executable)
index 0000000..3efa1fc
--- /dev/null
@@ -0,0 +1,2 @@
+#!/bin/bash
+python3 -m unittest discover --pattern=*.py
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_export.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_export.py
new file mode 100755 (executable)
index 0000000..7a787c7
--- /dev/null
@@ -0,0 +1,155 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+import asyncio
+import logging
+import io
+import os
+import sys
+import tarfile
+import tempfile
+import time
+import unittest
+import uuid
+import xmlrunner
+
+import rift.package.archive
+import rift.package.charm
+import rift.package.checksums
+import rift.package.config
+import rift.package.convert
+import rift.package.icon
+import rift.package.package
+import rift.package.script
+import rift.package.store
+
+from rift.tasklets.rwlaunchpad import export
+
+import gi
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('VnfdYang', '1.0')
+from gi.repository import (
+        RwVnfdYang,
+        VnfdYang,
+        )
+
+import utest_package
+
+
+class TestExport(utest_package.PackageTestCase):
+    def setUp(self):
+        super().setUp()
+        self._exporter = export.DescriptorPackageArchiveExporter(self._log)
+        self._rw_vnfd_serializer = rift.package.convert.RwVnfdSerializer()
+        self._vnfd_serializer = rift.package.convert.VnfdSerializer()
+
+    def test_create_archive(self):
+        rw_vnfd_msg = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd(
+                id="new_id", name="new_name", description="new_description"
+                )
+        json_desc_str = self._rw_vnfd_serializer.to_json_string(rw_vnfd_msg)
+
+        vnfd_package = self.create_vnfd_package()
+        with io.BytesIO() as archive_hdl:
+            archive = self._exporter.create_archive(
+                    archive_hdl, vnfd_package, json_desc_str, self._rw_vnfd_serializer
+                    )
+
+            archive_hdl.seek(0)
+
+            # Create a new read-only archive from the archive handle and a package from that archive
+            archive = rift.package.archive.TarPackageArchive(self._log, archive_hdl)
+            package = archive.create_package()
+
+            # Ensure that the descriptor in the package has been overwritten
+            self.assertEqual(package.descriptor_msg, rw_vnfd_msg)
+
+    def test_export_package(self):
+        rw_vnfd_msg = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd(
+                id="new_id", name="new_name", description="new_description",
+                meta="THIS FIELD IS NOT IN REGULAR VNFD"
+                )
+        vnfd_msg = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+        vnfd_msg.from_dict(rw_vnfd_msg.as_dict(), ignore_missing_keys=True)
+
+        self.assertNotEqual(rw_vnfd_msg, vnfd_msg)
+
+        json_desc_str = self._rw_vnfd_serializer.to_json_string(rw_vnfd_msg)
+
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            vnfd_package = self.create_vnfd_package()
+            pkg_id = str(uuid.uuid4())
+            exported_path = self._exporter.export_package(
+                    vnfd_package, tmp_dir, pkg_id, json_desc_str, self._vnfd_serializer
+                    )
+
+            self.assertTrue(os.path.isfile(exported_path))
+            self.assertTrue(tarfile.is_tarfile(exported_path))
+
+            with open(exported_path, "rb") as archive_hdl:
+                archive = rift.package.archive.TarPackageArchive(self._log, archive_hdl)
+                package = archive.create_package()
+
+                self.assertEqual(package.descriptor_msg, vnfd_msg)
+
+    def test_export_cleanup(self):
+        loop = asyncio.get_event_loop()
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            archive_files = [tempfile.mkstemp(dir=tmp_dir, suffix=".tar.gz")[1] for _ in range(2)]
+
+            # Set the mtime on only one of the files to test the min_age_secs argument
+            times = (time.time(), time.time() - 10)
+            os.utime(archive_files[0], times)
+
+            task = loop.create_task(
+                    export.periodic_export_cleanup(
+                        self._log, loop, tmp_dir, period_secs=.01, min_age_secs=5
+                        )
+                    )
+            loop.run_until_complete(asyncio.sleep(.05, loop=loop))
+
+            if task.done() and task.exception() is not None:
+                raise task.exception()
+
+            self.assertFalse(task.done())
+
+            self.assertFalse(os.path.exists(archive_files[0]))
+            self.assertTrue(os.path.exists(archive_files[1]))
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='TEST %(message)s')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+
+    args, unknown = parser.parse_known_args(argv)
+    if args.no_runner:
+        runner = None
+
+    # Set the global logging level
+    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_onboard.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_onboard.py
new file mode 100755 (executable)
index 0000000..871132f
--- /dev/null
@@ -0,0 +1,294 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import asyncio
+import base64
+import concurrent.futures
+import io
+import logging
+import os
+import sys
+import tornado.platform.asyncio
+import tornado.testing
+import tornado.web
+import unittest
+import uuid
+import xmlrunner
+
+from rift.package import convert
+from rift.tasklets.rwlaunchpad import onboard
+import rift.test.dts
+
+import gi
+gi.require_version('NsdYang', '1.0')
+gi.require_version('VnfdYang', '1.0')
+
+from gi.repository import (
+        NsdYang,
+        VnfdYang,
+        )
+
+
+class RestconfDescriptorHandler(tornado.web.RequestHandler):
+    DESC_SERIALIZER_MAP = {
+            "nsd": convert.NsdSerializer(),
+            "vnfd": convert.VnfdSerializer(),
+            }
+
+    class AuthError(Exception):
+        pass
+
+
+    class ContentTypeError(Exception):
+        pass
+
+
+    class RequestBodyError(Exception):
+        pass
+
+
+    def initialize(self, log, auth, info):
+        self._auth = auth
+        # The superclass has self._log already defined so use a different name
+        self._logger = log
+        self._info = info
+        self._logger.debug('Created restconf descriptor handler')
+
+    def _verify_auth(self):
+        if self._auth is None:
+            return None
+
+        auth_header = self.request.headers.get('Authorization')
+        if auth_header is None or not auth_header.startswith('Basic '):
+            self.set_status(401)
+            self.set_header('WWW-Authenticate', 'Basic realm=Restricted')
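+            # Clearing _transforms lets finish() run this early in the request
+            # cycle, before tornado has set up its output transforms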
+            self._transforms = []
+            self.finish()
+
+            msg = "Missing Authorization header"
+            self._logger.error(msg)
+            raise RestconfDescriptorHandler.AuthError(msg)
+
+        auth_header = auth_header.encode('ascii')
+        auth_decoded = base64.decodebytes(auth_header[6:]).decode()
+        # Split on the first ':' only; basic-auth passwords may themselves contain colons
+        login, password = auth_decoded.split(':', 1)
+        is_auth = ((login, password) == self._auth)
+
+        if not is_auth:
+            self.set_status(401)
+            self.set_header('WWW-Authenticate', 'Basic realm=Restricted')
+            self._transforms = []
+            self.finish()
+
+            msg = "Incorrect username and password in auth header: got {}, expected {}".format(
+                    (login, password), self._auth
+                    )
+            self._logger.error(msg)
+            raise RestconfDescriptorHandler.AuthError(msg)
+
+    def _verify_content_type_header(self):
+        content_type_header = self.request.headers.get('content-type')
+        if content_type_header is None:
+            self.set_status(415)
+            self._transforms = []
+            self.finish()
+
+            msg = "Missing content-type header"
+            self._logger.error(msg)
+            raise RestconfDescriptorHandler.ContentTypeError(msg)
+
+        if content_type_header != "application/vnd.yang.data+json":
+            self.set_status(415)
+            self._transforms = []
+            self.finish()
+
+            msg = "Unsupported content type: %s" % content_type_header
+            self._logger.error(msg)
+            raise RestconfDescriptorHandler.ContentTypeError(msg)
+
+    def _verify_headers(self):
+        self._verify_auth()
+        self._verify_content_type_header()
+
+    def _verify_request_body(self, descriptor_type):
+        if descriptor_type not in RestconfDescriptorHandler.DESC_SERIALIZER_MAP:
+            raise ValueError("Unsupported descriptor type: %s" % descriptor_type)
+
+        body = self.request.body
+        bytes_hdl = io.BytesIO(body)
+
+        serializer = RestconfDescriptorHandler.DESC_SERIALIZER_MAP[descriptor_type]
+
+        try:
+            message = serializer.from_file_hdl(bytes_hdl, ".json")
+        except convert.SerializationError as e:
+            self.set_status(400)
+            self._transforms = []
+            self.finish()
+
+            msg = "Descriptor request body not valid"
+            self._logger.error(msg)
+            raise RestconfDescriptorHandler.RequestBodyError() from e
+
+        self._info.last_request_message = message
+
+        self._logger.debug("Received a valid descriptor request")
+
+    def put(self, descriptor_type):
+        self._info.last_descriptor_type = descriptor_type
+        self._info.last_method = "PUT"
+
+        try:
+            self._verify_headers()
+        except (RestconfDescriptorHandler.AuthError,
+                RestconfDescriptorHandler.ContentTypeError):
+            return None
+
+        try:
+            self._verify_request_body(descriptor_type)
+        except RestconfDescriptorHandler.RequestBodyError:
+            return None
+
+        self.write("Response doesn't matter?")
+
+    def post(self, descriptor_type):
+        self._info.last_descriptor_type = descriptor_type
+        self._info.last_method = "POST"
+
+        try:
+            self._verify_headers()
+        except (RestconfDescriptorHandler.AuthError,
+                RestconfDescriptorHandler.ContentTypeError):
+            return None
+
+        try:
+            self._verify_request_body(descriptor_type)
+        except RestconfDescriptorHandler.RequestBodyError:
+            return None
+
+        self.write("Response doesn't matter?")
+
+
+class HandlerInfo(object):
+    def __init__(self):
+        self.last_request_message = None
+        self.last_descriptor_type = None
+        self.last_method = None
+
+
+class OnboardTestCase(tornado.testing.AsyncHTTPTestCase):
+    AUTH = ("admin", "admin")
+    def setUp(self):
+        self._log = logging.getLogger(__file__)
+        self._loop = asyncio.get_event_loop()
+
+        self._handler_info = HandlerInfo()
+        super().setUp()
+        self._port = self.get_http_port()
+        self._onboarder = onboard.DescriptorOnboarder(
+                log=self._log, port=self._port
+                )
+
+    def get_new_ioloop(self):
+        return tornado.platform.asyncio.AsyncIOMainLoop()
+
+    def get_app(self):
+        attrs = dict(auth=OnboardTestCase.AUTH, log=self._log, info=self._handler_info)
+        return tornado.web.Application([
+            (r"/api/config/.*/(nsd|vnfd)", RestconfDescriptorHandler, attrs),
+            ])
+
+    @rift.test.dts.async_test
+    def test_onboard_nsd(self):
+        nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+        yield from self._loop.run_in_executor(None, self._onboarder.onboard, nsd_msg)
+        self.assertEqual(self._handler_info.last_request_message, nsd_msg)
+        self.assertEqual(self._handler_info.last_descriptor_type, "nsd")
+        self.assertEqual(self._handler_info.last_method, "POST")
+
+    @rift.test.dts.async_test
+    def test_update_nsd(self):
+        nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+        yield from self._loop.run_in_executor(None, self._onboarder.update, nsd_msg)
+        self.assertEqual(self._handler_info.last_request_message, nsd_msg)
+        self.assertEqual(self._handler_info.last_descriptor_type, "nsd")
+        self.assertEqual(self._handler_info.last_method, "PUT")
+
+    @rift.test.dts.async_test
+    def test_bad_descriptor_type(self):
+        nsd_msg = NsdYang.YangData_Nsd_NsdCatalog()
+        with self.assertRaises(TypeError):
+            yield from self._loop.run_in_executor(None, self._onboarder.update, nsd_msg)
+
+        with self.assertRaises(TypeError):
+            yield from self._loop.run_in_executor(None, self._onboarder.onboard, nsd_msg)
+
+    @rift.test.dts.async_test
+    def test_bad_port(self):
+        # Use a port not used by the instantiated server
+        new_port = self._port - 1
+        self._onboarder.port = new_port
+        nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+
+        with self.assertRaises(onboard.OnboardError):
+            yield from self._loop.run_in_executor(None, self._onboarder.onboard, nsd_msg)
+
+        with self.assertRaises(onboard.UpdateError):
+            yield from self._loop.run_in_executor(None, self._onboarder.update, nsd_msg)
+
+    @rift.test.dts.async_test
+    def test_timeout(self):
+        # Set the timeout to something minimal to speed up test
+        self._onboarder.timeout = .1
+
+        nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+
+        # Force the request to time out by calling the onboarder synchronously:
+        # the test server shares this thread's ioloop, so it cannot respond
+        with self.assertRaises(onboard.OnboardError):
+            self._onboarder.onboard(nsd_msg)
+
+        # Same synchronous-call trick to force a timeout on the update path
+        with self.assertRaises(onboard.UpdateError):
+            self._onboarder.update(nsd_msg)
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='TEST %(message)s')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+
+    args, unknown = parser.parse_known_args(argv)
+    if args.no_runner:
+        runner = None
+
+    # Set the global logging level
+    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_package.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_package.py
new file mode 100755 (executable)
index 0000000..1efd2df
--- /dev/null
@@ -0,0 +1,480 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import logging
+import io
+import json
+import os
+import sys
+import tarfile
+import tempfile
+import unittest
+import xmlrunner
+import yaml
+
+import rift.package.archive
+import rift.package.package
+import rift.package.charm
+import rift.package.icon
+import rift.package.script
+import rift.package.config
+import rift.package.store
+import rift.package.checksums
+import rift.package.cloud_init
+
+
+import gi
+gi.require_version('RwpersonDbYang', '1.0')
+gi.require_version('RwYang', '1.0')
+
+from gi.repository import (
+        RwpersonDbYang,
+        RwYang,
+        )
+
+
+nsd_yaml = b"""nsd:nsd-catalog:
+  nsd:nsd:
+  - nsd:id: gw_corpA
+    nsd:name: gw_corpA
+    nsd:description: Gateways to access as corpA to PE1 and PE2
+"""
+
+vnfd_yaml = b"""vnfd:vnfd-catalog:
+  vnfd:vnfd:
+  - vnfd:id: gw_corpA_vnfd
+    vnfd:name: gw_corpA_vnfd
+    vnfd:description: Gateways to access as corpA to PE1 and PE2
+"""
+
+nsd_filename = "gw_corpA__nsd.yaml"
+vnfd_filename = "gw_corpA__vnfd.yaml"
+
+
+def file_hdl_md5(file_hdl):
+    return rift.package.checksums.checksum(file_hdl)
+
+
+class ArchiveTestCase(unittest.TestCase):
+    def setUp(self):
+        self._log = logging.getLogger()
+
+        self._tar_file_hdl = io.BytesIO()
+        self._tar = tarfile.open(fileobj=self._tar_file_hdl, mode="w|gz")
+
+        self._nsd_yaml_hdl = io.BytesIO(nsd_yaml)
+        self._vnfd_yaml_hdl = io.BytesIO(vnfd_yaml)
+
+    def tearDown(self):
+        self._nsd_yaml_hdl.close()
+        self._vnfd_yaml_hdl.close()
+        self._tar.close()
+        self._tar_file_hdl.close()
+
+    def create_tar_package_archive(self):
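+        # Close the tar first to flush the gzip trailer, then rewind the
+        # underlying file handle so the archive can be read from the start.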
+        self._tar.close()
+        self._tar_file_hdl.flush()
+        self._tar_file_hdl.seek(0)
+        archive = rift.package.package.TarPackageArchive(
+                log=self._log,
+                tar_file_hdl=self._tar_file_hdl,
+                )
+
+        return archive
+
+    def add_tarinfo(self, name, file_hdl, mode=0o777):
+        tarinfo = tarfile.TarInfo(name)
+        tarinfo.mode = mode
+        tarinfo.size = len(file_hdl.read())
+        assert tarinfo.size > 0
+        file_hdl.seek(0)
+        self._tar.addfile(tarinfo, file_hdl)
+
+    def add_tarinfo_dir(self, name):
+        tarinfo = tarfile.TarInfo(name)
+        tarinfo.type = tarfile.DIRTYPE
+        self._tar.addfile(tarinfo)
+
+    def add_nsd_yaml(self):
+        self.add_tarinfo(nsd_filename, io.BytesIO(nsd_yaml))
+
+    def add_vnfd_yaml(self):
+        self.add_tarinfo(vnfd_filename, io.BytesIO(vnfd_yaml))
+
+
+class PackageTestCase(ArchiveTestCase):
+    def create_nsd_package(self):
+        self.add_nsd_yaml()
+        archive = self.create_tar_package_archive()
+        package = archive.create_package()
+
+        return package
+
+    def create_vnfd_package(self):
+        self.add_vnfd_yaml()
+        archive = self.create_tar_package_archive()
+        package = archive.create_package()
+
+        return package
+
+
+class TestCreateArchive(ArchiveTestCase):
+    def test_create_tar_archive(self):
+        self.add_nsd_yaml()
+        archive = self.create_tar_package_archive()
+        self.assertEqual(set(archive.filenames), {nsd_filename})
+
+    def test_nsd_tar_archive(self):
+        #Write the NSD YAML to the tar file
+        self.add_nsd_yaml()
+
+        archive = self.create_tar_package_archive()
+        with archive.open_file(nsd_filename) as nsd_hdl:
+            nsd_bytes = nsd_hdl.read()
+
+        self.assertEqual(nsd_bytes, nsd_yaml)
+
+
+class TestPackage(PackageTestCase):
+    def create_vnfd_package_archive(self, package, hdl):
+        # Create an archive from a package
+        archive = rift.package.archive.TarPackageArchive.from_package(
+                self._log, package, hdl,
+                )
+        # Closing the archive writes any closing bytes to the file handle
+        archive.close()
+        hdl.seek(0)
+
+        return archive
+
+    def test_create_nsd_package_from_archive(self):
+        package = self.create_nsd_package()
+        self.assertTrue(isinstance(package, rift.package.package.NsdPackage))
+
+        json_str = package.json_descriptor
+        desc_dict = json.loads(json_str)
+        self.assertIn("nsd:nsd-catalog", desc_dict)
+
+    def test_create_vnfd_package_from_archive(self):
+        package = self.create_vnfd_package()
+        self.assertTrue(isinstance(package, rift.package.package.VnfdPackage))
+
+        json_str = package.json_descriptor
+        desc_dict = json.loads(json_str)
+        self.assertIn("vnfd:vnfd-catalog", desc_dict)
+
+    def test_create_vnfd_archive_from_package(self):
+        package = self.create_vnfd_package()
+        hdl = io.BytesIO()
+        self.create_vnfd_package_archive(package, hdl)
+
+        # Ensure that the archive created was valid
+        with tarfile.open(fileobj=hdl, mode='r|gz'):
+            pass
+
+    def test_round_trip_vnfd_package_from_archive(self):
+        package = self.create_vnfd_package()
+        hdl = io.BytesIO()
+        self.create_vnfd_package_archive(package, hdl)
+
+        archive = rift.package.archive.TarPackageArchive(self._log, hdl)
+
+        # Create the package from the archive and validate file checksums
+        new_package = archive.create_package()
+
+        self.assertEqual(package.files, new_package.files)
+        self.assertEqual(type(package), type(new_package))
+
+        for filename in package.files:
+            with package.open(filename) as pkg_file, new_package.open(filename) as new_pkg_file:
+                self.assertEqual(file_hdl_md5(pkg_file), file_hdl_md5(new_pkg_file))
+
+    def test_create_nsd_package_from_file(self):
+        nsd_file_name = "asdf_nsd.yaml"
+        hdl = io.BytesIO(nsd_yaml)
+        hdl.name = nsd_file_name
+
+        package = rift.package.package.DescriptorPackage.from_descriptor_file_hdl(
+                self._log, hdl
+                )
+        self.assertTrue(isinstance(package, rift.package.package.NsdPackage))
+
+        with package.open(nsd_file_name) as nsd_hdl:
+            nsd_data = nsd_hdl.read()
+            self.assertEqual(yaml.load(nsd_data), yaml.load(nsd_yaml))
+
+    def test_create_vnfd_package_from_file(self):
+        vnfd_file_name = "asdf_vnfd.yaml"
+        hdl = io.BytesIO(vnfd_yaml)
+        hdl.name = vnfd_file_name
+
+        package = rift.package.package.DescriptorPackage.from_descriptor_file_hdl(
+                self._log, hdl
+                )
+        self.assertTrue(isinstance(package, rift.package.package.VnfdPackage))
+
+        with package.open(vnfd_file_name) as vnfd_hdl:
+            vnfd_data = vnfd_hdl.read()
+            self.assertEqual(yaml.load(vnfd_data), yaml.load(vnfd_yaml))
+
+
+class TestPackageCharmExtractor(PackageTestCase):
+    def add_charm_dir(self, charm_name):
+        charm_dir = "charms/trusty/{}".format(charm_name)
+        charm_file = "{}/actions.yaml".format(charm_dir)
+        charm_text = b"THIS IS A FAKE CHARM"
+        self.add_tarinfo_dir(charm_dir)
+        self.add_tarinfo(charm_file, io.BytesIO(charm_text))
+
+    def test_extract_charm(self):
+        charm_name = "charm_a"
+        self.add_charm_dir(charm_name)
+        package = self.create_vnfd_package()
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            extractor = rift.package.charm.PackageCharmExtractor(self._log, tmp_dir)
+            extractor.extract_charms(package)
+
+            charm_dir = extractor.get_extracted_charm_dir(package.descriptor_id, charm_name)
+            self.assertTrue(os.path.exists(charm_dir))
+            self.assertTrue(os.path.isdir(charm_dir))
+
+
+class TestPackageIconExtractor(PackageTestCase):
+    def add_icon_file(self, icon_name):
+        icon_file = "icons/{}".format(icon_name)
+        icon_text = b"png file bytes"
+        self.add_tarinfo(icon_file, io.BytesIO(icon_text))
+
+    def test_extract_icon(self):
+        icon_name = "icon_a"
+        self.add_icon_file(icon_name)
+        package = self.create_vnfd_package()
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            extractor = rift.package.icon.PackageIconExtractor(self._log, tmp_dir)
+            extractor.extract_icons(package)
+
+            icon_file = extractor.get_extracted_icon_path(
+                    package.descriptor_type, package.descriptor_id, icon_name
+                    )
+            self.assertTrue(os.path.exists(icon_file))
+            self.assertTrue(os.path.isfile(icon_file))
+
+
+class TestPackageScriptExtractor(PackageTestCase):
+    def add_script_file(self, script_name):
+        script_file = "scripts/{}".format(script_name)
+        script_text = b"""#!/usr/bin/python
+        print("hi")
+        """
+        self.add_tarinfo(script_file, io.BytesIO(script_text), mode=0o666)
+
+    def test_extract_script(self):
+        script_name = "add_corporation.py"
+        self.add_script_file(script_name)
+        package = self.create_vnfd_package()
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            extractor = rift.package.script.PackageScriptExtractor(self._log, tmp_dir)
+            extractor.extract_scripts(package)
+
+            script_dir = extractor.get_extracted_script_path(package.descriptor_id, script_name)
+            self.assertTrue(os.path.exists(script_dir))
+            self.assertTrue(os.path.isfile(script_dir))
+
+class TestPackageCloudInitExtractor(PackageTestCase):
+    def add_cloud_init_file(self, cloud_init_filename):
+        script_file = "cloud_init/{}".format(cloud_init_filename)
+        script_text = b"""#cloud-config"""
+        self.add_tarinfo(script_file, io.BytesIO(script_text), mode=0o666)
+
+    def test_read_cloud_init(self):
+        script_name = "testVM_cloud_init.cfg"
+        valid_script_text = "#cloud-config"
+        self.add_cloud_init_file(script_name)
+        package = self.create_vnfd_package()
+
+        extractor = rift.package.cloud_init.PackageCloudInitExtractor(self._log)
+        cloud_init_contents = extractor.read_script(package, script_name)
+
+        self.assertEqual(cloud_init_contents, valid_script_text)
+
+    def test_cloud_init_file_missing(self):
+        script_name = "testVM_cloud_init.cfg"
+        package = self.create_vnfd_package()
+
+        extractor = rift.package.cloud_init.PackageCloudInitExtractor(self._log)
+
+        with self.assertRaises(rift.package.cloud_init.CloudInitExtractionError):
+            extractor.read_script(package, script_name)
+
+class TestPackageConfigExtractor(PackageTestCase):
+    def add_ns_config_file(self, nsd_id):
+        config_file = "ns_config/{}.yaml".format(nsd_id)
+        config_text = b""" ns_config """
+        self.add_tarinfo(config_file, io.BytesIO(config_text), mode=0o666)
+
+        return config_file
+
+    def add_vnf_config_file(self, vnfd_id, member_vnf_index):
+        config_file = "vnf_config/{}_{}.yaml".format(vnfd_id, member_vnf_index)
+        config_text = b""" vnf_config """
+        self.add_tarinfo(config_file, io.BytesIO(config_text), mode=0o666)
+
+        return config_file
+
+    def test_extract_config(self):
+        ns_config_file = self.add_ns_config_file("nsd_id")
+        vnf_config_file = self.add_vnf_config_file("vnfd_id", 1)
+        package = self.create_nsd_package()
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            extractor = rift.package.config.PackageConfigExtractor(self._log, tmp_dir)
+            extractor.extract_configs(package)
+
+            dest_ns_config_file = extractor.get_extracted_config_path(package.descriptor_id, ns_config_file)
+            dest_vnf_config_file = extractor.get_extracted_config_path(package.descriptor_id, vnf_config_file)
+            self.assertTrue(os.path.isfile(dest_ns_config_file))
+            self.assertTrue(os.path.isfile(dest_vnf_config_file))
+
+
+class TestPackageValidator(PackageTestCase):
+    def setUp(self):
+        super().setUp()
+        self._validator = rift.package.package.PackageChecksumValidator(self._log)
+
+    def create_checksum_file(self, file_md5_map):
+        checksum_hdl = io.BytesIO()
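+        # Mirrors the md5sum(1) "<digest>  <filename>" line format, e.g.
+        # (hypothetical digest):
+        #   d41d8cd98f00b204e9800998ecf8427e  gw_corpA__nsd.yaml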
+        for file_name, md5 in file_md5_map.items():
+            checksum_hdl.write("{}  {}\n".format(md5, file_name).encode())
+
+        checksum_hdl.flush()
+        checksum_hdl.seek(0)
+
+        self.add_tarinfo("checksums.txt", checksum_hdl)
+
+    def create_nsd_package_with_checksum(self):
+        self.create_checksum_file(
+                {nsd_filename: file_hdl_md5(io.BytesIO(nsd_yaml))}
+                )
+        package = self.create_nsd_package()
+        return package
+
+    def test_package_no_checksum(self):
+        package = self.create_nsd_package()
+
+        # For now, a missing checksum file will be supported.
+        # No files will be validated.
+        validated_files = self._validator.validate(package)
+        self.assertEqual(validated_files, {})
+
+    def test_package_with_checksum(self):
+        package = self.create_nsd_package_with_checksum()
+        validated_files = self._validator.validate(package)
+        self.assertEqual(list(validated_files.keys()), [nsd_filename])
+
+
+class TestPackageStore(PackageTestCase):
+    def create_store(self, root_dir):
+        store = rift.package.store.PackageFilesystemStore(self._log, root_dir)
+        return store
+
+    def create_and_store_package(self, store):
+        package = self.create_nsd_package()
+        store.store_package(package)
+
+        return package
+
+    def test_store_package(self):
+        with tempfile.TemporaryDirectory() as root_dir:
+            store = self.create_store(root_dir)
+            package = self.create_and_store_package(store)
+            new_package = store.get_package(package.descriptor_id)
+            self.assertEqual(new_package.files, package.files)
+            self.assertEqual(type(new_package), type(package))
+
+    def test_store_reload_package(self):
+        with tempfile.TemporaryDirectory() as root_dir:
+            store = self.create_store(root_dir)
+            package = self.create_and_store_package(store)
+
+            new_store = self.create_store(root_dir)
+            new_package = new_store.get_package(package.descriptor_id)
+
+            self.assertEqual(new_package.files, package.files)
+            self.assertEqual(type(new_package), type(package))
+
+    def test_delete_package(self):
+        with tempfile.TemporaryDirectory() as root_dir:
+            store = self.create_store(root_dir)
+            package = self.create_and_store_package(store)
+
+            store.get_package(package.descriptor_id)
+            store.delete_package(package.descriptor_id)
+
+            with self.assertRaises(rift.package.store.PackageStoreError):
+                store.get_package(package.descriptor_id)
+
+    def test_store_exist_package(self):
+        with tempfile.TemporaryDirectory() as root_dir:
+            store = self.create_store(root_dir)
+            package = self.create_and_store_package(store)
+
+            with self.assertRaises(rift.package.store.PackageStoreError):
+                store.store_package(package)
+
+
+class TestTemporaryPackage(PackageTestCase):
+    def test_temp_package(self):
+        self._tar_file_hdl = tempfile.NamedTemporaryFile(delete=False)
+        self._tar = tarfile.open(fileobj=self._tar_file_hdl, mode="w|gz")
+
+        self.assertTrue(os.path.exists(self._tar_file_hdl.name))
+
+        package = self.create_nsd_package()
+        with rift.package.package.TemporaryPackage(self._log, package, self._tar_file_hdl) as temp_pkg:
+            self.assertTrue(package is temp_pkg)
+            self.assertEqual(package.files, temp_pkg.files)
+
+        self.assertFalse(os.path.exists(self._tar_file_hdl.name))
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='TEST %(message)s')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+
+    args, unknown = parser.parse_known_args(argv)
+    if args.no_runner:
+        runner = None
+
+    # Set the global logging level
+    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_serializer.py b/rwlaunchpad/plugins/rwlaunchpadtasklet/test/utest_serializer.py
new file mode 100755 (executable)
index 0000000..af8e1f8
--- /dev/null
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import logging
+import io
+import os
+import sys
+import tempfile
+import unittest
+import xmlrunner
+
+from rift.package.convert import (
+        ProtoMessageSerializer,
+        UnknownExtensionError,
+        SerializationError,
+        )
+
+import gi
+gi.require_version('RwpersonDbYang', '1.0')
+gi.require_version('RwYang', '1.0')
+
+from gi.repository import (
+        RwpersonDbYang,
+        RwYang,
+        )
+
+class TestSerializer(unittest.TestCase):
+    def setUp(self):
+        self._serializer = ProtoMessageSerializer(
+                RwpersonDbYang,
+                RwpersonDbYang.Person
+                )
+
+        self._sample_person = RwpersonDbYang.Person(name="Fred")
+        self._model = RwYang.model_create_libncx()
+        self._model.load_schema_ypbc(RwpersonDbYang.get_schema())
+
+    def test_from_xml_file(self):
+        sample_person_xml = self._sample_person.to_xml_v2(self._model)
+        with io.StringIO(sample_person_xml) as file_hdl:
+            person = self._serializer.from_file_hdl(file_hdl, ".xml")
+            self.assertEqual(person, self._sample_person)
+
+    def test_from_yaml_file(self):
+        sample_person_yaml = self._sample_person.to_yaml(self._model)
+        with io.StringIO(sample_person_yaml) as file_hdl:
+
+            person = self._serializer.from_file_hdl(file_hdl, ".yml")
+            self.assertEqual(person, self._sample_person)
+
+    def test_from_json_file(self):
+        sample_person_json = self._sample_person.to_json(self._model)
+        with io.StringIO(sample_person_json) as file_hdl:
+
+            person = self._serializer.from_file_hdl(file_hdl, ".json")
+            self.assertEqual(person, self._sample_person)
+
+    def test_unknown_file_extension(self):
+        with io.StringIO("asdf") as file_hdl:
+            with self.assertRaises(UnknownExtensionError):
+                self._serializer.from_file_hdl(file_hdl, ".foo")
+
+    def test_raises_serialization_error(self):
+        with io.StringIO('</foo>') as file_hdl:
+            with self.assertRaises(SerializationError):
+                person = self._serializer.from_file_hdl(file_hdl, ".json")
+                print(person)
+
+    def test_to_json_string(self):
+        json_str = self._serializer.to_json_string(self._sample_person)
+
+        person = RwpersonDbYang.Person.from_json(self._model, json_str)
+        self.assertEqual(person, self._sample_person)
+
+    def test_to_json_string_invalid_type(self):
+        with self.assertRaises(TypeError):
+            self._serializer.to_json_string(RwpersonDbYang.FlatPerson(name="bob"))
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='TEST %(message)s')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+
+    args, unknown = parser.parse_known_args(argv)
+    if args.no_runner:
+        runner = None
+
+    # Set the global logging level
+    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/plugins/rwmonitor/CMakeLists.txt b/rwlaunchpad/plugins/rwmonitor/CMakeLists.txt
new file mode 100644 (file)
index 0000000..6bc0195
--- /dev/null
@@ -0,0 +1,39 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Joshua Downer
+# Creation Date: 2015/10/30
+# 
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwmonitor)
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/${TASKLET_NAME}/__init__.py
+    rift/tasklets/${TASKLET_NAME}/core.py
+    rift/tasklets/${TASKLET_NAME}/tasklet.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
diff --git a/rwlaunchpad/plugins/rwmonitor/Makefile b/rwlaunchpad/plugins/rwmonitor/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/__init__.py b/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/__init__.py
new file mode 100644 (file)
index 0000000..47bbfc8
--- /dev/null
@@ -0,0 +1 @@
+from .tasklet import MonitorTasklet
diff --git a/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/core.py b/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/core.py
new file mode 100644 (file)
index 0000000..b97b2f5
--- /dev/null
@@ -0,0 +1,880 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import abc
+import asyncio
+import collections
+import concurrent.futures
+import importlib
+import time
+
+import gi
+gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('RwMon', '1.0')
+from gi.repository import (
+        RwTypes,
+        RwVnfrYang,
+        )
+
+import rw_peas
+
+
+class VdurMissingVimIdError(Exception):
+    def __init__(self, vdur_id):
+        super().__init__("VDUR:{} has no VIM ID".format(vdur_id))
+
+
+class VdurAlreadyRegisteredError(Exception):
+    def __init__(self, vdur_id):
+        super().__init__("VDUR:{} is already registered".format(vdur_id))
+
+
+class AccountInUseError(Exception):
+    pass
+
+
+class UnknownAccountError(Exception):
+    pass
+
+
+class AccountAlreadyRegisteredError(Exception):
+    def __init__(self, account_name):
+        msg = "'{}' already registered".format(account_name)
+        super().__init__(msg)
+
+
+class PluginUnavailableError(Exception):
+    pass
+
+
+class PluginNotSupportedError(PluginUnavailableError):
+    pass
+
+
+class AlarmCreateError(Exception):
+    def __init__(self):
+        super().__init__("failed to create alarm")
+
+
+class AlarmDestroyError(Exception):
+    def __init__(self):
+        super().__init__("failed to destroy alarm")
+
+
+class PluginFactory(abc.ABC):
+
+    @abc.abstractmethod
+    def create(self, cloud_account):
+        pass
+
+    @property
+    def name(self):
+        return self.__class__.PLUGIN_NAME
+
+    @property
+    def fallbacks(self):
+        try:
+            return list(self.__class__.FALLBACKS)
+        except Exception:
+            return list()
+
+
+class MonascaPluginFactory(PluginFactory):
+    PLUGIN_NAME = "monasca"
+    FALLBACKS = ["ceilometer",]
+
+    def create(self, cloud_account):
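+        # Monasca support is not implemented here; raising unconditionally
+        # makes register() fall through to the FALLBACKS chain (ceilometer).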
+        raise PluginUnavailableError()
+
+
+class CeilometerPluginFactory(PluginFactory):
+    PLUGIN_NAME = "ceilometer"
+    FALLBACKS = ["unavailable",]
+
+    def create(self, cloud_account):
+        plugin = rw_peas.PeasPlugin("rwmon_ceilometer", 'RwMon-1.0')
+        impl = plugin.get_interface("Monitoring")
+
+        # Check that the plugin is available on the platform associated with
+        # the cloud account
+        _, available = impl.nfvi_metrics_available(cloud_account)
+        if not available:
+            raise PluginUnavailableError()
+
+        return impl
+
+
+class UnavailablePluginFactory(PluginFactory):
+    PLUGIN_NAME = "unavailable"
+
+    class UnavailablePlugin(object):
+        def nfvi_metrics_available(self, cloud_account):
+            return None, False
+
+    def create(self, cloud_account):
+        return UnavailablePluginFactory.UnavailablePlugin()
+
+
+class MockPluginFactory(PluginFactory):
+    PLUGIN_NAME = "mock"
+    FALLBACKS = ["unavailable",]
+
+    def create(self, cloud_account):
+        plugin = rw_peas.PeasPlugin("rwmon_mock", 'RwMon-1.0')
+        impl = plugin.get_interface("Monitoring")
+
+        # Check that the plugin is available on the platform associated with
+        # the cloud account
+        _, available = impl.nfvi_metrics_available(cloud_account)
+        if not available:
+            raise PluginUnavailableError()
+
+        return impl
+
+
+class NfviMetricsPluginManager(object):
+    def __init__(self, log):
+        self._plugins = dict()
+        self._log = log
+        self._factories = dict()
+
+        self.register_plugin_factory(MockPluginFactory())
+        self.register_plugin_factory(CeilometerPluginFactory())
+        self.register_plugin_factory(MonascaPluginFactory())
+        self.register_plugin_factory(UnavailablePluginFactory())
+
+    @property
+    def log(self):
+        return self._log
+
+    def register_plugin_factory(self, factory):
+        self._factories[factory.name] = factory
+
+    def plugin(self, account_name):
+        return self._plugins[account_name]
+
+    def register(self, cloud_account, plugin_name):
+        # Check to see if the cloud account has already been registered
+        if cloud_account.name in self._plugins:
+            raise AccountAlreadyRegisteredError(cloud_account.name)
+
+        if plugin_name not in self._factories:
+            raise PluginNotSupportedError(plugin_name)
+
+        # Create a plugin from one of the factories
+        fallbacks = [plugin_name,]
+
+        while fallbacks:
+            name = fallbacks.pop(0)
+            try:
+                factory = self._factories[name]
+                plugin = factory.create(cloud_account)
+                self._plugins[cloud_account.name] = plugin
+                return
+
+            except PluginUnavailableError:
+                self.log.warning("plugin for {} unavailable".format(name))
+                fallbacks.extend(factory.fallbacks)
+
+        raise PluginUnavailableError()
+
+    def unregister(self, account_name):
+        if account_name in self._plugins:
+            del self._plugins[account_name]
+
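+# Illustrative use of the fallback chain implemented by register() above
+# (hypothetical account object): registering an openstack account against
+# "monasca" raises PluginUnavailableError, so its FALLBACKS ("ceilometer")
+# are tried next; if ceilometer is also unavailable on that platform, the
+# "unavailable" factory is used, which always succeeds.
+#
+#     manager = NfviMetricsPluginManager(log)
+#     manager.register(cloud_account, "monasca")
+#     plugin = manager.plugin(cloud_account.name)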
+
+class NfviMetrics(object):
+    """
+    The NfviMetrics class contains the logic to retrieve NFVI metrics for a
+    particular VDUR. Of particular importance is that this object caches the
+    metrics until the data become stale so that it does not create excessive
+    load upon the underlying data-source.
+    """
+
+    # The sample interval defines the maximum time (secs) that metrics will be
+    # cached for. This duration should coincide with the sampling interval used
+    # by the underlying data-source to capture metrics.
+    SAMPLE_INTERVAL = 10
+
+    # The maximum time (secs) an instance will wait for a request to the data
+    # source to be completed
+    TIMEOUT = 2
+
+    def __init__(self, log, loop, account, plugin, vdur):
+        """Creates an instance of NfviMetrics
+
+        Arguments:
+            log     - a logger
+            loop    - an event loop
+            account - a CloudAccount instance
+            plugin  - an NFVI plugin
+            vdur    - a VDUR instance
+
+        """
+        self._log = log
+        self._loop = loop
+        self._account = account
+        self._plugin = plugin
+        self._timestamp = 0
+        self._metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
+        self._vdur = vdur
+        self._vim_id = vdur.vim_id
+        self._updating = None
+
+    @property
+    def log(self):
+        """The logger used by NfviMetrics"""
+        return self._log
+
+    @property
+    def loop(self):
+        """The current asyncio loop"""
+        return self._loop
+
+    @property
+    def vdur(self):
+        """The VDUR that these metrics are associated with"""
+        return self._vdur
+
+    def retrieve(self):
+        """Return the NFVI metrics for this VDUR
+
+        This function will immediately return the current, known NFVI metrics
+        for the associated VDUR. It will also, if the data are stale, schedule
+        a call to the data-source to retrieve new data.
+
+        """
+        if self.should_update():
+            self._updating = self.loop.create_task(self.update())
+
+        return self._metrics
+
+    def should_update(self):
+        """Return a boolean indicating whether an update should be performed"""
+        running = self._updating is not None and not self._updating.done()
+        overdue = time.time() > self._timestamp + NfviMetrics.SAMPLE_INTERVAL
+
+        return overdue and not running
+
+    @asyncio.coroutine
+    def update(self):
+        """Update the NFVI metrics for the associated VDUR
+
+        This coroutine will request new metrics from the data-source and update
+        the current metrics.
+
+        """
+        try:
+            try:
+                # Make the request to the plugin in a separate thread and do
+                # not exceed the timeout
+                _, metrics = yield from asyncio.wait_for(
+                        self.loop.run_in_executor(
+                            None,
+                            self._plugin.nfvi_metrics,
+                            self._account,
+                            self._vim_id,
+                            ),
+                        timeout=NfviMetrics.TIMEOUT,
+                        loop=self.loop,
+                        )
+
+            except asyncio.TimeoutError:
+                msg = "timeout on request for nfvi metrics (vim-id = {})"
+                self.log.warning(msg.format(self._vim_id))
+                return
+
+            except Exception as e:
+                self.log.exception(e)
+                return
+
+            try:
+                # Create uninitialized metric structure
+                vdu_metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
+
+                # VCPU
+                vdu_metrics.vcpu.total = self.vdur.vm_flavor.vcpu_count
+                vdu_metrics.vcpu.utilization = metrics.vcpu.utilization
+
+                # Memory (in bytes; vm_flavor reports MB, so convert)
+                vdu_metrics.memory.used = metrics.memory.used
+                vdu_metrics.memory.total = 1e6 * self.vdur.vm_flavor.memory_mb
+                vdu_metrics.memory.utilization = 100 * vdu_metrics.memory.used / vdu_metrics.memory.total
+
+                # Storage
+                vdu_metrics.storage.used = metrics.storage.used
+                vdu_metrics.storage.total = 1e9 * self.vdur.vm_flavor.storage_gb
+                vdu_metrics.storage.utilization = 100 * vdu_metrics.storage.used / vdu_metrics.storage.total
+
+                # Network (incoming)
+                vdu_metrics.network.incoming.packets = metrics.network.incoming.packets
+                vdu_metrics.network.incoming.packet_rate = metrics.network.incoming.packet_rate
+                vdu_metrics.network.incoming.bytes = metrics.network.incoming.bytes
+                vdu_metrics.network.incoming.byte_rate = metrics.network.incoming.byte_rate
+
+                # Network (outgoing)
+                vdu_metrics.network.outgoing.packets = metrics.network.outgoing.packets
+                vdu_metrics.network.outgoing.packet_rate = metrics.network.outgoing.packet_rate
+                vdu_metrics.network.outgoing.bytes = metrics.network.outgoing.bytes
+                vdu_metrics.network.outgoing.byte_rate = metrics.network.outgoing.byte_rate
+
+                # External ports
+                vdu_metrics.external_ports.total = len(self.vdur.external_interface)
+
+                # Internal ports
+                vdu_metrics.internal_ports.total = len(self.vdur.internal_interface)
+
+                self._metrics = vdu_metrics
+
+            except Exception as e:
+                self.log.exception(e)
+
+        finally:
+            # Regardless of the result of the query, we want to make sure that
+            # we do not poll the data source until another sample duration has
+            # passed.
+            self._timestamp = time.time()
+
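+# A minimal sketch of the caching contract above (hypothetical objects):
+# retrieve() always returns immediately with the cached metrics and only
+# schedules an asynchronous update() when the cache is older than
+# SAMPLE_INTERVAL and no update is already in flight.
+#
+#     metrics = NfviMetrics(log, loop, account, plugin, vdur)
+#     m1 = metrics.retrieve()   # schedules update(), returns cached (empty) data
+#     m2 = metrics.retrieve()   # within SAMPLE_INTERVAL: cached, no new request
+#     # ...more than SAMPLE_INTERVAL seconds later...
+#     m3 = metrics.retrieve()   # stale, so a fresh update() is scheduled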
+
+class NfviMetricsCache(object):
+    def __init__(self, log, loop, plugin_manager):
+        self._log = log
+        self._loop = loop
+        self._plugin_manager = plugin_manager
+        self._nfvi_metrics = dict()
+
+        self._vim_to_vdur = dict()
+        self._vdur_to_vim = dict()
+
+    def create_entry(self, account, vdur):
+        plugin = self._plugin_manager.plugin(account.name)
+        metrics = NfviMetrics(self._log, self._loop, account, plugin, vdur)
+        self._nfvi_metrics[vdur.vim_id] = metrics
+
+        self._vim_to_vdur[vdur.vim_id] = vdur.id
+        self._vdur_to_vim[vdur.id] = vdur.vim_id
+
+    def destroy_entry(self, vdur_id):
+        vim_id = self._vdur_to_vim[vdur_id]
+
+        del self._nfvi_metrics[vim_id]
+        del self._vdur_to_vim[vdur_id]
+        del self._vim_to_vdur[vim_id]
+
+    def retrieve(self, vim_id):
+        return self._nfvi_metrics[vim_id].retrieve()
+
+    def to_vim_id(self, vdur_id):
+        return self._vdur_to_vim[vdur_id]
+
+    def to_vdur_id(self, vim_id):
+        return self._vim_to_vdur[vim_id]
+
+    def contains_vdur_id(self, vdur_id):
+        return vdur_id in self._vdur_to_vim
+
+    def contains_vim_id(self, vim_id):
+        return vim_id in self._vim_to_vdur
+
+
+class NfviInterface(object):
+    """
+    The NfviInterface serves as an interface for communicating with the
+    underlying infrastructure, i.e. retrieving metrics for VDURs that have been
+    registered with it and managing alarms.
+
+    The NfviInterface should only need to be invoked using a cloud account and
+    optionally a VIM ID; It should not need to handle mapping from VDUR ID to
+    VIM ID.
+    """
+
+    def __init__(self, loop, log, plugin_manager, cache):
+        """Creates an NfviInterface instance
+
+        Arguments:
+            loop           - an event loop
+            log            - a logger
+            plugin_manager - an instance of NfviMetricsPluginManager
+            cache          - an instance of NfviMetricsCache
+
+        """
+        self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=16)
+        self._plugin_manager = plugin_manager
+        self._cache = cache
+        self._loop = loop
+        self._log = log
+
+    @property
+    def loop(self):
+        """The event loop used by this NfviInterface"""
+        return self._loop
+
+    @property
+    def log(self):
+        """The event log used by this NfviInterface"""
+        return self._log
+
+    @property
+    def metrics(self):
+        """The list of metrics contained in this NfviInterface"""
+        return list(self._cache._nfvi_metrics.values())
+
+    def nfvi_metrics_available(self, account):
+        plugin = self._plugin_manager.plugin(account.name)
+        _, available = plugin.nfvi_metrics_available(account)
+        return available
+
+    def retrieve(self, vdur_id):
+        """Returns the NFVI metrics for the specified VDUR
+
+        Note, a VDUR must be registered with a NfviInterface before
+        metrics can be retrieved for it.
+
+        Arguments:
+            vdur_id - the ID of the VDUR whose metrics should be retrieved
+
+        Returns:
+            An NfviMetrics object for the specified VDUR
+
+        """
+        return self._cache.retrieve(self._cache.to_vim_id(vdur_id))
+
+    @asyncio.coroutine
+    def alarm_create(self, account, vim_id, alarm, timeout=5):
+        """Create a new alarm
+
+        Arguments:
+            account - a CloudAccount instance
+            vim_id  - the VM to associate with this alarm
+            alarm   - an alarm structure
+            timeout - the request timeout (sec)
+
+        Raises:
+            If the data source does not respond in a timely manner, an
+            asyncio.TimeoutError will be raised.
+
+        """
+        plugin = self._plugin_manager.plugin(account.name)
+        status = yield from asyncio.wait_for(
+                self.loop.run_in_executor(
+                    None,
+                    plugin.do_alarm_create,
+                    account,
+                    vim_id,
+                    alarm,
+                    ),
+                timeout=timeout,
+                loop=self.loop,
+                )
+
+        if status == RwTypes.RwStatus.FAILURE:
+            raise AlarmCreateError()
+
+    @asyncio.coroutine
+    def alarm_destroy(self, account, alarm_id, timeout=5):
+        """Destroy an existing alarm
+
+        Arguments:
+            account  - a CloudAccount instance
+            alarm_id - the identifier of the alarm to destroy
+            timeout  - the request timeout (sec)
+
+        Raises:
+            If the data source does not respond in a timely manner, an
+            asyncio.TimeoutError will be raised.
+
+        """
+        plugin = self._plugin_manager.plugin(account.name)
+        status = yield from asyncio.wait_for(
+                self.loop.run_in_executor(
+                    None,
+                    plugin.do_alarm_delete,
+                    account,
+                    alarm_id,
+                    ),
+                timeout=timeout,
+                loop=self.loop,
+                )
+
+        if status == RwTypes.RwStatus.FAILURE:
+            raise AlarmDestroyError()
+
+
+class InstanceConfiguration(object):
+    """
+    The InstanceConfiguration class represents configuration information that
+    affects the behavior of the monitor. Essentially, this class should not
+    contain functional behavior, but serve as a convenient way to share data
+    among the components of the monitoring system.
+    """
+
+    def __init__(self):
+        self.polling_period = None
+        self.max_polling_frequency = None
+        self.min_cache_lifetime = None
+        self.public_ip = None
+
+
+class Monitor(object):
+    """
+    The Monitor class is intended to act as a unifying interface for the
+    different sub-systems that are used to monitor the NFVI.
+    """
+
+    def __init__(self, loop, log, config):
+        """Create a Monitor object
+
+        Arguments:
+            loop   - an event loop
+            log    - the logger used by this object
+            config - an instance of InstanceConfiguration
+
+        """
+        self._loop = loop
+        self._log = log
+
+        self._cloud_accounts = dict()
+        self._nfvi_plugins = NfviMetricsPluginManager(log)
+        self._cache = NfviMetricsCache(log, loop, self._nfvi_plugins)
+        self._nfvi_interface = NfviInterface(loop, log, self._nfvi_plugins, self._cache)
+        self._config = config
+        self._vnfrs = dict()
+        self._vnfr_to_vdurs = collections.defaultdict(set)
+        self._alarms = collections.defaultdict(list)
+
+    @property
+    def loop(self):
+        """The event loop used by this object"""
+        return self._loop
+
+    @property
+    def log(self):
+        """The event log used by this object"""
+        return self._log
+
+    @property
+    def cache(self):
+        """The NFVI metrics cache"""
+        return self._cache
+
+    @property
+    def metrics(self):
+        """The list of metrics contained in this Monitor"""
+        return self._nfvi_interface.metrics
+
+    def nfvi_metrics_available(self, account):
+        """Returns a boolean indicating whether NFVI metrics are available
+
+        Arguments:
+            account - the name of the cloud account to check
+
+        Returns:
+            a boolean indicating availability of NFVI metrics
+
+        """
+        if account not in self._cloud_accounts:
+            return False
+
+        cloud_account = self._cloud_accounts[account]
+        return self._nfvi_interface.nfvi_metrics_available(cloud_account)
+
+    def add_cloud_account(self, account):
+        """Add a cloud account to the monitor
+
+        Arguments:
+            account - a cloud account object
+
+        Raises:
+            If the cloud account has already been added to the monitor, an
+            AccountAlreadyRegisteredError is raised.
+
+        """
+        if account.name in self._cloud_accounts:
+            raise AccountAlreadyRegisteredError(account.name)
+
+        self._cloud_accounts[account.name] = account
+
+        if account.account_type == "openstack":
+            self.register_cloud_account(account, "monasca")
+        else:
+            self.register_cloud_account(account, "mock")
+
+    def remove_cloud_account(self, account_name):
+        """Remove a cloud account from the monitor
+
+        Arguments:
+            account_name - removes the cloud account that has this name
+
+        Raises:
+            If the specified cloud account cannot be found, an
+            UnknownAccountError is raised.
+
+        """
+        if account_name not in self._cloud_accounts:
+            raise UnknownAccountError()
+
+        # Make sure that there are no VNFRs associated with this account
+        for vnfr in self._vnfrs.values():
+            if vnfr.cloud_account == account_name:
+                raise AccountInUseError()
+
+        del self._cloud_accounts[account_name]
+        self._nfvi_plugins.unregister(account_name)
+
+    def get_cloud_account(self, account_name):
+        """Returns a cloud account by name
+
+        Arguments:
+            account_name - the name of the account to return
+
+        Raises:
+            An UnknownAccountError is raised if there is no account object
+            associated with the provided name
+
+        Returns:
+            A cloud account object
+
+        """
+        if account_name not in self._cloud_accounts:
+            raise UnknownAccountError()
+
+        return self._cloud_accounts[account_name]
+
+    def register_cloud_account(self, account, plugin_name):
+        """Register a cloud account with an NFVI plugin
+
+        Note that a cloud account can only be registered for one plugin at a
+        time.
+
+        Arguments:
+            account     - the cloud account to associate with the plugin
+            plugin_name - the name of the plugin to use
+
+        """
+        self._nfvi_plugins.register(account, plugin_name)
+
+    def add_vnfr(self, vnfr):
+        """Add a VNFR to the monitor
+
+        Arguments:
+            vnfr - a VNFR object
+
+        Raises:
+            An UnknownAccountError is raised if the account name contained in
+            the VNFR does not reference a cloud account that has been added to
+            the monitor.
+
+        """
+        if vnfr.cloud_account not in self._cloud_accounts:
+            raise UnknownAccountError()
+
+        account = self._cloud_accounts[vnfr.cloud_account]
+
+        for vdur in vnfr.vdur:
+            try:
+                self.add_vdur(account, vdur)
+                self._vnfr_to_vdurs[vnfr.id].add(vdur.id)
+            except (VdurMissingVimIdError, VdurAlreadyRegisteredError):
+                pass
+
+        self._vnfrs[vnfr.id] = vnfr
+
+    def update_vnfr(self, vnfr):
+        """Updates the VNFR information in the monitor
+
+        Arguments:
+            vnfr - a VNFR object
+
+        Raises:
+            An UnknownAccountError is raised if the account name contained in
+            the VNFR does not reference a cloud account that has been added to
+            the monitor.
+
+        """
+        if vnfr.cloud_account not in self._cloud_accounts:
+            raise UnknownAccountError()
+
+        account = self._cloud_accounts[vnfr.cloud_account]
+
+        for vdur in vnfr.vdur:
+            try:
+                self.add_vdur(account, vdur)
+                self._vnfr_to_vdurs[vnfr.id].add(vdur.id)
+            except (VdurMissingVimIdError, VdurAlreadyRegisteredError):
+                pass
+
+    def remove_vnfr(self, vnfr_id):
+        """Remove a VNFR from the monitor
+
+        Arguments:
+            vnfr_id - the ID of the VNFR to remove
+
+        """
+        vdur_ids = self._vnfr_to_vdurs[vnfr_id]
+
+        for vdur_id in vdur_ids:
+            self.remove_vdur(vdur_id)
+
+        del self._vnfrs[vnfr_id]
+        del self._vnfr_to_vdurs[vnfr_id]
+
+    def add_vdur(self, account, vdur):
+        """Adds a VDUR to the monitor
+
+        Adding a VDUR to the monitor will automatically create an NFVI metrics
+        object that is associated with the VDUR so that the monitor can
+        provide the NFVI metrics associated with the VDUR.
+
+        Arguments:
+            account - the cloud account associated with the VNFR that contains
+                      the provided VDUR
+            vdur    - a VDUR object
+
+        Raises:
+            A VdurMissingVimIdError is raised if the provided VDUR does not
+            contain a VIM ID. A VdurAlreadyRegisteredError is raised if the ID
+            associated with the VDUR has already been registered.
+
+        """
+        if not vdur.vim_id:
+            raise VdurMissingVimIdError(vdur.id)
+
+        if self.is_registered_vdur(vdur.id):
+            raise VdurAlreadyRegisteredError(vdur.id)
+
+        self.cache.create_entry(account, vdur)
+
+    def remove_vdur(self, vdur_id):
+        """Removes a VDUR from the monitor
+
+        Arguments:
+            vdur_id - the ID of the VDUR to remove
+
+        """
+        self.cache.destroy_entry(vdur_id)
+
+        # Schedule any alarms associated with the VDUR for destruction
+        for account_name, alarm_id in self._alarms[vdur_id]:
+            self.loop.create_task(self.destroy_alarm(account_name, alarm_id))
+
+        del self._alarms[vdur_id]
+
+    def list_vdur(self, vnfr_id):
+        """Returns a list of VDURs
+
+        Arguments:
+            vnfr_id - the identifier of the VNFR that contains the VDURs
+
+        Returns:
+            A list of VDURs
+
+        """
+        return self._vnfrs[vnfr_id].vdur
+
+    def is_registered_vnfr(self, vnfr_id):
+        """Returns True if the VNFR is registered with the monitor
+
+        Arguments:
+            vnfr_id - the ID of the VNFR to check
+
+        Returns:
+            True if the VNFR is registered and False otherwise.
+
+        """
+        return vnfr_id in self._vnfrs
+
+    def is_registered_vdur(self, vdur_id):
+        """Returns True if the VDUR is registered with the monitor
+
+        Arguments:
+            vdur_id - the ID of the VDUR to check
+
+        Returns:
+            True if the VDUR is registered and False otherwise.
+
+        """
+        return self.cache.contains_vdur_id(vdur_id)
+
+    def retrieve_nfvi_metrics(self, vdur_id):
+        """Retrieves the NFVI metrics associated with a VDUR
+
+        Arguments:
+            vdur_id - the ID of the VDUR whose metrics are to be retrieved
+
+        Returns:
+            NFVI metrics for a VDUR
+
+        """
+        return self._nfvi_interface.retrieve(vdur_id)
+
+    @asyncio.coroutine
+    def create_alarm(self, account_name, vdur_id, alarm):
+        """Create a new alarm
+
+        This function create an alarm and augments the provided endpoints with
+        endpoints to the launchpad if the launchpad has a public IP. The added
+        endpoints are of the form,
+
+            http://{host}:4568/{platform}/{vdur_id}/{action}
+
+        where the 'action' is one of 'ok', 'alarm', or 'insufficient_data'. The
+        messages that are pushed to the launchpad are not defined by RIFT, so
+        we need to know which platform an alarm is sent from in order to
+        properly parse it.
+
+
+        Arguments:
+            account_name - the name of the account to use to create the alarm
+            vdur_id      - the identifier of the VDUR to associated with the
+                           alarm. If the identifier is None, the alarm is not
+                           associated with a specific VDUR.
+            alarm        - the alarm data
+
+        """
+        account = self.get_cloud_account(account_name)
+        vim_id = self.cache.to_vim_id(vdur_id)
+
+        # If the launchpad has a public IP, augment the action webhooks to
+        # include the launchpad so that the alarms can be broadcast as event
+        # notifications.
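+        # For example (hypothetical values): with public_ip 10.0.0.1 and an
+        # openstack account, the added webhooks would be
+        #   http://10.0.0.1:4568/openstack/<vdur-id>/ok
+        #   http://10.0.0.1:4568/openstack/<vdur-id>/alarm
+        #   http://10.0.0.1:4568/openstack/<vdur-id>/insufficient_data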
+        if self._config.public_ip is not None:
+            url = "http://{host}:4568/{platform}/{vdur_id}".format(
+                    host=self._config.public_ip,
+                    platform=account.account_type,
+                    vdur_id=vdur_id,
+                    )
+            alarm.actions.ok.add().url = url + "/ok"
+            alarm.actions.alarm.add().url = url + "/alarm"
+            alarm.actions.insufficient_data.add().url = url + "/insufficient_data"
+
+        yield from self._nfvi_interface.alarm_create(account, vim_id, alarm)
+
+        # Associate the VDUR ID with the alarm ID
+        self._alarms[vdur_id].append((account_name, alarm.alarm_id))
+
+    @asyncio.coroutine
+    def destroy_alarm(self, account_name, alarm_id):
+        """Destroy an existing alarm
+
+        Arguments:
+            account_name - the name of the account that owns the alarm
+            alarm_id     - the identifier of the alarm to destroy
+
+        """
+        account = self.get_cloud_account(account_name)
+        yield from self._nfvi_interface.alarm_destroy(account, alarm_id)
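+
+
+# A minimal usage sketch of the Monitor facade above (hypothetical objects;
+# error handling omitted):
+#
+#     monitor = Monitor(loop, log, InstanceConfiguration())
+#     monitor.add_cloud_account(account)     # also registers an NFVI plugin
+#     monitor.add_vnfr(vnfr)                 # registers each VDUR with a vim-id
+#     nfvi_metrics = monitor.retrieve_nfvi_metrics(vdur_id)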
diff --git a/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/tasklet.py b/rwlaunchpad/plugins/rwmonitor/rift/tasklets/rwmonitor/tasklet.py
new file mode 100644 (file)
index 0000000..4ab351e
--- /dev/null
@@ -0,0 +1,714 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+"""
+NFVI MONITORING
+==================================================
+
+Data Model
+--------------------------------------------------
+
+The monitoring tasklet consists of several types of data that are associated
+with one another. The highest level data are the cloud accounts. These objects
+contain authentication information that is used to retrieve metrics as well as
+the provider (and hence the available data source platforms).
+
+Each cloud account is associated with an NfviMetricsPlugin. This is a
+one-to-one relationship. The plugin is the interface to the data source that
+will actually provide the NFVI metrics.
+
+Each cloud account is also associated with several VNFRs. Each VNFR, in turn,
+contains several VDURs. The VDURs represent the level that the NFVI metrics are
+collected at. However, it is important that the relationships among all these
+different objects are carefully managed.
+
+
+        CloudAccount -------------- NfviMetricsPlugin
+            / \
+           /   \
+          / ... \
+         /       \
+       VNFR     VNFR
+                 /\
+                /  \
+               /    \
+              / .... \
+             /        \
+           VDUR      VDUR
+            |          |
+            |          |
+         Metrics     Metrics
+
+
+Monitoring Tasklet
+--------------------------------------------------
+
+The monitoring tasklet (the MonitorTasklet class) is primarily responsible for
+communicating between DTS and the application (the Monitor class), which
+provides the logic for managing and interacting with the data model (see
+above).
+
+"""
+
+import asyncio
+import concurrent.futures
+import time
+
+import tornado.web
+import tornado.httpserver
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwLog', '1.0')
+gi.require_version('RwMonitorYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('VnfrYang', '1.0')
+from gi.repository import (
+    RwDts as rwdts,
+    RwLog as rwlog,
+    RwMonitorYang as rwmonitor,
+    RwLaunchpadYang,
+    RwVnfrYang,
+    VnfrYang,
+)
+
+import rift.tasklets
+import rift.mano.cloud
+
+from . import core
+
+
+class DtsHandler(object):
+    def __init__(self, tasklet):
+        self.reg = None
+        self.tasklet = tasklet
+
+    @property
+    def log(self):
+        return self.tasklet.log
+
+    @property
+    def log_hdl(self):
+        return self.tasklet.log_hdl
+
+    @property
+    def dts(self):
+        return self.tasklet.dts
+
+    @property
+    def loop(self):
+        return self.tasklet.loop
+
+    @property
+    def classname(self):
+        return self.__class__.__name__
+
+class VnfrCatalogSubscriber(DtsHandler):
+    XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+
+    @asyncio.coroutine
+    def register(self):
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            try:
+                if msg is None:
+                    return
+
+                if action == rwdts.QueryAction.CREATE:
+                    self.tasklet.on_vnfr_create(msg)
+
+                elif action == rwdts.QueryAction.UPDATE:
+                    self.tasklet.on_vnfr_update(msg)
+
+                elif action == rwdts.QueryAction.DELETE:
+                    self.tasklet.on_vnfr_delete(msg)
+
+            except Exception as e:
+                self.log.exception(e)
+
+            finally:
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        handler = rift.tasklets.DTS.RegistrationHandler(
+                on_prepare=on_prepare,
+                )
+
+        with self.dts.group_create() as group:
+            group.register(
+                    xpath=VnfrCatalogSubscriber.XPATH,
+                    flags=rwdts.Flag.SUBSCRIBER,
+                    handler=handler,
+                    )
+
+
+class NsInstanceConfigSubscriber(DtsHandler):
+    XPATH = "C,/nsr:ns-instance-config"
+
+    @asyncio.coroutine
+    def register(self):
+        def on_apply(dts, acg, xact, action, _):
+            xact_config = list(self.reg.get_xact_elements(xact))
+            for config in xact_config:
+                self.tasklet.on_ns_instance_config_update(config)
+
+        acg_handler = rift.tasklets.AppConfGroup.Handler(
+                        on_apply=on_apply,
+                        )
+
+        with self.dts.appconf_group_create(acg_handler) as acg:
+            self.reg = acg.register(
+                    xpath=NsInstanceConfigSubscriber.XPATH,
+                    flags=rwdts.Flag.SUBSCRIBER,
+                    )
+
+
+class CloudAccountDtsHandler(DtsHandler):
+    def __init__(self, tasklet):
+        super().__init__(tasklet)
+        self._cloud_cfg_subscriber = None
+
+    def register(self):
+        self.log.debug("creating cloud account config handler")
+        self._cloud_cfg_subscriber = rift.mano.cloud.CloudAccountConfigSubscriber(
+               self.dts, self.log, self.log_hdl,
+               rift.mano.cloud.CloudAccountConfigCallbacks(
+                   on_add_apply=self.tasklet.on_cloud_account_create,
+                   on_delete_apply=self.tasklet.on_cloud_account_delete,
+               )
+           )
+        self._cloud_cfg_subscriber.register()
+
+
+class VdurNfviMetricsPublisher(DtsHandler):
+    """
+    A VdurNfviMetricsPublisher is responsible for publishing the NFVI metrics
+    from a single VDU.
+    """
+
+    XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id='{}']/vnfr:vdur[vnfr:id='{}']/rw-vnfr:nfvi-metrics"
+
+    # This timeout defines the length of time the publisher will wait for a
+    # request to a data source to complete. If the request cannot be completed
+    # before timing out, the current data will be published instead.
+    TIMEOUT = 2.0
+
+    def __init__(self, tasklet, vnfr, vdur):
+        """Create an instance of VdurNvfiPublisher
+
+        Arguments:
+            tasklet - the tasklet
+            vnfr    - the VNFR that contains the VDUR
+            vdur    - the VDUR of the VDU whose metrics are published
+
+        """
+        super().__init__(tasklet)
+        self._vnfr = vnfr
+        self._vdur = vdur
+
+        self._handle = None
+        self._xpath = VdurNfviMetricsPublisher.XPATH.format(vnfr.id, vdur.id)
+
+        self._deregistered = asyncio.Event(loop=self.loop)
+
+    @property
+    def vnfr(self):
+        """The VNFR associated with this publisher"""
+        return self._vnfr
+
+    @property
+    def vdur(self):
+        """The VDUR associated with this publisher"""
+        return self._vdur
+
+    @property
+    def vim_id(self):
+        """The VIM ID of the VDUR associated with this publisher"""
+        return self._vdur.vim_id
+
+    @property
+    def xpath(self):
+        """The XPATH that the metrics are published on"""
+        return self._xpath
+
+    @asyncio.coroutine
+    def dts_on_prepare(self, xact_info, action, ks_path, msg):
+        """Handles the DTS on_prepare callback"""
+        self.log.debug("{}:dts_on_prepare".format(self.classname))
+
+        if action == rwdts.QueryAction.READ:
+            # If the publisher has been deregistered, the xpath element has
+            # been deleted. So we do not want to publish the metrics and
+            # re-create the element.
+            if not self._deregistered.is_set():
+                metrics = self.tasklet.on_retrieve_nfvi_metrics(self.vdur.id)
+                xact_info.respond_xpath(
+                        rwdts.XactRspCode.MORE,
+                        self.xpath,
+                        metrics,
+                        )
+
+        xact_info.respond_xpath(rwdts.XactRspCode.ACK, self.xpath)
+
+    @asyncio.coroutine
+    def register(self):
+        """Register the publisher with DTS"""
+        self._handle = yield from self.dts.register(
+                xpath=self.xpath,
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=self.dts_on_prepare,
+                    ),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+    def deregister(self):
+        """Deregister the publisher from DTS"""
+        # Mark the publisher for deregistration. This prevents the publisher
+        # from creating an element after it has been deleted.
+        self._deregistered.set()
+
+        # Now that we are done with the registration handle, delete the element
+        # and tell DTS to deregister it
+        self._handle.delete_element(self.xpath)
+        self._handle.deregister()
+        self._handle = None
+
+
+class LaunchpadConfigDtsSubscriber(DtsHandler):
+    """
+    This class subscribes to the launchpad configuration and alerts the tasklet
+    to any relevant changes.
+    """
+
+    @asyncio.coroutine
+    def register(self):
+        @asyncio.coroutine
+        def apply_config(dts, acg, xact, action, _):
+            if xact.xact is None:
+                # When RIFT first comes up, an INSTALL is called with the
+                # current config. Since confd doesn't actually persist data,
+                # this never has any data, so skip it for now.
+                self.log.debug("No xact handle. Skipping apply config")
+                return
+
+            try:
+                cfg = list(self.reg.get_xact_elements(xact))[0]
+                if cfg.public_ip != self.tasklet.public_ip:
+                    yield from self.tasklet.on_public_ip(cfg.public_ip)
+
+            except Exception as e:
+                self.log.exception(e)
+
+        try:
+            acg_handler = rift.tasklets.AppConfGroup.Handler(
+                            on_apply=apply_config,
+                            )
+
+            with self.dts.appconf_group_create(acg_handler) as acg:
+                self.reg = acg.register(
+                        xpath="C,/rw-launchpad:launchpad-config",
+                        flags=rwdts.Flag.SUBSCRIBER,
+                        )
+
+        except Exception as e:
+            self.log.exception(e)
+
+
+class CreateAlarmRPC(DtsHandler):
+    """
+    This class is used to listen for RPC calls to /vnfr:create-alarm, and pass
+    them on to the tasklet.
+    """
+
+    def __init__(self, tasklet):
+        super().__init__(tasklet)
+        self._handle = None
+
+    @asyncio.coroutine
+    def register(self):
+        """Register this handler with DTS"""
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            try:
+                response = VnfrYang.YangOutput_Vnfr_CreateAlarm()
+                response.alarm_id = yield from self.tasklet.on_create_alarm(
+                        msg.cloud_account,
+                        msg.vdur_id,
+                        msg.alarm,
+                        )
+
+                xact_info.respond_xpath(
+                        rwdts.XactRspCode.ACK,
+                        "O,/vnfr:create-alarm",
+                        response,
+                        )
+
+            except Exception as e:
+                self.log.exception(e)
+                xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+
+        self._handle = yield from self.dts.register(
+                xpath="I,/vnfr:create-alarm",
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=on_prepare
+                    ),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+    def deregister(self):
+        """Deregister this handler"""
+        self._handle.deregister()
+        self._handle = None
+
+
+class DestroyAlarmRPC(DtsHandler):
+    """
+    This class is used to listen for RPC calls to /vnfr:destroy-alarm, and pass
+    them on to the tasklet.
+    """
+
+    def __init__(self, tasklet):
+        super().__init__(tasklet)
+        self._handle = None
+
+    @asyncio.coroutine
+    def register(self):
+        """Register this handler with DTS"""
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            try:
+                yield from self.tasklet.on_destroy_alarm(
+                        msg.cloud_account,
+                        msg.alarm_id,
+                        )
+
+                xact_info.respond_xpath(
+                        rwdts.XactRspCode.ACK,
+                        "O,/vnfr:destroy-alarm"
+                        )
+
+            except Exception as e:
+                self.log.exception(e)
+                xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+
+        self._handle = yield from self.dts.register(
+                xpath="I,/vnfr:destroy-alarm",
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=on_prepare
+                    ),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+    def deregister(self):
+        """Deregister this handler"""
+        self._handle.deregister()
+        self._handle = None
+
+
+class Delegate(object):
+    """
+    This class is used to delegate calls to collections of listener objects.
+    The listeners are expected to conform to the required function arguments,
+    but this is not enforced by the Delegate class itself.
+    """
+
+    def __init__(self):
+        self._listeners = list()
+
+    def __call__(self, *args, **kwargs):
+        """Delegate the call to the registered listeners"""
+        for listener in self._listeners:
+            listener(*args, **kwargs)
+
+    def register(self, listener):
+        """Register a listener
+
+        Arguments:
+            listener - an object that function calls will be delegated to
+
+        """
+        self._listeners.append(listener)
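+
+    # A minimal usage sketch (hypothetical listener; nothing in this file
+    # registers through Delegate yet):
+    #
+    #   on_event = Delegate()
+    #   on_event.register(lambda vim_id: print("event for", vim_id))
+    #   on_event("vim-1234")   # every registered listener is invoked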
+
+
+class WebhookHandler(tornado.web.RequestHandler):
+    @property
+    def log(self):
+        return self.application.tasklet.log
+
+    def options(self, *args, **kargs):
+        pass
+
+    def set_default_headers(self):
+        self.set_header('Access-Control-Allow-Origin', '*')
+        self.set_header('Access-Control-Allow-Headers', 'Content-Type, Cache-Control, Accept, X-Requested-With, Authorization')
+        self.set_header('Access-Control-Allow-Methods', 'POST')
+
+    def post(self, action, vim_id):
+        pass
+
+
+class WebhookApplication(tornado.web.Application):
+    DEFAULT_WEBHOOK_PORT = 4568
+
+    def __init__(self, tasklet):
+        self.tasklet = tasklet
+
+        super().__init__([
+                (r"/([^/]+)/([^/]+)/?", WebhookHandler),
+                ])
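+        # The two capture groups map onto WebhookHandler.post(action, vim_id);
+        # e.g. a POST to /openstack/vim-1234 (illustrative values) arrives as
+        # action="openstack", vim_id="vim-1234".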
+
+
+class MonitorTasklet(rift.tasklets.Tasklet):
+    """
+    The MonitorTasklet provides an interface for DTS to interact with an
+    instance of the Monitor class. This allows the Monitor class to remain
+    independent of DTS.
+    """
+
+    DEFAULT_POLLING_PERIOD = 1.0
+
+    def __init__(self, *args, **kwargs):
+        try:
+            super().__init__(*args, **kwargs)
+            self.rwlog.set_category("rw-monitor-log")
+
+            self.vnfr_subscriber = VnfrCatalogSubscriber(self)
+            self.cloud_cfg_subscriber = CloudAccountDtsHandler(self)
+            self.ns_instance_config_subscriber = NsInstanceConfigSubscriber(self)
+            self.launchpad_cfg_subscriber = LaunchpadConfigDtsSubscriber(self)
+
+            self.config = core.InstanceConfiguration()
+            self.config.polling_period = MonitorTasklet.DEFAULT_POLLING_PERIOD
+
+            self.monitor = core.Monitor(self.loop, self.log, self.config)
+            self.vdur_handlers = dict()
+
+            self.webhooks = None
+            self.create_alarm_rpc = CreateAlarmRPC(self)
+            self.destroy_alarm_rpc = DestroyAlarmRPC(self)
+
+
+        except Exception as e:
+            self.log.exception(e)
+
+    @property
+    def polling_period(self):
+        return self.config.polling_period
+
+    @property
+    def public_ip(self):
+        """The public IP of the launchpad"""
+        return self.config.public_ip
+
+    def start(self):
+        super().start()
+        self.log.info("Starting MonitoringTasklet")
+
+        self.log.debug("Registering with dts")
+        self.dts = rift.tasklets.DTS(
+                self.tasklet_info,
+                RwLaunchpadYang.get_schema(),
+                self.loop,
+                self.on_dts_state_change
+                )
+
+        self.log.debug("Created DTS Api GI Object: %s", self.dts)
+
+    def stop(self):
+        try:
+            self.dts.deinit()
+        except Exception as e:
+            self.log.exception(e)
+
+    @asyncio.coroutine
+    def init(self):
+        self.log.debug("creating cloud account handler")
+        self.cloud_cfg_subscriber.register()
+
+        self.log.debug("creating launchpad config subscriber")
+        yield from self.launchpad_cfg_subscriber.register()
+
+        self.log.debug("creating NS instance config subscriber")
+        yield from self.ns_instance_config_subscriber.register()
+
+        self.log.debug("creating vnfr subscriber")
+        yield from self.vnfr_subscriber.register()
+
+        self.log.debug("creating create-alarm rpc handler")
+        yield from self.create_alarm_rpc.register()
+
+        self.log.debug("creating destroy-alarm rpc handler")
+        yield from self.destroy_alarm_rpc.register()
+
+        self.log.debug("creating webhook server")
+        loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
+        self.webhooks = WebhookApplication(self)
+        self.server = tornado.httpserver.HTTPServer(
+            self.webhooks,
+            io_loop=loop,
+        )
+
+    @asyncio.coroutine
+    def on_public_ip(self, ip):
+        """Store the public IP of the launchpad
+
+        Arguments:
+            ip - a string containing the public IP address of the launchpad
+
+        """
+        self.config.public_ip = ip
+
+    def on_ns_instance_config_update(self, config):
+        """Update configuration information
+
+        Arguments:
+            config - an NsInstanceConfig object
+
+        """
+        if config.nfvi_polling_period is not None:
+            self.config.polling_period = config.nfvi_polling_period
+
+    def on_cloud_account_create(self, account):
+        self.monitor.add_cloud_account(account.cal_account_msg)
+
+    def on_cloud_account_delete(self, account_name):
+        self.monitor.remove_cloud_account(account_name)
+
+    @asyncio.coroutine
+    def run(self):
+        # Serve the webhook application on the HTTP server created in init(),
+        # which is bound to the tasklet's io_loop.
+        self.server.listen(WebhookApplication.DEFAULT_WEBHOOK_PORT)
+
+    def on_instance_started(self):
+        self.log.debug("Got instance started callback")
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Handle DTS state change
+
+        Take action according to current DTS state to transition application
+        into the corresponding application state
+
+        Arguments
+            state - current dts state
+
+        """
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self.dts.handle.set_state(next_state)
+
+    def on_vnfr_create(self, vnfr):
+        if not self.monitor.nfvi_metrics_available(vnfr.cloud_account):
+            msg = "NFVI metrics unavailable for {}"
+            self.log.warning(msg.format(vnfr.cloud_account))
+            return
+
+        self.monitor.add_vnfr(vnfr)
+
+        # Create NFVI handlers for VDURs
+        for vdur in vnfr.vdur:
+            if vdur.vim_id is not None:
+                coro = self.register_vdur_nfvi_handler(vnfr, vdur)
+                self.loop.create_task(coro)
+
+    def on_vnfr_update(self, vnfr):
+        if not self.monitor.nfvi_metrics_available(vnfr.cloud_account):
+            msg = "NFVI metrics unavailable for {}"
+            self.log.warning(msg.format(vnfr.cloud_account))
+            return
+
+        self.monitor.update_vnfr(vnfr)
+
+        # TODO handle the removal of vdurs
+        for vdur in vnfr.vdur:
+            if vdur.vim_id is not None:
+                coro = self.register_vdur_nfvi_handler(vnfr, vdur)
+                self.loop.create_task(coro)
+
+    def on_vnfr_delete(self, vnfr):
+        self.monitor.remove_vnfr(vnfr.id)
+
+        # Delete any NFVI handlers associated with the VNFR
+        for vdur in vnfr.vdur:
+            self.deregister_vdur_nfvi_handler(vdur.id)
+
+    def on_retrieve_nfvi_metrics(self, vdur_id):
+        return self.monitor.retrieve_nfvi_metrics(vdur_id)
+
+    @asyncio.coroutine
+    def register_vdur_nfvi_handler(self, vnfr, vdur):
+        if vdur.vim_id is None:
+            return
+
+        if vdur.operational_status != "running":
+            return
+
+        if vdur.id not in self.vdur_handlers:
+            publisher = VdurNfviMetricsPublisher(self, vnfr, vdur)
+            yield from publisher.register()
+            self.vdur_handlers[vdur.id] = publisher
+
+    def deregister_vdur_nfvi_handler(self, vdur_id):
+        if vdur_id in self.vdur_handlers:
+            handler = self.vdur_handlers[vdur_id]
+
+            del self.vdur_handlers[vdur_id]
+            handler.deregister()
+
+    @asyncio.coroutine
+    def on_create_alarm(self, account, vdur_id, alarm):
+        """Creates an alarm and returns an alarm ID
+
+        Arguments:
+            account - a name of the cloud account used to authenticate the
+                      creation of an alarm
+            vdur_id - the identifier of VDUR to create the alarm for
+            alarm   - a structure defining the alarm that should be created
+
+        Returns:
+            An identifier specific to the created alarm
+
+        """
+        return (yield from self.monitor.create_alarm(account, vdur_id, alarm))
+
+    @asyncio.coroutine
+    def on_destroy_alarm(self, account, alarm_id):
+        """Destroys an alarm with the specified identifier
+
+        Arguments:
+            account  - the name of the cloud account used to authenticate the
+                       destruction of the alarm
+            alarm_id - the identifier of the alarm to destroy
+
+        """
+        yield from self.monitor.destroy_alarm(account, alarm_id)
diff --git a/rwlaunchpad/plugins/rwmonitor/rwmonitor.py b/rwlaunchpad/plugins/rwmonitor/rwmonitor.py
new file mode 100755 (executable)
index 0000000..497e917
--- /dev/null
@@ -0,0 +1,28 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwmonitor
+
+class Tasklet(rift.tasklets.rwmonitor.MonitorTasklet):
+    pass
+
+# vim: sw=4
diff --git a/rwlaunchpad/plugins/rwmonparam/CMakeLists.txt b/rwlaunchpad/plugins/rwmonparam/CMakeLists.txt
new file mode 100644 (file)
index 0000000..ad63593
--- /dev/null
@@ -0,0 +1,41 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Varun Prasad
+# Creation Date: 2016/07/01
+# 
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwmonparam)
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/${TASKLET_NAME}/__init__.py
+    rift/tasklets/${TASKLET_NAME}/aggregator.py
+    rift/tasklets/${TASKLET_NAME}/nsr_core.py
+    rift/tasklets/${TASKLET_NAME}/vnfr_core.py
+    rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
diff --git a/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/__init__.py b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/__init__.py
new file mode 100644 (file)
index 0000000..b775943
--- /dev/null
@@ -0,0 +1 @@
+from .rwmonparam import MonitoringParameterTasklet
diff --git a/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/aggregator.py b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/aggregator.py
new file mode 100644 (file)
index 0000000..47b1d15
--- /dev/null
@@ -0,0 +1,160 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file aggregator.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 09-Jul-2016
+
+"""
+import abc
+import functools
+
+
+class IncompatibleAggregationType(Exception):
+    pass
+
+class InvalidAggregationType(Exception):
+    pass
+
+class InvalidAggregationOperation(Exception):
+    pass
+
+class InvalidAggregationValues(Exception):
+    pass
+
+
+def make_aggregator(field_types):
+    """A factory method to create the aggregator based on the field type
+    [value_interger, value_string or value_decimal] 
+    
+    Args:
+        field_types (list): list of field types to aggregate
+        values (list): List of values
+        aggregation_type (str): Type of aggregation.
+    
+    Returns:
+        subclass of ValueAggregator
+    
+    Raises:
+        InvalidAggregationType: If Unknown aggregation type is provided
+        InvalidAggregationValues: Raised if a mix of field types are provided.
+    """
+    if len(set(field_types)) != 1:
+        raise InvalidAggregationValues(
+            "Multiple value types provided for aggrgation {}".format(field_types))
+
+    field_type = field_types[0]
+
+    if field_type == IntValueAggregator.field_name():
+        return IntValueAggregator()
+    elif field_type == DecimalValueAggregator.field_name():
+        return DecimalValueAggregator()
+    elif field_type == StringValueAggregator.field_name():
+        return StringValueAggregator()
+
+    raise InvalidAggregationType("Invalid aggregation type")
+
+
+class ValueAggregator(abc.ABC):
+    """Base class that defines all the basic aggregation operations.
+
+    Subclasses override field_name() and whichever operations they support;
+    unsupported operations raise InvalidAggregationOperation.
+    """
+    @classmethod
+    @abc.abstractmethod
+    def field_name(cls):
+        pass
+
+    def average(self, values):
+        raise InvalidAggregationOperation(
+                "Invalid operation AVERAGE for {}".format(values))
+
+    def sum(self, values):
+        raise InvalidAggregationOperation(
+                "Invalid operation SUM for {}".format(values))
+
+    def maximum(self, values):
+        raise InvalidAggregationOperation(
+                "Invalid operation MAXIMUM for {}".format(values))
+
+    def minimum(self, values):
+        raise InvalidAggregationOperation(
+                "Invalid operation MINIMUM for {}".format(values))
+
+    def count(self, values):
+        raise InvalidAggregationOperation(
+                "Invalid operation COUNT for {}".format(values))
+
+    def aggregate(self, aggregation_type, values):
+        OP_MAP = {
+                "AVERAGE": self.average,
+                "SUM": self.sum,
+                "MAXIMUM": self.maximum,
+                "MINIMUM": self.minimum,
+                "COUNT": self.count
+            }
+
+        op_func = OP_MAP.get(aggregation_type, None)
+
+        if op_func is None:
+            raise InvalidAggregationType("Unknown Aggregation type provided.")
+
+        return self.field_name(), op_func(values)
+
+
+class StringValueAggregator(ValueAggregator):
+
+    @classmethod
+    def field_name(cls):
+        return "value_string"
+
+
+class DecimalValueAggregator(ValueAggregator):
+
+    @classmethod
+    def field_name(cls):
+        return "value_decimal"
+
+    def average(self, values):
+        avg = functools.reduce(lambda x, y: x + y, values) / len(values)
+        return avg
+
+    def sum(self, values):
+        return functools.reduce(lambda x, y: x + y, values)
+
+    def maximum(self, values):
+        return max(values)
+
+    def minimum(self, values):
+        return min(values)
+
+    def count(self, values):
+        return len(values)
+
+
+class IntValueAggregator(DecimalValueAggregator):
+
+    @classmethod
+    def field_name(cls):
+        return "value_integer"
+
+    def average(self, values):
+        avg = functools.reduce(lambda x, y: x + y, values) / len(values)
+        return int(avg)
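+
+
+# A minimal usage sketch of the factory above (illustrative values, not part
+# of the tasklet flow):
+#
+#   field, value = make_aggregator(
+#           ["value_integer", "value_integer"]).aggregate("AVERAGE", [10, 21])
+#   # field == "value_integer", value == 15 (integer-truncated mean)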
diff --git a/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/nsr_core.py b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/nsr_core.py
new file mode 100644 (file)
index 0000000..b1b9cd0
--- /dev/null
@@ -0,0 +1,417 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file nsr_core.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 09-Jul-2016
+
+"""
+
+import asyncio
+import functools
+import uuid
+
+from gi.repository import (RwDts as rwdts, NsrYang)
+import rift.mano.dts as mano_dts
+
+from . import aggregator as aggregator
+
+
+class MissingValueField(Exception):
+    pass
+
+
+class VnfrMonitoringParamSubscriber(mano_dts.AbstractOpdataSubscriber):
+    """Registers for VNFR monitoring parameter changes.
+    
+    Attributes:
+        monp_id (str): Monitoring Param ID
+        vnfr_id (str): VNFR ID
+    """
+    def __init__(self, log, dts, loop, vnfr_id, monp_id, callback=None):
+        super().__init__(log, dts, loop, callback)
+        self.vnfr_id = vnfr_id
+        self.monp_id = monp_id
+
+    def get_xpath(self):
+        return("D,/vnfr:vnfr-catalog" +
+               "/vnfr:vnfr[vnfr:id='{}']".format(self.vnfr_id) +
+               "/vnfr:monitoring-param" +
+               "[vnfr:id='{}']".format(self.monp_id))
+
+
+class NsrMonitoringParam():
+    """Class that handles NS Mon-param data.
+    """
+    MonParamMsg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_MonitoringParam
+    MISSING = None
+    DEFAULT_AGGREGATION_TYPE = "AVERAGE"
+
+    @classmethod
+    def create_nsr_mon_params(cls, nsd, constituent_vnfrs, store):
+        """Convenience class that constructs NSMonitoringParam objects
+        
+        Args:
+            nsd (RwNsdYang.YangData_Nsd_NsdCatalog_Nsd): Nsd object
+            constituent_vnfrs (list): List of constituent vnfr objects of NSR
+            store (SubscriberStore): Store object instance
+        
+        Returns:
+            list NsrMonitoringParam object.
+
+        Also handles legacy NSD descriptor which has no mon-param defines. In
+        such cases the mon-params are created from VNFD's mon-param config.
+        """
+
+        mon_params = []
+        for mon_param_msg in nsd.monitoring_param:
+            mon_params.append(NsrMonitoringParam(
+                    mon_param_msg,
+                    constituent_vnfrs
+                    ))
+
+        # Legacy Handling.
+        # This indicates that the NSD had no mon-param config.
+        if not nsd.monitoring_param:
+            for vnfr in constituent_vnfrs:
+                vnfd = store.get_vnfd(vnfr.vnfd_ref)
+                for monp in vnfd.monitoring_param:
+                    mon_params.append(NsrMonitoringParam(
+                        monp,
+                        [vnfr],
+                        is_legacy=True))
+
+        return mon_params
+
+    def __init__(self, monp_config, constituent_vnfrs, is_legacy=False):
+        """
+        Args:
+            monp_config (GiObject): Config data to create the NSR mon-param msg
+            constituent_vnfrs (list): List of VNFRs that may contain the mon-param
+            is_legacy (bool, optional): If set then the mon-param are created from
+                vnfd's config and not NSD's config.
+        """
+        self._constituent_vnfr_map = {vnfr.id:vnfr for vnfr in constituent_vnfrs}
+
+        # An internal store to hold the data
+        # Key => (vnfr_id, monp_id)
+        # value => (value_type, value)
+        self.vnfr_monparams = {}
+
+        if not is_legacy:
+            self._msg = self._convert_nsd_msg(monp_config)
+        else:
+            self._msg = self._convert_vnfd_msg(monp_config)
+
+    @property
+    def nsr_mon_param_msg(self):
+        """Gi object msg"""
+        return self._msg
+
+    @property
+    def vnfr_ids(self):
+        """Store Keys"""
+        return list(self.vnfr_monparams.keys())
+
+    @property
+    def vnfr_values(self):
+        """Store values"""
+        return list(self.vnfr_monparams.values())
+
+    @property
+    def is_ready(self):
+        """Flag which indicates if all of the constituent vnfr values are
+        available to perform the aggregation"""
+        return (self.MISSING not in self.vnfr_values)
+
+    @property
+    def aggregation_type(self):
+        """Aggregation type"""
+        return self.nsr_mon_param_msg.aggregation_type
+
+    @property
+    def is_legacy(self):
+        return (self.aggregation_type is None)
+
+    @classmethod
+    def extract_value(cls, monp):
+        """Class method to extract the value type and value from the 
+        mon-param gi message
+        
+        Args:
+            monp (GiObject): Mon param msg
+        
+        Returns:
+            Tuple: (value type, value)
+        
+        Raises:
+            MissingValueField: Raised if no valid field are available.
+        """
+        if monp.has_field("value_integer"):
+            return ("value_integer", monp.value_integer)
+        elif monp.has_field("value_decimal"):
+            return ("value_decimal", monp.value_decimal)
+        elif monp.has_field("value_string"):
+            return ("value_string", monp.value_string)
+
+        return None
+
+    def _constituent_vnfrs(self, constituent_vnfr_ids):
+        # Fetch the VNFRs
+        vnfr_map = {}
+        for constituent_vnfr in constituent_vnfr_ids:
+            vnfr_id = constituent_vnfr.vnfr_id
+            vnfr_map[vnfr_id] = self._store.get_vnfr(vnfr_id)
+
+        return vnfr_map
+
+    def _extract_ui_elements(self, monp):
+        ui_fields = ["group_tag", "description", "widget_type", "units", "value_type"]
+        ui_data = [getattr(monp, ui_field) for ui_field in ui_fields]
+
+        return dict(zip(ui_fields, ui_data))
+
+
+    def _convert_nsd_msg(self, nsd_monp):
+        """Create initial msg without values"""
+        vnfd_to_vnfr = {vnfr.vnfd_ref: vnfr_id
+                for vnfr_id, vnfr in self._constituent_vnfr_map.items()}
+
+        # First, convert the monp param ref from vnfd to vnfr terms.
+        vnfr_mon_param_ref = []
+        for vnfd_mon in nsd_monp.vnfd_monitoring_param:
+            vnfr_id = vnfd_to_vnfr[vnfd_mon.vnfd_id_ref]
+            monp_id = vnfd_mon.vnfd_monitoring_param_ref
+
+            self.vnfr_monparams[(vnfr_id, monp_id)] = self.MISSING
+
+            vnfr_mon_param_ref.append({
+                'vnfr_id_ref': vnfr_id,
+                'vnfr_mon_param_ref': monp_id
+                })
+
+        monp_fields = {
+                # For now both the NSD and NSR's monp IDs are the same.
+                'id': nsd_monp.id,
+                'name': nsd_monp.name,
+                'nsd_mon_param_ref': nsd_monp.id,
+                'vnfr_mon_param_ref': vnfr_mon_param_ref,
+                'aggregation_type': nsd_monp.aggregation_type
+            }
+
+        ui_fields = self._extract_ui_elements(nsd_monp)
+        monp_fields.update(ui_fields)
+        monp = self.MonParamMsg.from_dict(monp_fields)
+
+        return monp
+
+    def _convert_vnfd_msg(self, vnfd_monp):
+
+        vnfr = list(self._constituent_vnfr_map.values())[0]
+        self.vnfr_monparams[(vnfr.id, vnfd_monp.id)] = self.MISSING
+
+        monp_data = {
+                'id': str(uuid.uuid1()),
+                'name': vnfd_monp.name,
+                'vnfr_mon_param_ref': [{
+                    'vnfr_id_ref': vnfr.id,
+                    'vnfr_mon_param_ref': vnfd_monp.id
+                    }]
+                }
+
+        ui_fields = self._extract_ui_elements(vnfd_monp)
+        monp_data.update(ui_fields)
+        monp = self.MonParamMsg.from_dict(monp_data)
+
+        return monp
+
+    def update_vnfr_value(self, key, value):
+        """Update the internal store
+
+        Args:
+            key (Tuple): (vnfr_id, monp_id)
+            value (Tuple): (value_type, value)
+        """
+        self.vnfr_monparams[key] = value
+
+    def update_ns_value(self, value_field, value):
+        """Updates the NS mon-param data with the aggregated value.
+
+        Args:
+            value_field (str): Value field in NSR
+            value : Aggregated value
+        """
+        setattr(self.nsr_mon_param_msg, value_field, value)
+
+
+class NsrMonitoringParamPoller(mano_dts.DtsHandler):
+    """Handler responsible for publishing NS level monitoring
+    parameters.
+
+    Design:
+        1. Creates subscribers for each VNFR's monitoring parameter.
+        2. Accumulates the VNFR's value into the NsrMonitoringParam's internal
+            store.
+        3. Once all values are available, aggregates the values and triggers
+            a callback notification to the subscribers.
+    """
+    @classmethod
+    def from_handler(cls, handler, monp, callback):
+        """Convenience class to build NsrMonitoringParamPoller object.
+        """
+        return cls(handler.log, handler.dts, handler.loop, monp, callback)
+
+    def __init__(self, log, dts, loop, monp, callback=None):
+        """
+        Args:
+            monp (NsrMonitoringParam): Param object
+            callback (None, optional): Callback to be triggered after value has
+                been aggregated.
+        """
+        super().__init__(log, dts, loop)
+
+        self.monp = monp
+        self.subscribers = []
+        self.callback = callback
+        self._agg = None
+
+    def make_aggregator(self, field_types):
+        if not self._agg:
+            self._agg = aggregator.make_aggregator(field_types)
+        return self._agg
+
+
+    def update_value(self, monp, action, vnfr_id):
+        """Callback that gets triggered when VNFR's mon param changes.
+
+        Args:
+            monp (Gi Object): Gi object msg
+            action (rwdts.QueryAction): Action type
+            vnfr_id (str): Vnfr ID
+        """
+        key = (vnfr_id, monp.id)
+        value = NsrMonitoringParam.extract_value(monp)
+
+        if not value:
+            return
+
+        # Accumulate the value
+        self.monp.update_vnfr_value(key, value)
+
+        # If not all of the values are available yet, don't start
+        # the aggregation process.
+        if not self.monp.is_ready:
+            return
+
+        if self.monp.is_legacy:
+            # If no monp are specified then copy over the vnfr's monp data
+            value_field, value = value
+        else:
+            field_types, values = zip(*self.monp.vnfr_values)
+
+            value_field, value = self.make_aggregator(field_types).aggregate(
+                    self.monp.aggregation_type,
+                    values)
+
+        self.monp.update_ns_value(value_field, value)
+        if self.callback:
+            self.callback(self.monp.nsr_mon_param_msg)
+
+    @asyncio.coroutine
+    def register(self):
+        for vnfr_id, monp_id in self.monp.vnfr_ids:
+            callback = functools.partial(self.update_value, vnfr_id=vnfr_id)
+            self.subscribers.append(VnfrMonitoringParamSubscriber(
+                self.log, self.dts, self.loop, vnfr_id, monp_id, callback=callback))
+
+    @asyncio.coroutine
+    def start(self):
+        for sub in self.subscribers:
+            yield from sub.register()
+
+    def stop(self):
+        for sub in self.subscribers:
+            sub.deregister()
+
+
+class NsrMonitorDtsHandler(mano_dts.DtsHandler):
+    """ NSR monitoring class """
+
+    def __init__(self, log, dts, loop, nsr, constituent_vnfrs, store):
+        """
+        Args:
+            nsr (RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr): NSR object
+            constituent_vnfrs (list): list of VNFRs in NSR
+            store (SubscriberStore): Store instance
+        """
+        super().__init__(log, dts, loop)
+
+        self.nsr = nsr
+        self.store = store
+        self.constituent_vnfrs = constituent_vnfrs
+        self.mon_params_pollers = []
+
+    def xpath(self, param_id=None):
+        return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
+            "[nsr:ns-instance-config-ref='{}']".format(self.nsr.ns_instance_config_ref) +
+            "/nsr:monitoring-param" +
+            ("[nsr:id='{}']".format(param_id) if param_id else ""))
+
+    @asyncio.coroutine
+    def register(self):
+        self.reg = yield from self.dts.register(xpath=self.xpath(),
+                  flags=rwdts.Flag.PUBLISHER|rwdts.Flag.CACHE|rwdts.Flag.NO_PREP_READ)
+
+        assert self.reg is not None
+
+    def callback(self, nsr_mon_param_msg):
+        """Callback that triggers update.
+        """
+        self.reg.update_element(
+                self.xpath(param_id=nsr_mon_param_msg.id),
+                nsr_mon_param_msg)
+
+    @asyncio.coroutine
+    def start(self):
+        nsd = self.store.get_nsd(self.nsr.nsd_ref)
+        mon_params = NsrMonitoringParam.create_nsr_mon_params(
+                nsd,
+                self.constituent_vnfrs,
+                self.store)
+
+        for monp in mon_params:
+            poller = NsrMonitoringParamPoller.from_handler(
+                    self,
+                    monp,
+                    callback=self.callback)
+
+            self.mon_params_pollers.append(poller)
+            yield from poller.register()
+            yield from poller.start()
+
+    def stop(self):
+        self.deregister()
+        for poller in self.mon_params_pollers:
+            poller.stop()
+
+
+    def deregister(self):
+        """ de-register with dts """
+        if self.reg is not None:
+            self.reg.deregister()
+            self.reg = None
diff --git a/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/rwmonparam.py b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/rwmonparam.py
new file mode 100644 (file)
index 0000000..d0f31e3
--- /dev/null
@@ -0,0 +1,216 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file rwmonparam.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@date 01-Jul-2016
+
+"""
+
+import asyncio
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+
+from gi.repository import (
+        RwDts as rwdts,
+        RwLaunchpadYang,
+        ProtobufC)
+import rift.mano.cloud
+import rift.mano.dts as subscriber
+import rift.tasklets
+
+from . import vnfr_core
+from . import nsr_core
+
+
+class MonitoringParameterTasklet(rift.tasklets.Tasklet):
+    """The main task of this Tasklet is to listen for VNFR changes and once the
+    VNFR hits the running state, triggers the monitor.
+    """
+    def __init__(self, *args, **kwargs):
+        try:
+            super().__init__(*args, **kwargs)
+            self.rwlog.set_category("rw-monitor-log")
+        except Exception as e:
+            self.log.exception(e)
+
+        self.vnfr_subscriber = None
+        self.store = None
+
+        self.vnfr_monitors = {}
+        self.nsr_monitors = {}
+
+        # Needs to be moved to store once the DTS bug is resolved
+        self.vnfrs = {}
+
+    def start(self):
+        super().start()
+
+        self.log.info("Starting MonitoringParameterTasklet")
+        self.log.debug("Registering with dts")
+
+        self.dts = rift.tasklets.DTS(
+                self.tasklet_info,
+                RwLaunchpadYang.get_schema(),
+                self.loop,
+                self.on_dts_state_change
+                )
+
+        self.vnfr_subscriber = subscriber.VnfrCatalogSubscriber.from_tasklet(
+                self,
+                callback=self.handle_vnfr)
+        self.nsr_subscriber = subscriber.NsrCatalogSubscriber.from_tasklet(
+                self,
+                callback=self.handle_nsr)
+
+        self.store = subscriber.SubscriberStore.from_tasklet(self)
+
+        self.log.debug("Created DTS Api GI Object: %s", self.dts)
+
+    def stop(self):
+        try:
+            self.dts.deinit()
+        except Exception as e:
+            self.log.exception(e)
+
+    @asyncio.coroutine
+    def init(self):
+        self.log.debug("creating vnfr subscriber")
+        yield from self.store.register()
+        yield from self.vnfr_subscriber.register()
+        yield from self.nsr_subscriber.register()
+
+    @asyncio.coroutine
+    def run(self):
+        pass
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Handle DTS state change
+
+        Take action according to current DTS state to transition application
+        into the corresponding application state
+
+        Arguments
+            state - current dts state
+
+        """
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self.dts.handle.set_state(next_state)
+
+    def handle_vnfr(self, vnfr, action):
+        """Starts a monitoring parameter job for every VNFR that reaches
+        running state
+
+        Args:
+            vnfr (GiObject): VNFR Gi object message from DTS
+            action (rwdts.QueryAction): Action type of the change; DELETE
+                stops and removes the monitor.
+        """
+
+        def vnfr_create():
+            # if vnfr.operational_status == "running" and vnfr.id not in self.vnfr_monitors:
+            if vnfr.config_status == "configured" and vnfr.id not in self.vnfr_monitors:
+
+                vnf_mon = vnfr_core.VnfMonitorDtsHandler.from_vnf_data(
+                        self,
+                        vnfr,
+                        self.store.get_vnfd(vnfr.vnfd_ref))
+
+                self.vnfr_monitors[vnfr.id] = vnf_mon
+                self.vnfrs[vnfr.id] = vnfr
+
+                @asyncio.coroutine
+                def task():
+                    yield from vnf_mon.register()
+                    vnf_mon.start()
+
+                self.loop.create_task(task())
+
+
+        def vnfr_delete():
+            if vnfr.id in self.vnfr_monitors:
+                self.log.debug("VNFR %s deleted: Stopping vnfr monitoring", vnfr.id)
+                vnf_mon = self.vnfr_monitors.pop(vnfr.id)
+                vnf_mon.stop()
+                self.vnfrs.pop(vnfr.id)
+
+        if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+            vnfr_create()
+        elif action == rwdts.QueryAction.DELETE:
+            vnfr_delete()
+
+
+    def handle_nsr(self, nsr, action):
+        """Callback for NSR opdata changes. Creates a publisher for every
+        NS that moves to config state.
+
+        Args:
+            nsr (RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr): Ns Opdata
+            action (rwdts.QueryAction): Action type of the change.
+        """
+        def nsr_create():
+            # if nsr.operational_status == "running" and nsr.ns_instance_config_ref not in self.nsr_monitors:
+            if nsr.config_status == "configured" and nsr.ns_instance_config_ref not in self.nsr_monitors:
+                nsr_mon = nsr_core.NsrMonitorDtsHandler(
+                        self.log,
+                        self.dts,
+                        self.loop,
+                        nsr,
+                        list(self.vnfrs.values()),
+                        self.store
+                        )
+
+                self.nsr_monitors[nsr.ns_instance_config_ref] = nsr_mon
+
+                @asyncio.coroutine
+                def task():
+                    yield from nsr_mon.register()
+                    yield from nsr_mon.start()
+
+                self.loop.create_task(task())
+
+
+
+        def nsr_delete():
+            if nsr.ns_instance_config_ref in self.nsr_monitors:
+                nsr_mon = self.nsr_monitors.pop(nsr.ns_instance_config_ref)
+                nsr_mon.stop()
+
+        if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+            nsr_create()
+        elif action == rwdts.QueryAction.DELETE:
+            nsr_delete()
diff --git a/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/vnfr_core.py b/rwlaunchpad/plugins/rwmonparam/rift/tasklets/rwmonparam/vnfr_core.py
new file mode 100644 (file)
index 0000000..e798376
--- /dev/null
@@ -0,0 +1,700 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import logging
+import collections
+import concurrent
+import types
+
+import requests
+import requests.auth
+import tornado.escape
+
+from requests.packages.urllib3.exceptions import InsecureRequestWarning
+
+import gi
+gi.require_version('RwDts', '1.0')
+import rift.tasklets
+from gi.repository import (
+    RwDts as rwdts,
+    VnfrYang
+    )
+import rift.mano.dts as mano_dts
+import rwlogger
+
+
+class MonitoringParamError(Exception):
+    """Monitoring Parameter error"""
+    pass
+
+
+class JsonPathValueQuerier(object):
+    def __init__(self, log, json_path):
+        self._log = log
+        self._json_path = json_path
+        self._json_path_expr = None
+
+        try:
+            import jsonpath_rw
+            self._json_path_expr = jsonpath_rw.parse(self._json_path)
+        except Exception as e:
+            self._log.error("Could not create json_path parser: %s", str(e))
+
+    def query(self, json_msg):
+        try:
+            json_dict = tornado.escape.json_decode(json_msg)
+        except ValueError as e:
+            msg = "Failed to convert response into json"
+            self._log.warning(msg)
+            raise MonitoringParamError(e)
+
+        if self._json_path_expr is None:
+            raise MonitoringParamError(
+                    "Parser not created.  Unable to extract value from %s" % json_msg
+                    )
+
+        try:
+            matches = self._json_path_expr.find(json_dict)
+            values = [m.value for m in matches]
+        except Exception as e:
+            raise MonitoringParamError(
+                    "Failed to run find using json_path (%s) against json_msg: %s" %
+                    (self._json_path, str(e))
+                    )
+
+        if len(values) == 0:
+            raise MonitoringParamError(
+                    "No values found from json_path (%s)" % self._json_path
+                    )
+
+        if len(values) > 1:
+            self._log.debug("Got multiple values from json_path (%s).  Only returning the first.",
+                            self._json_path)
+
+        return values[0]
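+
+# A minimal usage sketch (hypothetical payload; assumes the jsonpath_rw
+# package imported above is available):
+#
+#   querier = JsonPathValueQuerier(log, "stats.rx_bytes")
+#   querier.query('{"stats": {"rx_bytes": 42}}')   # -> 42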
+
+
+class ObjectPathValueQuerier(object):
+    def __init__(self, log, object_path):
+        self._log = log
+        self._object_path = object_path
+        self._object_path_expr = None
+
+    def query(self, object_msg):
+        try:
+            object_dict = tornado.escape.json_decode(object_msg)
+        except ValueError as e:
+            msg = "Failed to convert response into object"
+            self._log.warning(msg)
+            raise MonitoringParamError(e)
+
+        import objectpath
+        try:
+            tree = objectpath.Tree(object_dict)
+        except Exception as e:
+            msg = "Could not create objectpath tree: %s", str(e)
+            self._log.error(msg)
+            raise MonitoringParamError(msg)
+
+        try:
+            value = tree.execute(self._object_path)
+        except Exception as e:
+            raise MonitoringParamError(
+                    "Failed to run execute object_path (%s) against object_msg: %s" %
+                    (self._object_path, str(e))
+                    )
+
+        if isinstance(value, types.GeneratorType):
+            try:
+                value = next(value)
+            except Exception as e:
+                raise MonitoringParamError(
+                        "Failed to get value from objectpath %s execute generator: %s" %
+                        (self._object_path, str(e))
+                        )
+
+        if isinstance(value, (list, tuple)):
+            if len(value) == 0:
+                raise MonitoringParamError(
+                        "No values found from object_path (%s)" % self._object_path
+                        )
+
+            elif len(value) > 1:
+                self._log.debug(
+                        "Got multiple values from object_path (%s).  "
+                        "Only returning the first.", self._object_path
+                        )
+
+            # Only take the first element
+            value = value[0]
+
+        return value
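+
+# A minimal usage sketch (hypothetical payload; assumes the objectpath
+# package imported in query() is installed):
+#
+#   querier = ObjectPathValueQuerier(log, "$.stats.rx_bytes")
+#   querier.query('{"stats": {"rx_bytes": 42}}')   # -> 42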
+
+
+class JsonKeyValueQuerier(object):
+    def __init__(self, log, key):
+        self._log = log
+        self._key = key
+
+    def query(self, json_msg):
+        try:
+            json_dict = tornado.escape.json_decode(json_msg)
+        except ValueError as e:
+            msg = "Failed to convert response into json"
+            self._log.warning(msg)
+            raise MonitoringParamError(e)
+
+        if self._key not in json_dict:
+            msg = "Did not find '{}' key in response: {}".format(
+                    self._key, json_dict
+                    )
+            self._log.warning(msg)
+            raise MonitoringParamError(msg)
+
+        value = json_dict[self._key]
+
+        return value
+
+
+class ValueConverter(object):
+    def __init__(self, value_type):
+        self._value_type = value_type
+
+    def _convert_int(self, value):
+        if isinstance(value, int):
+            return value
+
+        try:
+            return int(value)
+        except (ValueError, TypeError) as e:
+            raise MonitoringParamError(
+                    "Could not convert value into integer: %s", str(e)
+                    )
+
+    def _convert_text(self, value):
+        if isinstance(value, str):
+            return value
+
+        try:
+            return str(value)
+        except (ValueError, TypeError) as e:
+            raise MonitoringParamError(
+                    "Could not convert value into string: %s", str(e)
+                    )
+
+    def _convert_decimal(self, value):
+        if isinstance(value, float):
+            return value
+
+        try:
+            return float(value)
+        except (ValueError, TypeError) as e:
+            raise MonitoringParamError(
+                    "Could not convert value into decimal: %s" % str(e)
+                    )
+
+    def convert(self, value):
+        if self._value_type == "INT":
+            return self._convert_int(value)
+        elif self._value_type == "DECIMAL":
+            return self._convert_decimal(value)
+        elif self._value_type == "STRING":
+            return self._convert_text(value)
+        else:
+            raise MonitoringParamError("Unknown value type: %s", self._value_type)
+
+
+class HTTPBasicAuth(object):
+    def __init__(self, username, password):
+        self.username = username
+        self.password = password
+
+
+class HTTPEndpoint(object):
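+    """Wrap a VNFR http-endpoint descriptor and poll it over HTTP(S) using a
+    persistent requests.Session with the configured method, headers and
+    basic-auth credentials."""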
+    def __init__(self, log, loop, ip_address, ep_msg):
+        self._log = log
+        self._loop = loop
+        self._ip_address = ip_address
+        self._ep_msg = ep_msg
+
+        # Suppress HTTPS-related warnings, since we do not support
+        # certificate verification yet.
+        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+        self._session = requests.Session()
+        self._auth = None
+        self._headers = None
+
+    @property
+    def poll_interval(self):
+        return self._ep_msg.polling_interval_secs
+
+    @property
+    def ip_address(self):
+        return self._ip_address
+
+    @property
+    def port(self):
+        return self._ep_msg.port
+
+    @property
+    def protocol(self):
+        if self._ep_msg.has_field("https"):
+           if self._ep_msg.https is True:
+               return "https"
+
+        return "http"
+
+    @property
+    def path(self):
+        return self._ep_msg.path
+
+    @property
+    def method(self):
+        if self._ep_msg.has_field("method"):
+           return self._ep_msg.method
+        return "GET"
+
+    @property
+    def username(self):
+        if self._ep_msg.has_field("username"):
+            return self._ep_msg.username
+
+        return None
+
+    @property
+    def headers(self):
+        if self._headers is None:
+            headers = {}
+            for header in self._ep_msg.headers:
+                if header.has_field("key") and header.has_field("value"):
+                    headers[header.key] = header.value
+
+            self._headers = headers
+
+        return self._headers
+
+    @property
+    def password(self):
+        if self._ep_msg.has_field("password"):
+            return self._ep_msg.password
+
+        return None
+
+    @property
+    def auth(self):
+        if self._auth is None:
+            if self.username is not None and self.password is not None:
+                self._auth = requests.auth.HTTPBasicAuth(
+                        self.username,
+                        self.password,
+                        )
+
+        return self._auth
+
+    @property
+    def url(self):
+        url = "{protocol}://{ip_address}:{port}/{path}".format(
+                protocol=self.protocol,
+                ip_address=self.ip_address,
+                port=self.port,
+                path=self.path.lstrip("/"),
+                )
+
+        return url
+
+    def _poll(self):
+        try:
+            resp = self._session.request(
+                    self.method, self.url, timeout=10, auth=self.auth,
+                    headers=self.headers, verify=False
+                    )
+            resp.raise_for_status()
+        except requests.exceptions.RequestException as e:
+            msg = "Got HTTP error when request monitoring method {} from url {}: {}".format(
+                    self.method,
+                    self.url,
+                    str(e),
+                    )
+            self._log.warning(msg)
+            raise MonitoringParamError(msg)
+
+        return resp.text
+
+    @asyncio.coroutine
+    def poll(self):
+        try:
+            with concurrent.futures.ThreadPoolExecutor(1) as executor:
+                resp = yield from self._loop.run_in_executor(
+                        executor,
+                        self._poll,
+                        )
+
+        except MonitoringParamError as e:
+            msg = "Caught exception when polling http endpoint: %s" % str(e)
+            self._log.warning(msg)
+            raise MonitoringParamError(msg)
+
+        self._log.debug("Got response from http endpoint (%s): %s",
+                        self.url, resp)
+
+        return resp
+
+
+class MonitoringParam(object):
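+    """A single VNFR monitoring param.  Extracts its value from endpoint
+    responses with the configured JSON query method and converts it to the
+    declared value type."""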
+    def __init__(self, log, vnfr_mon_param_msg):
+        self._log = log
+        self._vnfr_mon_param_msg = vnfr_mon_param_msg
+
+        self._current_value = None
+
+        self._json_querier = self._create_json_querier()
+        self._value_converter = ValueConverter(self.value_type)
+
+    def _create_json_querier(self):
+        if self.msg.json_query_method == "NAMEKEY":
+            return JsonKeyValueQuerier(self._log, self.msg.name)
+        elif self.msg.json_query_method == "JSONPATH":
+            if not self.msg.json_query_params.has_field("json_path"):
+                msg = "JSONPATH query_method requires json_query_params.json_path to be filled in %s"
+                self._log.error(msg, self.msg)
+                raise ValueError(msg)
+            return JsonPathValueQuerier(self._log, self.msg.json_query_params.json_path)
+        elif self.msg.json_query_method == "OBJECTPATH":
+            if not self.msg.json_query_params.has_field("object_path"):
+                msg = "OBJECTPATH query_method requires json_query_params.object_path to be filled in %s"
+                self._log.error(msg, self.msg)
+                raise ValueError(msg)
+            return ObjectPathValueQuerier(self._log, self.msg.json_query_params.object_path)
+        else:
+            msg = "Unknown JSON query method: %s" % self.json_query_method
+            self._log.error(msg)
+            raise ValueError(msg)
+
+    @property
+    def current_value(self):
+        return self._current_value
+
+    @property
+    def msg(self):
+        msg = self._vnfr_mon_param_msg
+        value_type = msg.value_type
+
+        if self._current_value is None:
+            return msg
+
+        if value_type == "INT":
+            msg.value_integer = self._current_value
+
+        elif value_type == "DECIMAL":
+            msg.value_decimal = self._current_value
+
+        elif value_type == "STRING":
+            msg.value_string = self._current_value
+
+        else:
+            self._log.debug("Unknown value_type: %s", value_type)
+
+        return msg
+
+    @property
+    def path(self):
+        return self.msg.http_endpoint_ref
+
+    @property
+    def value_type(self):
+        return self.msg.value_type
+
+    @property
+    def json_query_method(self):
+        return self.msg.json_query_method
+
+    @property
+    def json_path(self):
+        return self.msg.json_query_params.json_path
+
+    @property
+    def name(self):
+        return self.msg.name
+
+    def extract_value_from_response(self, response_msg):
+        if self._json_querier is None:
+            self._log.warning("json querier is not created.  Cannot extract value form response.")
+            return
+
+        try:
+            value = self._json_querier.query(response_msg)
+            converted_value = self._value_converter.convert(value)
+        except MonitoringParamError as e:
+            self._log.warning("Failed to extract value from json response: %s", str(e))
+            return
+        else:
+            self._current_value = converted_value
+
+
+class EndpointMonParamsPoller(object):
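+    """Poll a single HTTP endpoint on its polling interval and apply each
+    response to all monitoring params bound to that endpoint, invoking the
+    on_update callback with the refreshed param messages."""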
+    REQUEST_TIMEOUT_SECS = 10
+
+    def __init__(self, log, loop, endpoint, mon_params, on_update_cb=None):
+        self._log = log
+        self._loop = loop
+        self._endpoint = endpoint
+        self._mon_params = mon_params
+        self._on_update_cb = on_update_cb
+
+        self._poll_task = None
+
+    @property
+    def poll_interval(self):
+        return self._endpoint.poll_interval
+
+    def _get_mon_param_msgs(self):
+        return [mon_param.msg for mon_param in self._mon_params]
+
+    def _notify_subscriber(self):
+        if self._on_update_cb is None:
+            return
+
+        self._on_update_cb(self._get_mon_param_msgs())
+
+    def _apply_response_to_mon_params(self, response_msg):
+        for mon_param in self._mon_params:
+            mon_param.extract_value_from_response(response_msg)
+
+        self._notify_subscriber()
+
+    @asyncio.coroutine
+    def _poll_loop(self):
+        self._log.debug("Starting http endpoint %s poll loop", self._endpoint.url)
+        while True:
+            try:
+                response = yield from self._endpoint.poll()
+                self._apply_response_to_mon_params(response)
+            except concurrent.futures.CancelledError:
+                return
+            except MonitoringParamError as e:
+                # A failed poll should not kill the poll loop; log it and
+                # retry on the next polling interval.
+                self._log.warning("Endpoint poll failed: %s", str(e))
+
+            yield from asyncio.sleep(self.poll_interval, loop=self._loop)
+
+    def start(self):
+        self._log.debug("Got start request for endpoint poller: %s",
+                        self._endpoint.url)
+        if self._poll_task is not None:
+            return
+        self._poll_task = self._loop.create_task(self._poll_loop())
+
+    def stop(self):
+        self._log.debug("Got stop request for endpoint poller: %s",
+                        self._endpoint.url)
+        if self._poll_task is None:
+            return
+
+        self._poll_task.cancel()
+
+        self._poll_task = None
+
+
+class VnfMonitoringParamsController(object):
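+    """Group a VNF's monitoring params by the HTTP endpoint they reference
+    and drive one EndpointMonParamsPoller per endpoint."""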
+    def __init__(self, log, loop, vnfr_id, management_ip,
+                 http_endpoint_msgs, monitoring_param_msgs,
+                 on_update_cb=None):
+        self._log = log
+        self._loop = loop
+        self._vnfr_id = vnfr_id
+        self._management_ip = management_ip
+        self._http_endpoint_msgs = http_endpoint_msgs
+        self._monitoring_param_msgs = monitoring_param_msgs
+
+        self._on_update_cb = on_update_cb
+        self._endpoints = self._create_endpoints()
+        self._mon_params = self._create_mon_params()
+
+        self._endpoint_mon_param_map = self._create_endpoint_mon_param_map(
+                self._endpoints, self._mon_params
+                )
+        self._endpoint_pollers = self._create_endpoint_pollers(self._endpoint_mon_param_map)
+
+    def _create_endpoints(self):
+        path_endpoint_map = {}
+        for ep_msg in self._http_endpoint_msgs:
+            endpoint = HTTPEndpoint(
+                    self._log,
+                    self._loop,
+                    self._management_ip,
+                    ep_msg,
+                    )
+            path_endpoint_map[endpoint.path] = endpoint
+
+        return path_endpoint_map
+
+    def _create_mon_params(self):
+        mon_params = {}
+        for mp_msg in self._monitoring_param_msgs:
+            mon_params[mp_msg.id] = MonitoringParam(
+                    self._log,
+                    mp_msg,
+                    )
+
+        return mon_params
+
+    def _create_endpoint_mon_param_map(self, endpoints, mon_params):
+        ep_mp_map = collections.defaultdict(list)
+        for mp in mon_params.values():
+            endpoint = endpoints[mp.path]
+            ep_mp_map[endpoint].append(mp)
+
+        return ep_mp_map
+
+    def _create_endpoint_pollers(self, ep_mp_map):
+        pollers = []
+
+        for endpoint, mon_params in ep_mp_map.items():
+            poller = EndpointMonParamsPoller(
+                    self._log,
+                    self._loop,
+                    endpoint,
+                    mon_params,
+                    self._on_update_cb
+                    )
+
+            pollers.append(poller)
+
+        return pollers
+
+    @property
+    def msgs(self):
+        msgs = []
+        for mp in self.mon_params:
+            msgs.append(mp.msg)
+
+        return msgs
+
+    @property
+    def mon_params(self):
+        return list(self._mon_params.values())
+
+    @property
+    def endpoints(self):
+        return list(self._endpoints.values())
+
+    def start(self):
+        """ Start monitoring """
+        self._log.debug("Starting monitoring of VNF id: %s", self._vnfr_id)
+        for poller in self._endpoint_pollers:
+            poller.start()
+
+    def stop(self):
+        """ Stop monitoring """
+        self._log.debug("Stopping monitoring of VNF id: %s", self._vnfr_id)
+        for poller in self._endpoint_pollers:
+            poller.stop()
+
+
+class VnfMonitorDtsHandler(mano_dts.DtsHandler):
+    """ VNF monitoring class """
+    # The monitoring params are a list nested within a list, so we register
+    # for the list at the deepest level.
+    XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:monitoring-param"
+
+    @classmethod
+    def from_vnf_data(cls, tasklet, vnfr_msg, vnfd_msg):
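+        """Construct a VnfMonitorDtsHandler from a VNFR message (id and
+        management IP) and its VNFD's monitoring params and HTTP endpoints."""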
+        handler = cls(tasklet.log, tasklet.dts, tasklet.loop,
+                vnfr_msg.id, vnfr_msg.mgmt_interface.ip_address,
+                vnfd_msg.monitoring_param, vnfd_msg.http_endpoint)
+
+        return handler
+
+    def __init__(self, log, dts, loop, vnfr_id, mgmt_ip, params, endpoints):
+        super().__init__(log, dts, loop)
+
+        self._mgmt_ip = mgmt_ip
+        self._vnfr_id = vnfr_id
+
+        mon_params = []
+        for mon_param in params:
+            param = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict(
+                    mon_param.as_dict()
+                    )
+            mon_params.append(param)
+
+        http_endpoints = []
+        for endpoint in endpoints:
+            endpoint = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint.from_dict(
+                    endpoint.as_dict()
+                    )
+            http_endpoints.append(endpoint)
+
+        self.log.debug("Creating monitoring param controller")
+        self.log.debug(" - Endpoints: %s", http_endpoints)
+        self.log.debug(" - Monitoring Params: %s", mon_params)
+
+        self._mon_param_controller = VnfMonitoringParamsController(
+                self.log,
+                self.loop,
+                self._vnfr_id,
+                self._mgmt_ip,
+                http_endpoints,
+                mon_params,
+                self.on_update_mon_params
+                )
+
+    def on_update_mon_params(self, mon_param_msgs):
+        for param_msg in mon_param_msgs:
+            self.reg.update_element(
+                    self.xpath(param_msg.id),
+                    param_msg,
+                    rwdts.XactFlag.ADVISE
+                    )
+
+    def start(self):
+        self._mon_param_controller.start()
+
+    def stop(self):
+        self.deregister()
+        self._mon_param_controller.stop()
+
+    def xpath(self, param_id=None):
+        """ Monitoring params xpath """
+        return("D,/vnfr:vnfr-catalog" +
+               "/vnfr:vnfr[vnfr:id='{}']".format(self._vnfr_id) +
+               "/vnfr:monitoring-param" +
+               ("[vnfr:id='{}']".format(param_id) if param_id else ""))
+
+    @property
+    def msg(self):
+        """ The message with the monitoing params """
+        return self._mon_param_controller.msgs
+
+    def __del__(self):
+        self.stop()
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register with dts """
+
+        self.reg = yield from self.dts.register(xpath=self.xpath(),
+                  flags=rwdts.Flag.PUBLISHER|rwdts.Flag.CACHE|rwdts.Flag.NO_PREP_READ)
+
+        assert self.reg is not None
+
+    def deregister(self):
+        """ de-register with dts """
+        if self.reg is not None:
+            self.log.debug("Deregistering path %s, regh = %s",
+                            VnfMonitorDtsHandler.XPATH,
+                            self.reg)
+            self.reg.deregister()
+            self.reg = None
+            self._vnfr = None
diff --git a/rwlaunchpad/plugins/rwmonparam/rwmonparam.py b/rwlaunchpad/plugins/rwmonparam/rwmonparam.py
new file mode 100644 (file)
index 0000000..571c45d
--- /dev/null
@@ -0,0 +1,25 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwmonparam
+
+class Tasklet(rift.tasklets.rwmonparam.MonitoringParameterTasklet):
+    pass
diff --git a/rwlaunchpad/plugins/rwmonparam/test/utest_aggregator.py b/rwlaunchpad/plugins/rwmonparam/test/utest_aggregator.py
new file mode 100644 (file)
index 0000000..f7b8e88
--- /dev/null
@@ -0,0 +1,87 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import asyncio
+import base64
+import logging
+import os
+import sys
+import tornado.escape
+import tornado.platform.asyncio
+import tornado.testing
+import tornado.web
+import unittest
+import xmlrunner
+
+import rift.tasklets.rwmonparam.aggregator as aggregator
+
+
+from gi.repository import VnfrYang
+
+logger = logging.getLogger("mon_params_test.py")
+
+
+class TestAggregator(unittest.TestCase):
+
+    def test_int_aggregator(self):
+        int_agg = aggregator.IntValueAggregator("SUM", [1, 2, 3])
+        self.assertEqual(int_agg.aggregate(), ("value_integer", 6))
+
+        int_agg = aggregator.IntValueAggregator("AVERAGE", [1, 2, 3])
+        self.assertEqual(int_agg.aggregate(), ("value_integer", 2))
+
+        int_agg = aggregator.IntValueAggregator("MAXIMUM", [1, 2, 3])
+        self.assertEqual(int_agg.aggregate(), ("value_integer", 3))
+
+        int_agg = aggregator.IntValueAggregator("MINIMUM", [1, 2, 3])
+        self.assertEqual(int_agg.aggregate(), ("value_integer", 1))
+
+        int_agg = aggregator.IntValueAggregator("COUNT", [1, 2, 3])
+        self.assertEqual(int_agg.aggregate(), ("value_integer", 3))
+
+    def test_decimal_aggregator(self):
+        int_agg = aggregator.DecimalValueAggregator("SUM", [1.1, 2, 3])
+        self.assertEqual(int_agg.aggregate(), ("value_decimal", 6.1))
+
+        int_agg = aggregator.DecimalValueAggregator("AVERAGE", [1, 2, 3])
+        self.assertEqual(int_agg.aggregate(), ("value_decimal", 2.0))
+
+        int_agg = aggregator.DecimalValueAggregator("MAXIMUM", [1, 2, 3.3])
+        self.assertEqual(int_agg.aggregate(), ("value_decimal", 3.3))
+
+        int_agg = aggregator.DecimalValueAggregator("MINIMUM", [1.1, 2, 3.3])
+        self.assertEqual(int_agg.aggregate(), ("value_decimal", 1.1))
+
+        int_agg = aggregator.DecimalValueAggregator("COUNT", [1.1, 2, 3.3])
+        self.assertEqual(int_agg.aggregate(), ("value_decimal", 3))
+
+
+def main(argv=sys.argv[1:]):
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(
+            argv=[__file__] + argv,
+            testRunner=xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+            )
+
+if __name__ == '__main__':
+    main()
+
diff --git a/rwlaunchpad/plugins/rwmonparam/test/utest_mon_params.py b/rwlaunchpad/plugins/rwmonparam/test/utest_mon_params.py
new file mode 100755 (executable)
index 0000000..a0817d7
--- /dev/null
@@ -0,0 +1,933 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import asyncio
+import base64
+import logging
+import os
+import sys
+import tornado.escape
+import tornado.platform.asyncio
+import tornado.testing
+import tornado.web
+import unittest
+import xmlrunner
+
+import rift.tasklets.rwmonparam.vnfr_core as mon_params
+
+
+from gi.repository import VnfrYang
+
+logger = logging.getLogger("mon_params_test.py")
+
+
+class AsyncioTornadoTest(tornado.testing.AsyncHTTPTestCase):
+    def setUp(self):
+        self._loop = asyncio.get_event_loop()
+        super().setUp()
+
+    def get_new_ioloop(self):
+        return tornado.platform.asyncio.AsyncIOMainLoop()
+
+
+class MonParamsPingStatsTest(AsyncioTornadoTest):
+    ping_path = r"/api/v1/ping/stats"
+    ping_response = {
+            'ping-request-tx-count': 5,
+            'ping-response-rx-count': 10
+            }
+
+    mon_param_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam()
+    mon_param_msg.from_dict({
+            'id': '1',
+            'name': 'ping-request-tx-count',
+            'json_query_method': "NAMEKEY",
+            'http_endpoint_ref': ping_path,
+            'value_type': "INT",
+            'description': 'no of ping requests',
+            'group_tag': 'Group1',
+            'widget_type': 'COUNTER',
+            'units': 'packets'
+            })
+
+    endpoint_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint()
+    endpoint_msg.from_dict({
+        'path': ping_path,
+        'polling_interval_secs': 1,
+        'username': 'admin',
+        'password': 'password',
+        'headers': [{'key': 'TEST_KEY', 'value': 'TEST_VALUE'}],
+        })
+
+    def create_endpoint(self, endpoint_msg):
+        self.mon_port = self.get_http_port()
+        endpoint = mon_params.HTTPEndpoint(
+                logger,
+                self._loop,
+                "127.0.0.1",
+                self.endpoint_msg,
+                )
+        # For each creation, update the descriptor as well
+        endpoint_msg.port = self.mon_port
+
+        return endpoint
+
+    def create_mon_param(self):
+        return mon_params.MonitoringParam(logger, self.mon_param_msg)
+
+    def get_app(self):
+        class PingStatsHandler(tornado.web.RequestHandler):
+            def get(this):
+                test_header = this.request.headers.get('TEST_KEY')
+                if test_header is None or test_header != 'TEST_VALUE':
+                    this.set_status(401)
+                    this.finish()
+                    return None
+
+                auth_header = this.request.headers.get('Authorization')
+                if auth_header is None or not auth_header.startswith('Basic '):
+                    this.set_status(401)
+                    this.set_header('WWW-Authenticate', 'Basic realm=Restricted')
+                    this._transforms = []
+                    this.finish()
+                    return None
+
+                auth_header = auth_header.encode('ascii')
+                auth_decoded = base64.b64decode(auth_header[6:]).decode('ascii')
+                login, password = auth_decoded.split(':', 1)
+                login = login.encode('ascii')
+                password = password.encode('ascii')
+                is_auth = (login == b"admin" and password == b"password")
+
+                if not is_auth:
+                    this.set_status(401)
+                    this.set_header('WWW-Authenticate', 'Basic realm=Restricted')
+                    this._transforms = []
+                    this.finish()
+                    return None
+
+                this.write(self.ping_response)
+
+        return tornado.web.Application([
+            (self.ping_path, PingStatsHandler),
+            ])
+
+    def test_value_convert(self):
+        float_con = mon_params.ValueConverter("DECIMAL")
+        int_con = mon_params.ValueConverter("INT")
+        text_con = mon_params.ValueConverter("STRING")
+
+        a = float_con.convert("1.23")
+        self.assertEqual(a, 1.23)
+
+        a = float_con.convert(1)
+        self.assertEqual(a, float(1))
+
+        t = text_con.convert(1.23)
+        self.assertEqual(t, "1.23")
+
+        t = text_con.convert("asdf")
+        self.assertEqual(t, "asdf")
+
+        i = int_con.convert(1.23)
+        self.assertEqual(i, 1)
+
+    def test_json_key_value_querier(self):
+        kv_querier = mon_params.JsonKeyValueQuerier(logger, "ping-request-tx-count")
+        value = kv_querier.query(tornado.escape.json_encode(self.ping_response))
+        self.assertEqual(value, 5)
+
+    def test_json_path_value_querier(self):
+        kv_querier = mon_params.JsonPathValueQuerier(logger, '$.ping-request-tx-count')
+        value = kv_querier.query(tornado.escape.json_encode(self.ping_response))
+        self.assertEqual(value, 5)
+
+    def test_object_path_value_querier(self):
+        kv_querier = mon_params.ObjectPathValueQuerier(logger, "$.*['ping-request-tx-count']")
+        value = kv_querier.query(tornado.escape.json_encode(self.ping_response))
+        self.assertEqual(value, 5)
+
+    def test_endpoint(self):
+        @asyncio.coroutine
+        def run_test():
+            endpoint = self.create_endpoint(self.endpoint_msg)
+            resp = yield from endpoint.poll()
+            resp_json = tornado.escape.json_decode(resp)
+            self.assertEqual(resp_json["ping-request-tx-count"], 5)
+            self.assertEqual(resp_json["ping-response-rx-count"], 10)
+
+        self._loop.run_until_complete(
+                asyncio.wait_for(run_test(), 10, loop=self._loop)
+                )
+
+    def test_mon_param(self):
+        a = self.create_mon_param()
+        a.extract_value_from_response(tornado.escape.json_encode(self.ping_response))
+        self.assertEqual(a.current_value, 5)
+        self.assertEqual(a.msg.value_integer, 5)
+
+    def test_endpoint_poller(self):
+        endpoint = self.create_endpoint(self.endpoint_msg)
+        mon_param = self.create_mon_param()
+        poller = mon_params.EndpointMonParamsPoller(
+                logger, self._loop, endpoint, [mon_param],
+                )
+        poller.start()
+
+        self._loop.run_until_complete(asyncio.sleep(1, loop=self._loop))
+        self.assertEqual(mon_param.current_value, 5)
+
+        poller.stop()
+
+    def test_params_controller(self):
+        new_port = self.get_http_port()
+        # Update port after new port is initialized
+        self.endpoint_msg.port = new_port
+        ctrl = mon_params.VnfMonitoringParamsController(
+                logger, self._loop, "1", "127.0.0.1", 
+                [self.endpoint_msg], [self.mon_param_msg],
+                )
+        ctrl.start()
+
+        self._loop.run_until_complete(asyncio.sleep(1, loop=self._loop))
+
+        ctrl.stop()
+
+        self.assertEqual(1, len(ctrl.mon_params))
+        mon_param = ctrl.mon_params[0]
+        self.assertEqual(mon_param.current_value, 5)
+
+
+class AsyncioTornadoHttpsTest(tornado.testing.AsyncHTTPSTestCase):
+    def setUp(self):
+        self._loop = asyncio.get_event_loop()
+        super().setUp()
+
+    def get_new_ioloop(self):
+        return tornado.platform.asyncio.AsyncIOMainLoop()
+
+
+class MonParamsPingStatsHttpsTest(AsyncioTornadoHttpsTest):
+    ping_path = r"/api/v1/ping/stats"
+    ping_response = {
+            'ping-request-tx-count': 5,
+            'ping-response-rx-count': 10
+            }
+
+    mon_param_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam()
+    mon_param_msg.from_dict({
+            'id': '1',
+            'name': 'ping-request-tx-count',
+            'json_query_method': "NAMEKEY",
+            'http_endpoint_ref': ping_path,
+            'value_type': "INT",
+            'description': 'no of ping requests',
+            'group_tag': 'Group1',
+            'widget_type': 'COUNTER',
+            'units': 'packets'
+            })
+
+    endpoint_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint()
+    endpoint_msg.from_dict({
+        'path': ping_path,
+        'https': 'true',
+        'polling_interval_secs': 1,
+        'username': 'admin',
+        'password': 'password',
+        'headers': [{'key': 'TEST_KEY', 'value': 'TEST_VALUE'}],
+        })
+
+    def create_endpoint(self, endpoint_msg):
+        self.mon_port = self.get_http_port()
+        endpoint = mon_params.HTTPEndpoint(
+                logger,
+                self._loop,
+                "127.0.0.1",
+                self.endpoint_msg,
+                )
+        # For each creation, update the descriptor as well
+        endpoint_msg.port = self.mon_port
+
+        return endpoint
+
+    def create_mon_param(self):
+        return mon_params.MonitoringParam(logger, self.mon_param_msg)
+
+    def get_app(self):
+        class PingStatsHandler(tornado.web.RequestHandler):
+            def get(this):
+                test_header = this.request.headers.get('TEST_KEY')
+                if test_header is None or test_header != 'TEST_VALUE':
+                    this.set_status(401)
+                    this.finish()
+                    return None
+
+                auth_header = this.request.headers.get('Authorization')
+                if auth_header is None or not auth_header.startswith('Basic '):
+                    this.set_status(401)
+                    this.set_header('WWW-Authenticate', 'Basic realm=Restricted')
+                    this._transforms = []
+                    this.finish()
+                    return None
+
+                auth_header = auth_header.encode('ascii')
+                auth_decoded = base64.b64decode(auth_header[6:]).decode('ascii')
+                login, password = auth_decoded.split(':', 1)
+                login = login.encode('ascii')
+                password = password.encode('ascii')
+                is_auth = (login == b"admin" and password == b"password")
+
+                if not is_auth:
+                    this.set_status(401)
+                    this.set_header('WWW-Authenticate', 'Basic realm=Restricted')
+                    this._transforms = []
+                    this.finish()
+                    return None
+
+                this.write(self.ping_response)
+
+        return tornado.web.Application([
+            (self.ping_path, PingStatsHandler),
+            ])
+
+    def test_value_convert(self):
+        float_con = mon_params.ValueConverter("DECIMAL")
+        int_con = mon_params.ValueConverter("INT")
+        text_con = mon_params.ValueConverter("STRING")
+
+        a = float_con.convert("1.23")
+        self.assertEqual(a, 1.23)
+
+        a = float_con.convert(1)
+        self.assertEqual(a, float(1))
+
+        t = text_con.convert(1.23)
+        self.assertEqual(t, "1.23")
+
+        t = text_con.convert("asdf")
+        self.assertEqual(t, "asdf")
+
+        i = int_con.convert(1.23)
+        self.assertEqual(i, 1)
+
+    def test_json_key_value_querier(self):
+        kv_querier = mon_params.JsonKeyValueQuerier(logger, "ping-request-tx-count")
+        value = kv_querier.query(tornado.escape.json_encode(self.ping_response))
+        self.assertEqual(value, 5)
+
+    def test_endpoint(self):
+        @asyncio.coroutine
+        def run_test():
+            endpoint = self.create_endpoint(self.endpoint_msg)
+            resp = yield from endpoint.poll()
+            resp_json = tornado.escape.json_decode(resp)
+            self.assertEqual(resp_json["ping-request-tx-count"], 5)
+            self.assertEqual(resp_json["ping-response-rx-count"], 10)
+
+        self._loop.run_until_complete(
+                asyncio.wait_for(run_test(), 10, loop=self._loop)
+                )
+
+    def test_mon_param(self):
+        a = self.create_mon_param()
+        a.extract_value_from_response(tornado.escape.json_encode(self.ping_response))
+        self.assertEqual(a.current_value, 5)
+        self.assertEqual(a.msg.value_integer, 5)
+
+    def test_endpoint_poller(self):
+        endpoint = self.create_endpoint(self.endpoint_msg)
+        mon_param = self.create_mon_param()
+        poller = mon_params.EndpointMonParamsPoller(
+                logger, self._loop, endpoint, [mon_param],
+                )
+        poller.start()
+
+        self._loop.run_until_complete(asyncio.sleep(1, loop=self._loop))
+        self.assertEqual(mon_param.current_value, 5)
+
+        poller.stop()
+
+    def test_params_controller(self):
+        new_port = self.get_http_port()
+        # Update port after new port is initialized
+        self.endpoint_msg.port = new_port
+        ctrl = mon_params.VnfMonitoringParamsController(
+                logger, self._loop, "1", "127.0.0.1", 
+                [self.endpoint_msg], [self.mon_param_msg],
+                )
+        ctrl.start()
+
+        self._loop.run_until_complete(asyncio.sleep(1, loop=self._loop))
+
+        ctrl.stop()
+
+        self.assertEqual(1, len(ctrl.mon_params))
+        mon_param = ctrl.mon_params[0]
+        self.assertEqual(mon_param.current_value, 5)
+
+
+class VRouterStatsTest(unittest.TestCase):
+    system_response = {
+        "system": {
+            "cpu": [
+                {
+                    "usage": 2.35,
+                    "cpu": "all"
+                },
+                {
+                    "usage": 5.35,
+                    "cpu": "1"
+                }
+            ]
+        }
+    }
+
+    def test_object_path_value_querier(self):
+        kv_querier = mon_params.ObjectPathValueQuerier(logger, "$.system.cpu[@.cpu is 'all'].usage")
+        value = kv_querier.query(tornado.escape.json_encode(self.system_response))
+        self.assertEqual(value, 2.35)
+
+
+class TrafsinkStatsTest(unittest.TestCase):
+    system_response = {
+       "rw-vnf-base-opdata:port-state": [
+         {
+           "ip": [
+             {
+               "address": "12.0.0.3/24"
+             }
+           ],
+           "rw-trafgen-data:trafgen-info": {
+             "src_l4_port": 1234,
+             "dst_l4_port": 5678,
+             "dst_ip_address": "192.168.1.1",
+             "tx_state": "Off",
+             "dst_mac_address": "00:00:00:00:00:00",
+             "tx_mode": "single-template",
+             "packet-count": 0,
+             "tx-cycles": 5478,
+             "tx_burst": 16,
+             "src_ip_address": "192.168.0.1",
+             "pkt_size": 64,
+             "src_mac_address": "fa:16:3e:07:b1:52",
+             "descr-string": "",
+             "tx_rate": 100
+           },
+           "counters": {
+             "input-errors": 0,
+             "output-bytes": 748,
+             "input-pause-xoff-pkts": 0,
+             "input-badcrc-pkts": 0,
+             "input-bytes": 62,
+             "rx-rate-mbps": 9576,
+             "output-pause-xoff-pkts": 0,
+             "input-missed-pkts": 0,
+             "input-packets": 1,
+             "output-errors": 0,
+             "tx-rate-mbps": 0,
+             "input-pause-xon-pkts": 0,
+             "output-pause-xon-pkts": 0,
+             "tx-rate-pps": 0,
+             "input-mcast-pkts": 0,
+             "rx-rate-pps": 0,
+             "output-packets": 6,
+             "input-nombuf-pkts": 0
+           },
+           "info": {
+             "numa-socket": 0,
+             "transmit-queues": 1,
+             "privatename": "eth_uio:pci=0000:00:04.0",
+             "duplex": "full-duplex",
+             "virtual-fabric": "No",
+             "link-state": "up",
+             "rte-port-id": 0,
+             "fastpath-instance": 1,
+             "id": 0,
+             "app-name": "rw_trafgen",
+             "speed": 10000,
+             "receive-queues": 1,
+             "descr-string": "",
+             "mac": "fa:16:3e:07:b1:52"
+           },
+           "portname": "trafsink_vnfd/cp0",
+           "queues": {
+             "rx-queue": [
+               {
+                 "packets": 1,
+                 "bytes-MB": 0,
+                 "qid": 0,
+                 "rate-mbps": 0,
+                 "rate-pps": 0
+               }
+             ],
+             "tx-queue": [
+               {
+                 "bytes-MB": 0,
+                 "packets": 6,
+                 "rate-pps": 0,
+                 "errors": 0,
+                 "qid": 0,
+                 "rate-mbps": 0
+               }
+             ]
+           }
+         }
+       ]
+     }
+
+    def test_object_path_value_querier(self):
+        kv_querier = mon_params.ObjectPathValueQuerier(logger, "$..*[@.portname is 'trafsink_vnfd/cp0'].counters.'rx-rate-mbps'")
+        value = kv_querier.query(tornado.escape.json_encode(self.system_response))
+        self.assertEqual(value, 9576)
+
+class IkeStatsTest(unittest.TestCase):
+    system_response = {
+      "rw-ipsec:ipsec-service-statistics": [
+    {
+      "name": "client1",
+      "statistics": {
+        "esp": {
+          "rx-bytes": 0,
+          "rx-packets": 0,
+          "tx-bytes": 0,
+          "tx-packets": 0
+        },
+        "rekey": {
+          "total": 3321,
+          "rate": 132,
+          "instantaneous-rate": 2
+        },
+        "state": {
+          "ike-sas": 10,
+          "threads-in-use": 5,
+          "swanctl-dir": "\/tmp\/strongswan4x3dni"
+        }
+      }
+    },
+    {
+      "name": "client0",
+      "statistics": {
+        "esp": {
+          "rx-bytes": 0,
+          "rx-packets": 0,
+          "tx-bytes": 0,
+          "tx-packets": 0
+        },
+        "rekey": {
+          "total": 3345,
+          "rate": 0,
+          "instantaneous-rate": 0
+        },
+        "state": {
+          "ike-sas": 50,
+          "threads-in-use": 5,
+          "swanctl-dir": "\/tmp\/strongswann21td3"
+        }
+      }
+    }
+  ]
+     }
+
+
+    def test_object_path_value_querier(self):
+        kv_querier = mon_params.ObjectPathValueQuerier(logger, "$..*[@.name is 'client1'].statistics.rekey.rate")
+        value = kv_querier.query(tornado.escape.json_encode(self.system_response))
+        self.assertEqual(value, 132)
+        kv_querier = mon_params.ObjectPathValueQuerier(logger, "$..*[@.name is 'client1'].statistics.state.'ike-sas'")
+        value = kv_querier.query(tornado.escape.json_encode(self.system_response))
+        self.assertEqual(value, 10)
+
+
+
+
+class PortLatencyTest(unittest.TestCase):
+    system_response = {
+  "rw-vnf-base-opdata:port-state": [
+    {
+      "info": {
+        "fastpath-instance": 1,
+        "duplex": "full-duplex",
+        "link-state": "up",
+        "lport-id": 81931,
+        "mtu": 1500,
+        "descr-string": "",
+        "transmit-queues": 1,
+        "mac": "fa:16:3e:c7:4a:b8",
+        "admin-state": "up",
+        "rte-port-id": 0,
+        "numa-socket": 0,
+        "app-name": "rw_trafgen",
+        "speed": 10000,
+        "virtual-fabric": "No",
+        "id": 0,
+        "receive-queues": 1,
+        "privatename": "eth_uio:pci=0000:00:04.0"
+      },
+      "rw-trafgen-data:trafgen-info": {
+        "maximum-latency": 124412,
+        "latency-distribution": [
+          {
+            "range-end": 100,
+            "range-start": 0,
+            "packets": 0
+          },
+          {
+            "range-end": 200,
+            "range-start": 101,
+            "packets": 0
+          },
+          {
+            "range-end": 300,
+            "range-start": 201,
+            "packets": 0
+          },
+          {
+            "range-end": 400,
+            "range-start": 301,
+            "packets": 0
+          },
+          {
+            "range-end": 500,
+            "range-start": 401,
+            "packets": 0
+          },
+          {
+            "range-end": 600,
+            "range-start": 501,
+            "packets": 0
+          },
+          {
+            "range-end": 700,
+            "range-start": 601,
+            "packets": 0
+          },
+          {
+            "range-end": 800,
+            "range-start": 701,
+            "packets": 0
+          },
+          {
+            "range-end": 900,
+            "range-start": 801,
+            "packets": 0
+          },
+          {
+            "range-end": 1000,
+            "range-start": 901,
+            "packets": 0
+          },
+          {
+            "range-end": 1100,
+            "range-start": 1001,
+            "packets": 0
+          },
+          {
+            "range-end": 1200,
+            "range-start": 1101,
+            "packets": 0
+          },
+          {
+            "range-end": 1300,
+            "range-start": 1201,
+            "packets": 0
+          },
+          {
+            "range-end": 1400,
+            "range-start": 1301,
+            "packets": 0
+          },
+          {
+            "range-end": 1500,
+            "range-start": 1401,
+            "packets": 0
+          },
+          {
+            "range-end": 0,
+            "range-start": 1501,
+            "packets": 1513641
+          }
+        ],
+        "descr-string": "",
+        "tx_mode": "range-template",
+        "minimum-latency": 1928,
+        "pkt_size": 512,
+        "tx_rate": 100,
+        "tx-cycles": 35206,
+        "src_ip_address": "12.0.0.3",
+        "src_l4_port": 10000,
+        "dst_ip_address": "12.0.0.2",
+        "mean-deviation": 4500,
+        "queue": [
+          {
+            "maximum": 124412,
+            "num-packets": 1513641,
+            "average": 12112,
+            "mean-deviation": 4500,
+            "qid": 0,
+            "minimum": 1928
+          }
+        ],
+        "packet-count": 0,
+        "average-latency": 12112,
+        "dst_l4_port": 5678,
+        "tx_state": "On",
+        "tx_burst": 16
+      },
+      "counters": {
+        "tx-rate-pps": 139630,
+        "rx-rate-mbps": 232,
+        "tx-rate-mbps": 589,
+        "output-packets": 49285239,
+        "input-missed-pkts": 0,
+        "output-errors": 0,
+        "input-nombuf-pkts": 0,
+        "input-errors": 0,
+        "input-mcast-pkts": 0,
+        "output-bytes": 26022584932,
+        "input-packets": 22537250,
+        "input-bytes": 11899650400,
+        "rx-rate-pps": 55158
+      },
+      "portname": "trafgencp0",
+      "ip": [
+        {
+          "address": "12.0.0.3\/24"
+        }
+      ],
+      "queues": {
+        "rx-queue": [
+          {
+            "packets": 22537250,
+            "bytes-MB": 95197,
+            "rate-mbps": 232,
+            "qid": 0,
+            "rate-pps": 55158
+          }
+        ],
+        "tx-queue": [
+          {
+            "bytes-MB": 208180,
+            "packets": 49285239,
+            "errors": 0,
+            "rate-mbps": 589,
+            "qid": 0,
+            "rate-pps": 139630
+          }
+        ]
+      },
+      "extended-stats": {
+        "xstats": [
+          {
+            "name": "rx_good_packets",
+            "value": 22555470
+          },
+          {
+            "name": "tx_good_packets",
+            "value": 49337664
+          },
+          {
+            "name": "rx_good_bytes",
+            "value": 11458161160
+          },
+          {
+            "name": "tx_good_bytes",
+            "value": 25063512052
+          },
+          {
+            "name": "rx_errors",
+            "value": 0
+          },
+          {
+            "name": "tx_errors",
+            "value": 0
+          },
+          {
+            "name": "rx_mbuf_allocation_errors",
+            "value": 0
+          },
+          {
+            "name": "rx_q0_packets",
+            "value": 22555470
+          },
+          {
+            "name": "rx_q0_bytes",
+            "value": 11458161160
+          },
+          {
+            "name": "rx_q0_errors",
+            "value": 0
+          },
+          {
+            "name": "tx_q0_packets",
+            "value": 49337664
+          },
+          {
+            "name": "tx_q0_bytes",
+            "value": 25063512052
+          },
+          {
+            "name": "rx_q0_good_packets",
+            "value": 22555470
+          },
+          {
+            "name": "rx_q0_good_bytes",
+            "value": 11458161160
+          },
+          {
+            "name": "rx_q0_multicast_packets",
+            "value": 0
+          },
+          {
+            "name": "rx_q0_broadcast_packets",
+            "value": 0
+          },
+          {
+            "name": "rx_q0_undersize_packets",
+            "value": 38
+          },
+          {
+            "name": "rx_q0_size_64_packets",
+            "value": 0
+          },
+          {
+            "name": "rx_q0_size_65_127_packets",
+            "value": 0
+          },
+          {
+            "name": "rx_q0_size_128_255_packets",
+            "value": 0
+          },
+          {
+            "name": "rx_q0_size_256_511_packets",
+            "value": 22555432
+          },
+          {
+            "name": "rx_q0_size_512_1023_packets",
+            "value": 0
+          },
+          {
+            "name": "rx_q0_size_1024_1517_packets",
+            "value": 0
+          },
+          {
+            "name": "rx_q0_size_1518_max_packets",
+            "value": 0
+          },
+          {
+            "name": "tx_q0_good_packets",
+            "value": 49337664
+          },
+          {
+            "name": "tx_q0_good_bytes",
+            "value": 25063512052
+          },
+          {
+            "name": "tx_q0_errors",
+            "value": 0
+          },
+          {
+            "name": "tx_q0_multicast_packets",
+            "value": 18
+          },
+          {
+            "name": "tx_q0_broadcast_packets",
+            "value": 11
+          },
+          {
+            "name": "tx_q0_undersize_packets",
+            "value": 40
+          },
+          {
+            "name": "tx_q0_size_64_packets",
+            "value": 0
+          },
+          {
+            "name": "tx_q0_size_65_127_packets",
+            "value": 5
+          },
+          {
+            "name": "tx_q0_size_128_255_packets",
+            "value": 2
+          },
+          {
+            "name": "tx_q0_size_256_511_packets",
+            "value": 49337617
+          },
+          {
+            "name": "tx_q0_size_512_1023_packets",
+            "value": 0
+          },
+          {
+            "name": "tx_q0_size_1024_1517_packets",
+            "value": 0
+          },
+          {
+            "name": "tx_q0_size_1518_max_packets",
+            "value": 0
+          }
+        ]
+      },
+      "lacp-info": {
+        "state": {
+          "distributing": "On",
+          "active": "Off",
+          "collecting": "On"
+        },
+        "counters": {
+          "marker": {
+            "rx": 0,
+            "tx": 0,
+            "errors": 0,
+            "nobuf": 0
+          },
+          "lacppdu": {
+            "rx": 0,
+            "tx": 0,
+            "errors": 0,
+            "nobuf": 0
+          }
+        }
+      }
+    }
+  ]
+    }
+
+
+    def test_object_path_value_querier(self):
+          kv_querier = mon_params.ObjectPathValueQuerier(logger, "$..*[@.portname is 'trafgencp0'].'rw-trafgen-data:trafgen-info'.pkt_size")
+          value = kv_querier.query(tornado.escape.json_encode(self.system_response))
+          self.assertEqual(value, 512)
+          kv_querier = mon_params.ObjectPathValueQuerier(logger, "$..*[@.portname is 'trafgencp0'].'rw-trafgen-data:trafgen-info'.'average-latency'")
+          value = kv_querier.query(tornado.escape.json_encode(self.system_response))
+          self.assertEqual(value, 12112)
+
+
+def main(argv=sys.argv[1:]):
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(
+            argv=[__file__] + argv,
+            testRunner=xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+            )
+
+if __name__ == '__main__':
+    main()
+
diff --git a/rwlaunchpad/plugins/rwmonparam/test/utest_mon_params_dts.py b/rwlaunchpad/plugins/rwmonparam/test/utest_mon_params_dts.py
new file mode 100644 (file)
index 0000000..680cc82
--- /dev/null
@@ -0,0 +1,349 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+import asyncio
+import itertools
+import logging
+import os
+import sys
+import unittest
+import uuid
+
+import xmlrunner
+import unittest.mock as mock
+
+from rift.tasklets.rwmonparam import vnfr_core as vnf_mon_params
+from rift.tasklets.rwmonparam import nsr_core as nsr_mon_params
+import rift.test.dts
+
+import gi
+gi.require_version('RwDtsYang', '1.0')
+from gi.repository import (
+        VnfrYang as vnfryang,
+        RwNsrYang,
+        RwLaunchpadYang as launchpadyang,
+        RwDts as rwdts,
+        RwVnfrYang,
+        RwVnfdYang,
+        RwNsdYang
+        )
+
+import utest_mon_params
+
+
+class MonParamMsgGenerator(object):
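+    """Generate VNFR monitoring param messages for the tests.  Note:
+    range(1, num_messages) produces num_messages - 1 param messages."""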
+    def __init__(self, num_messages=1):
+        ping_path = r"/api/v1/ping/stats"
+        self._endpoint_msg = vnfryang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint.from_dict({
+            'path': ping_path,
+            'https': 'true',
+            'polling_interval_secs': 1,
+            'username': 'admin',
+            'password': 'password',
+            'headers': [{'key': 'TEST_KEY', 'value': 'TEST_VALUE'}],
+            })
+
+        self._mon_param_msgs = []
+        for i in range(1, num_messages):
+            self._mon_param_msgs.append(vnfryang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict({
+                'id': '%s' % i,
+                'name': 'param_num_%s' % i,
+                'json_query_method': "NAMEKEY",
+                'http_endpoint_ref': ping_path,
+                'value_type': "INT",
+                'value_integer': i,
+                'description': 'desc for param_num_%s' % i,
+                'group_tag': 'Group1',
+                'widget_type': 'COUNTER',
+                'units': 'packets'
+                })
+            )
+
+        self._msgs = iter(self.mon_param_msgs)
+
+    @property
+    def mon_param_msgs(self):
+        return self._mon_param_msgs
+
+    @property
+    def endpoint_msgs(self):
+        return [self._endpoint_msg]
+
+    def next_message(self):
+        return next(self._msgs)
+
+
+
+class MonParamsDtsTestCase(rift.test.dts.AbstractDTSTest):
+    @classmethod
+    def configure_schema(cls):
+        return launchpadyang.get_schema()
+
+    @classmethod
+    def configure_timeout(cls):
+        return 240
+
+    def configure_test(self, loop, test_id):
+        self.log.debug("STARTING - %s", test_id)
+        self.tinfo = self.new_tinfo(str(test_id))
+        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+
+        self.tinfo_sub = self.new_tinfo(str(test_id) + "_sub")
+        self.dts_sub = rift.tasklets.DTS(self.tinfo_sub, self.schema, self.loop)
+
+        self.msg_gen = MonParamMsgGenerator(4)
+        self.vnf_handler = vnf_mon_params.VnfMonitorDtsHandler(
+                self.log, self.dts, self.loop, 1, "1.1.1.1",
+                self.msg_gen.mon_param_msgs, self.msg_gen.endpoint_msgs
+                )
+
+        store = self.setup_mock_store(aggregation_type=None,
+            monps=None,
+            legacy=True)
+
+        self.nsr_handler = nsr_mon_params.NsrMonitorDtsHandler(
+            self.log, self.dts, self.loop, store.nsr[0], [store.get_vnfr()], store)
+
+
+    def tearDown(self):
+        super().tearDown()
+
+    def setup_mock_store(self, aggregation_type, monps, legacy=False):
+        store = mock.MagicMock()
+
+        mock_vnfd =  RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict({
+            'id': "1",
+            'monitoring_param': [
+                {'description': 'no of ping requests',
+                 'group_tag': 'Group1',
+                 'http_endpoint_ref': 'api/v1/ping/stats',
+                 'id': '1',
+                 'json_query_method': 'NAMEKEY',
+                 'name': 'ping-request-tx-count',
+                 'units': 'packets',
+                 'value_type': 'INT',
+                 'widget_type': 'COUNTER'},
+                {'description': 'no of ping responses',
+                 'group_tag': 'Group1',
+                 'http_endpoint_ref': 'api/v1/ping/stats',
+                 'id': '2',
+                 'json_query_method': 'NAMEKEY',
+                 'name': 'ping-response-rx-count',
+                 'units': 'packets',
+                 'value_type': 'INT',
+                 'widget_type': 'COUNTER'}],
+            })
+        store.get_vnfd = mock.MagicMock(return_value=mock_vnfd)
+
+        mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({
+            'id': '1',
+            'vnfd_ref': '1',
+            'monitoring_param': ([monp.as_dict() for monp in monps] if not legacy else [])
+            })
+        store.get_vnfr = mock.MagicMock(return_value=mock_vnfr)
+
+        mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict({
+            'ns_instance_config_ref': "1",
+            'name_ref': "Foo",
+            'constituent_vnfr_ref': [{'vnfr_id': mock_vnfr.id}],
+
+            })
+        store.get_nsr = mock.MagicMock(return_value=mock_nsr)
+        store.nsr = [mock_nsr]
+
+        monp = [{'aggregation_type': aggregation_type,
+                 'id': '1',
+                 'description': 'no of ping requests',
+                 'group_tag': 'Group1',
+                 'units': 'packets',
+                 'widget_type': 'COUNTER',
+                 'name': 'ping-request-tx-count',
+                 'value_type': 'INT',
+                 'vnfd_monitoring_param': [
+                    {'vnfd_id_ref': '1',
+                     'vnfd_monitoring_param_ref': '1'},
+                    {'vnfd_id_ref': '1',
+                     'vnfd_monitoring_param_ref': '2'}]
+                }]
+
+        mock_nsd = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd.from_dict({
+            'id': str(uuid.uuid1()),
+            'monitoring_param': (monp if not legacy else [])
+            })
+
+        store.get_nsd = mock.MagicMock(return_value=mock_nsd)
+
+        return store
+
+    @asyncio.coroutine
+    def get_published_xpaths(self):
+        published_xpaths = set()
+
+        res_iter = yield from self.dts.query_read("D,/rwdts:dts")
+        for i in res_iter:
+            res = (yield from i).result
+            for member in res.member:
+                published_xpaths |= {reg.keyspec for reg in member.state.registration if reg.flags == "publisher"}
+
+        return published_xpaths
+
+    @asyncio.coroutine
+    def register_vnf_publisher(self):
+        yield from self.vnf_handler.register()
+
+    def add_param_to_publisher(self):
+        msg = self.msg_gen.next_message()
+        self.vnf_handler.on_update_mon_params([msg])
+        return msg
+
+    @asyncio.coroutine
+    def register_vnf_test_subscriber(self, on_prepare=None):
+        ready_event = asyncio.Event(loop=self.loop)
+
+        # Registration needs to wait until the reg-ready callback fires;
+        # DTS does not provide this out of the box.
+        @asyncio.coroutine
+        def on_ready(*args, **kwargs):
+            ready_event.set()
+
+        self.vnf_test_subscriber = yield from self.dts_sub.register(
+                self.vnf_handler.xpath(),
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_ready=on_ready, on_prepare=on_prepare
+                    ),
+                flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.CACHE,
+                )
+
+        yield from ready_event.wait()
+
+    def get_ns_mon_param_msgs(self):
+        return self.ns_handler.get_nsr_mon_param_msgs({'1':['1']})
+
+    @rift.test.dts.async_test
+    def _test_vnf_handler_registration(self):
+        yield from self.vnf_handler.register()
+        published_xpaths = yield from self.get_published_xpaths()
+        assert self.vnf_handler.xpath() in published_xpaths
+
+    @rift.test.dts.async_test
+    def _test_add_vnf_mon_params(self):
+        yield from self.register_vnf_publisher()
+        self.add_param_to_publisher()
+
+        yield from self.register_vnf_test_subscriber()
+        self.add_param_to_publisher()
+
+        # RIFT-12888: Elements do not go immediately into cache after on_prepare.
+        # Because of this, we can't guarantee that the second param will actually be
+        # in the cache yet.
+        elements = list(self.vnf_test_subscriber.elements)
+        assert len(elements) > 0
+        for element in elements:
+            assert element in self.msg_gen.mon_param_msgs
+
+    @rift.test.dts.async_test
+    def _test_nsr_handler_registration(self):
+        yield from self.nsr_handler.register()
+        published_xpaths = yield from self.get_published_xpaths()
+        assert self.nsr_handler.xpath() in published_xpaths
+
+    def _test_publish(self, aggregation_type, expected_value, legacy=False):
+
+        self.msg_gen = MonParamMsgGenerator(4)
+        store = self.setup_mock_store(aggregation_type=aggregation_type,
+            monps=self.msg_gen.mon_param_msgs,
+            legacy=legacy)
+
+        self.vnf_handler = vnf_mon_params.VnfMonitorDtsHandler(
+                self.log, self.dts, self.loop, 1, "1.1.1.1",
+                self.msg_gen.mon_param_msgs, self.msg_gen.endpoint_msgs
+                )
+
+        self.nsr_handler = nsr_mon_params.NsrMonitorDtsHandler(
+            self.log, self.dts, self.loop, store.nsr[0], [store.get_vnfr()], store)
+
+        yield from self.nsr_handler.register()
+        yield from self.nsr_handler.start()
+        published_xpaths = yield from self.get_published_xpaths()
+
+        yield from self.register_vnf_publisher()
+        self.add_param_to_publisher()
+        self.add_param_to_publisher()
+
+        nsr_id = store.get_nsr().ns_instance_config_ref
+
+        yield from asyncio.sleep(5, loop=self.loop)
+
+        itr = yield from self.dts.query_read(self.nsr_handler.xpath(),
+            rwdts.XactFlag.MERGE)
+
+        values = []
+        for res in itr:
+            result = yield from res
+            nsr_monp = result.result
+            values.append(nsr_monp.value_integer)
+
+        print(values)
+        assert expected_value in values
+
+    @rift.test.dts.async_test
+    def _test_nsr_monitor_publish_avg(self):
+        yield from self._test_publish("AVERAGE", 1)
+
+    @rift.test.dts.async_test
+    def _test_nsr_monitor_publish_sum(self):
+        yield from self._test_publish("SUM", 3)
+
+
+    @rift.test.dts.async_test
+    def _test_nsr_monitor_publish_max(self):
+        yield from self._test_publish("MAXIMUM", 2)
+
+    @rift.test.dts.async_test
+    def _test_nsr_monitor_publish_min(self):
+        yield from self._test_publish("MINIMUM", 1)
+
+    @rift.test.dts.async_test
+    def test_nsr_monitor_publish_count(self):
+        yield from self._test_publish("COUNT", 2)
+
+    @rift.test.dts.async_test
+    def _test_legacy_nsr_monitor_publish_avg(self):
+        yield from self._test_publish("AVERAGE", 1, legacy=True)
+
+
+
+def main():
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unittest_args = parser.parse_known_args()
+    if args.no_runner:
+        runner = None
+
+    MonParamsDtsTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/plugins/rwnsm/CMakeLists.txt b/rwlaunchpad/plugins/rwnsm/CMakeLists.txt
new file mode 100644 (file)
index 0000000..1db4a46
--- /dev/null
@@ -0,0 +1,47 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 05/15/2015
+# 
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwnsmtasklet)
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/${TASKLET_NAME}/__init__.py
+    rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
+    rift/tasklets/${TASKLET_NAME}/rwnsm_conman.py
+    rift/tasklets/${TASKLET_NAME}/rwnsmplugin.py
+    rift/tasklets/${TASKLET_NAME}/openmano_nsm.py
+    rift/tasklets/${TASKLET_NAME}/cloud.py
+    rift/tasklets/${TASKLET_NAME}/config_value_pool.py
+    rift/tasklets/${TASKLET_NAME}/publisher.py
+    rift/tasklets/${TASKLET_NAME}/xpath.py
+    rift/tasklets/${TASKLET_NAME}/rwvnffgmgr.py
+    rift/tasklets/${TASKLET_NAME}/scale_group.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
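+
+# For illustration only: the workaround above implies the installed plugin is
+# a thin shim that re-exports the site-packages implementation. A hypothetical
+# rwnsmtasklet.py plugin shim (the real file is installed separately by
+# rift_install_python_plugin and is not shown in this hunk) could be as small
+# as:
+#
+#     from rift.tasklets.rwnsmtasklet import NsmTasklet
+#
+# so the real module is byte-compiled as python3 while the plugin loader
+# still finds a file named after the tasklet.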
diff --git a/rwlaunchpad/plugins/rwnsm/Makefile b/rwlaunchpad/plugins/rwnsm/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
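+
+##
+# Illustration (not part of the build): the shell loop in find_upward ascends
+# one directory per iteration, so
+#
+#     $(call find_upward, "Makefile.top")
+#
+# expands to the nearest Makefile.top at or above the current directory, or
+# to the empty string if none is found before /.
+##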
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/__init__.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/__init__.py
new file mode 100644 (file)
index 0000000..1a3438c
--- /dev/null
@@ -0,0 +1 @@
+from .rwnsmtasklet import NsmTasklet
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/cloud.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/cloud.py
new file mode 100644 (file)
index 0000000..5326ca1
--- /dev/null
@@ -0,0 +1,238 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+from gi.repository import (
+    RwDts as rwdts,
+    RwcalYang as rwcal,
+    RwTypes,
+    ProtobufC,
+    )
+
+import rift.mano.cloud
+import rift.mano.dts as mano_dts
+import rift.tasklets
+
+from . import openmano_nsm
+from . import rwnsmplugin
+
+
+class RwNsPlugin(rwnsmplugin.NsmPluginBase):
+    """
+        RW implementation of the NsmPluginBase
+    """
+    def __init__(self, dts, log, loop, publisher, ro_account):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+
+    def create_nsr(self, nsr_msg, nsd):
+        """
+        Create Network service record
+        """
+        pass
+
+    @asyncio.coroutine
+    def deploy(self, nsr):
+        pass
+
+    @asyncio.coroutine
+    def instantiate_ns(self, nsr, config_xact):
+        """
+        Instantiate NSR with the passed nsr id
+        """
+        yield from nsr.instantiate(config_xact)
+
+    @asyncio.coroutine
+    def instantiate_vnf(self, nsr, vnfr):
+        """
+        Instantiate the VNF in the given NSR
+        """
+        yield from vnfr.instantiate(nsr)
+
+    @asyncio.coroutine
+    def instantiate_vl(self, nsr, vlr):
+        """
+        Instantiate the virtual link
+        """
+        yield from vlr.instantiate()
+
+    @asyncio.coroutine
+    def terminate_ns(self, nsr):
+        """
+        Terminate the network service
+        """
+        pass
+
+    @asyncio.coroutine
+    def terminate_vnf(self, vnfr):
+        """
+        Terminate the VNF
+        """
+        yield from vnfr.terminate()
+
+    @asyncio.coroutine
+    def terminate_vl(self, vlr):
+        """
+        Terminate the virtual link
+        """
+        yield from vlr.terminate()
+
+
+class NsmPlugins(object):
+    """ NSM Plugins """
+    def __init__(self):
+        self._plugin_classes = {
+                "openmano": openmano_nsm.OpenmanoNsPlugin,
+                }
+
+    @property
+    def plugins(self):
+        """ Plugin info """
+        return self._plugin_classes
+
+    def __getitem__(self, name):
+        """ Get item """
+        print("%s", self._plugin_classes)
+        return self._plugin_classes[name]
+
+    def register(self, plugin_name, plugin_class, *args):
+        """ Register a plugin to this Nsm"""
+        self._plugin_classes[plugin_name] = plugin_class
+
+    def deregister(self, plugin_name, plugin_class, *args):
+        """ Deregister a plugin to this Nsm"""
+        if plugin_name in self._plugin_classes:
+            del self._plugin_classes[plugin_name]
+
+    def class_by_plugin_name(self, name):
+        """ Get class by plugin name """
+        return self._plugin_classes[name]
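+
+# Usage sketch for the registry above (MockNsPlugin is hypothetical, shown
+# only to illustrate register/lookup):
+#
+#     plugins = NsmPlugins()
+#     plugins.register("mock", MockNsPlugin)
+#     assert plugins["openmano"] is openmano_nsm.OpenmanoNsPlugin
+#     assert plugins.class_by_plugin_name("mock") is MockNsPlugin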
+
+
+class ROAccountConfigSubscriber(mano_dts.AbstractConfigSubscriber):
+
+    def key_name(self):
+        return "name"
+
+    def get_xpath(self):
+        return("C,/rw-launchpad:resource-orchestrator")
+
+
+class CloudAccountConfigSubscriber:
+    def __init__(self, log, dts, log_hdl):
+        self._dts = dts
+        self._log = log
+        self._log_hdl = log_hdl
+
+        self._cloud_sub = rift.mano.cloud.CloudAccountConfigSubscriber(
+                self._dts,
+                self._log,
+                self._log_hdl,
+                rift.mano.cloud.CloudAccountConfigCallbacks())
+
+    def get_cloud_account_sdn_name(self, account_name):
+        if account_name in self._cloud_sub.accounts:
+            self._log.debug("Cloud accnt msg is %s",self._cloud_sub.accounts[account_name].account_msg)
+            if self._cloud_sub.accounts[account_name].account_msg.has_field("sdn_account"):
+                sdn_account = self._cloud_sub.accounts[account_name].account_msg.sdn_account 
+                self._log.info("SDN associated with Cloud name %s is %s", account_name, sdn_account)
+                return sdn_account
+            else:
+                self._log.debug("No SDN Account associated with Cloud name %s", account_name)
+                return None
+
+    @asyncio.coroutine
+    def register(self):
+        self._cloud_sub.register()
+
+
+class ROAccountPluginSelector(object):
+    """
+    Select the RO based on the config.
+
+    If no RO account is specified, then default to rift-ro.
+
+    Note:
+    Currently only one RO can be used (one-time global config.)
+    """
+    DEFAULT_PLUGIN = RwNsPlugin
+
+    def __init__(self, dts, log, loop, records_publisher):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._records_publisher = records_publisher
+
+        self._nsm_plugins = NsmPlugins()
+
+        self._ro_sub = ROAccountConfigSubscriber(
+                self._log,
+                self._dts,
+                self._loop,
+                callback=self.on_ro_account_change
+                )
+
+        # The default plugin will be RwNsPlugin
+        self._plugin_instances = {}
+        self._ro_plugin = self._create_plugin(self.DEFAULT_PLUGIN, None)
+
+    @property
+    def ro_plugin(self):
+        return self._ro_plugin
+
+    def on_ro_account_change(self, ro_account, action):
+        if action == rwdts.QueryAction.CREATE:
+            self._on_ro_account_added(ro_account)
+        elif action == rwdts.QueryAction.DELETE:
+            self._on_ro_account_deleted(ro_account)
+
+    def _on_ro_account_added(self, ro_account):
+        self._log.debug("Got nsm plugin RO account: %s", ro_account)
+        try:
+            nsm_cls = self._nsm_plugins.class_by_plugin_name(
+                    ro_account.account_type
+                    )
+        except KeyError as e:
+            self._log.debug(
+                "RO account nsm plugin not found: %s.  Using standard rift nsm.",
+                ro_account.name
+                )
+            nsm_cls = self.DEFAULT_PLUGIN
+
+        self._ro_plugin = self._create_plugin(nsm_cls, ro_account)
+
+    def _on_ro_account_deleted(self, ro_account):
+        self._ro_plugin = None
+
+    def _create_plugin(self, nsm_cls, ro_account):
+        # Check to see if the plugin was already instantiated
+        if nsm_cls in self._plugin_instances:
+            self._log.debug("RO account nsm plugin already instantiated.  Using existing.")
+            return self._plugin_instances[nsm_cls]
+
+        # Otherwise, instantiate a new plugin using the cloud account
+        self._log.debug("Instantiating new RO account using class: %s", nsm_cls)
+        nsm_instance = nsm_cls(self._dts, self._log, self._loop,
+                               self._records_publisher, ro_account)
+
+        self._plugin_instances[nsm_cls] = nsm_instance
+        return nsm_instance
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self._ro_sub.register()
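+
+# A minimal sketch of the selector lifecycle (ro_account stands in for a
+# hypothetical config message whose account_type is "openmano"):
+#
+#     selector = ROAccountPluginSelector(dts, log, loop, publisher)
+#     assert isinstance(selector.ro_plugin, RwNsPlugin)   # default plugin
+#     selector.on_ro_account_change(ro_account, rwdts.QueryAction.CREATE)
+#     # selector.ro_plugin is now the cached OpenmanoNsPlugin instance;
+#     # a DELETE action resets it to None.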
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/config_value_pool.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/config_value_pool.py
new file mode 100644 (file)
index 0000000..9e35e2f
--- /dev/null
@@ -0,0 +1,154 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import os
+import pickle
+import uuid
+
+
+class ParameterValueError(Exception):
+    pass
+
+
+class ParameterValuePool(object):
+    def __init__(self, log, name, value_iter):
+        self._log = log
+        self._name = name
+
+        self._used_pool_values = []
+        self._available_pool_values = list(value_iter)
+
+        self._backing_filepath = os.path.join(
+                os.environ["RIFT_ARTIFACTS"],
+                "parameter_pools",
+                self._name
+                )
+
+        self._read_used_pool_values()
+
+    def _save_used_pool_values(self):
+        dir_path = os.path.dirname(self._backing_filepath)
+        if not os.path.exists(dir_path):
+            try:
+                os.makedirs(dir_path, exist_ok=True)
+            except OSError as e:
+                self._log.error("Could not create directory for save used pool: %s", str(e))
+
+        try:
+            with open(self._backing_filepath, "wb") as hdl:
+                pickle.dump(self._used_pool_values, hdl)
+        except OSError as e:
+            self._log.error(
+                    "Could not open the parameter value pool file: %s",
+                    str(e))
+        except pickle.PickleError as e:
+            self._log.error(
+                    "Could not pickle the used parameter value pool: %s",
+                    str(e))
+
+    def _read_used_pool_values(self):
+        try:
+            with open(self._backing_filepath, 'rb') as hdl:
+                self._used_pool_values = pickle.load(hdl)
+
+        except (OSError, EOFError):
+            self._log.warning("Could not read from backing file: %s",
+                              self._backing_filepath)
+            self._used_pool_values = []
+
+        except pickle.PickleError as e:
+            self._log.warning("Could not unpickle the used parameter value pool from %s: %s",
+                              self._backing_filepath, str(e))
+            self._used_pool_values = []
+
+        for value in self._used_pool_values:
+            # Tolerate a backing file recorded against a different value range
+            if value in self._available_pool_values:
+                self._available_pool_values.remove(value)
+
+    def get_next_unused_value(self):
+        if len(self._available_pool_values) == 0:
+            raise ParameterValueError("Not more parameter values to to allocate")
+
+        next_value = self._available_pool_values[0]
+        self._log.debug("Got next value for parameter pool %s: %s", self._name, next_value)
+
+        return next_value
+
+    def add_used_value(self, value):
+        value = int(value)
+
+        if len(self._available_pool_values) == 0:
+            raise ParameterValueError("Not more parameter values to to allocate")
+
+        if value in self._used_pool_values:
+            raise ParameterValueError(
+                    "Primitive value of {} was already used for pool name: {}".format(
+                        value,
+                        self._name,
+                        )
+                    )
+
+        if value != self._available_pool_values[0]:
+            raise ParameterValueError("Parameter value not the next in the available list: %s", value)
+
+        self._available_pool_values.pop(0)
+        self._used_pool_values.append(value)
+        self._save_used_pool_values()
+
+    def remove_used_value(self, value):
+        if value not in self._used_pool_values:
+            self._log.warning("Primitive value of %s was never allocated for pool name: %s",
+                    value, self._name
+                    )
+            return
+
+        self._used_pool_values.remove(value)
+        self._available_pool_values.insert(0, value)
+        self._save_used_pool_values()
+
+
+if __name__ == "__main__":
+    import logging
+    logging.basicConfig(level=logging.DEBUG)
+    logger = logging.getLogger("config_value_pool.py")
+    name = str(uuid.uuid4())
+    param_pool = ParameterValuePool(logger, name, range(1000, 2000))
+
+    a = param_pool.get_next_unused_value()
+    assert a == 1000
+
+    param_pool.add_used_value(a)
+
+    a = param_pool.get_next_unused_value()
+    assert a == 1001
+    param_pool.add_used_value(a)
+
+    param_pool = ParameterValuePool(logger, name, range(1000, 2000))
+    a = param_pool.get_next_unused_value()
+    assert a == 1002
+
+    try:
+        param_pool.add_used_value(1004)
+    except ParameterValueError:
+        pass
+    else:
+        assert False
+
+    a = param_pool.get_next_unused_value()
+    assert a == 1002
+    param_pool.add_used_value(1002)
+
+    param_pool = ParameterValuePool(logger, name, range(1005, 2000))
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/openmano_nsm.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/openmano_nsm.py
new file mode 100644 (file)
index 0000000..c942003
--- /dev/null
@@ -0,0 +1,709 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import os
+import sys
+import time
+import yaml
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+from gi.repository import (
+    RwDts as rwdts,
+    RwVnfrYang,
+)
+
+import rift.openmano.rift2openmano as rift2openmano
+import rift.openmano.openmano_client as openmano_client
+from . import rwnsmplugin
+
+import rift.tasklets
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+DUMP_OPENMANO_DIR = os.path.join(
+        os.environ["RIFT_ARTIFACTS"],
+        "openmano_descriptors"
+        )
+
+
+def dump_openmano_descriptor(name, descriptor_str):
+    filename = "{}_{}.yaml".format(
+        time.strftime("%Y%m%d-%H%M%S"),
+        name
+        )
+
+    filepath = os.path.join(
+            DUMP_OPENMANO_DIR,
+            filename
+            )
+
+    try:
+        if not os.path.exists(DUMP_OPENMANO_DIR):
+            os.makedirs(DUMP_OPENMANO_DIR)
+
+        with open(filepath, 'w') as hdl:
+            hdl.write(descriptor_str)
+
+    except OSError as e:
+        print("Failed to dump openmano descriptor: %s" % str(e))
+
+    return filepath
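+
+# Example (hypothetical values): dump_openmano_descriptor("ping_vnf", yaml_str)
+# writes $RIFT_ARTIFACTS/openmano_descriptors/20160907-185651_ping_vnf.yaml and
+# returns that path; on I/O errors the failure is printed and the path is
+# still returned.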
+
+class VnfrConsoleOperdataDtsHandler(object):
+    """ registers 'D,/vnfr:vnfr-console/vnfr:vnfr[id]/vdur[id]' and handles CRUD from DTS"""
+    @property
+    def vnfr_vdu_console_xpath(self):
+        """ path for resource-mgr"""
+        return ("D,/rw-vnfr:vnfr-console/rw-vnfr:vnfr[rw-vnfr:id='{}']/rw-vnfr:vdur[vnfr:id='{}']".format(self._vnfr_id,self._vdur_id))
+
+    def __init__(self, dts, log, loop, nsr, vnfr_id, vdur_id, vdu_id):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._regh = None
+        self._nsr = nsr
+
+        self._vnfr_id = vnfr_id
+        self._vdur_id = vdur_id
+        self._vdu_id = vdu_id
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for VNFR VDU Operational Data read from dts """
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            """ prepare callback from dts """
+            xpath = ks_path.to_xpath(RwVnfrYang.get_schema())
+            self._log.debug(
+                "Got VNFR VDU Opdata xact_info: %s, action: %s): %s:%s",
+                xact_info, action, xpath, msg
+                )
+
+            if action == rwdts.QueryAction.READ:
+                schema = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur.schema()
+                path_entry = schema.keyspec_to_entry(ks_path)
+                self._log.debug("VDU Opdata path is {}".format(path_entry))
+
+                try:
+                    console_url = yield from self._loop.run_in_executor(
+                            None,
+                            self._nsr._http_api.get_instance_vm_console_url,
+                            self._nsr._nsr_uuid,
+                            self._vdur_id
+                            )
+
+                    self._log.debug("Got console response: %s for NSR ID %s vdur ID %s",
+                                        console_url,
+                                        self._nsr._nsr_uuid,
+                                        self._vdur_id
+                                       )
+                    vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
+                    vdur_console.id = self._vdur_id
+                    if console_url:
+                        vdur_console.console_url = console_url
+                    else:
+                        vdur_console.console_url = 'none'
+                    self._log.debug("Recevied console URL for vdu {} is {}".format(self._vdu_id,vdur_console))
+                except openmano_client.InstanceStatusError as e:
+                    self._log.error("Could not get NS instance console URL: %s",
+                                        str(e))
+                    vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
+                    vdur_console.id = self._vdur_id
+                    vdur_console.console_url = 'none'
+
+                xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK,
+                                            xpath=self.vnfr_vdu_console_xpath,
+                                            msg=vdur_console)
+            else:
+                #raise VnfRecordError("Not supported operation %s" % action)
+                self._log.error("Not supported operation %s" % action)
+                xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK)
+                return 
+
+        self._log.debug("Registering for VNFR VDU using xpath: %s",
+                        self.vnfr_vdu_console_xpath)
+        hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
+        with self._dts.group_create() as group:
+            self._regh = group.register(xpath=self.vnfr_vdu_console_xpath,
+                                        handler=hdl,
+                                        flags=rwdts.Flag.PUBLISHER,
+                                        )
+
+
+class OpenmanoVnfr(object):
+    def __init__(self, log, loop, cli_api, vnfr):
+        self._log = log
+        self._loop = loop
+        self._cli_api = cli_api
+        self._vnfr = vnfr
+        self._vnfd_id = vnfr.vnfd.id
+
+        self._vnf_id = None
+
+        self._created = False
+
+    @property
+    def vnfd(self):
+        return rift2openmano.RiftVNFD(self._vnfr.vnfd)
+
+    @property
+    def vnfr(self):
+        return self._vnfr
+
+    @property
+    def rift_vnfd_id(self):
+        return self._vnfd_id
+
+    @property
+    def openmano_vnfd_id(self):
+        return self._vnf_id
+
+    @property
+    def openmano_vnfd(self):
+        self._log.debug("Converting vnfd %s from rift to openmano", self.vnfd.id)
+        openmano_vnfd = rift2openmano.rift2openmano_vnfd(self.vnfd)
+        return openmano_vnfd
+
+    @property
+    def openmano_vnfd_yaml(self):
+        return yaml.safe_dump(self.openmano_vnfd, default_flow_style=False)
+
+    @asyncio.coroutine
+    def create(self):
+        self._log.debug("Creating openmano vnfd")
+        openmano_vnfd = self.openmano_vnfd
+        name = openmano_vnfd["vnf"]["name"]
+
+        # If the name already exists, get the openmano vnfd id
+        name_uuid_map = yield from self._loop.run_in_executor(
+                    None,
+                    self._cli_api.vnf_list,
+                    )
+
+        if name in name_uuid_map:
+            vnf_id = name_uuid_map[name]
+            self._log.debug("Vnf already created.  Got existing openmano vnfd id: %s", vnf_id)
+            self._vnf_id = vnf_id
+            return
+
+        self._vnf_id, _ = yield from self._loop.run_in_executor(
+                None,
+                self._cli_api.vnf_create,
+                self.openmano_vnfd_yaml,
+                )
+
+        fpath = dump_openmano_descriptor(
+           "{}_vnf".format(name),
+           self.openmano_vnfd_yaml
+           )
+
+        self._log.debug("Dumped Openmano VNF descriptor to: %s", fpath)
+
+        self._created = True
+
+    @asyncio.coroutine
+    def delete(self):
+        if not self._created:
+            return
+
+        self._log.debug("Deleting openmano vnfd")
+        if self._vnf_id is None:
+            self._log.warning("Openmano vnf id not set.  Cannot delete.")
+            return
+
+        yield from self._loop.run_in_executor(
+                None,
+                self._cli_api.vnf_delete,
+                self._vnf_id,
+                )
+
+
+class OpenmanoNsr(object):
+    TIMEOUT_SECS = 120
+
+    def __init__(self, dts, log, loop, publisher, cli_api, http_api, nsd_msg, nsr_config_msg):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._publisher = publisher
+        self._cli_api = cli_api
+        self._http_api = http_api
+
+        self._nsd_msg = nsd_msg
+        self._nsr_config_msg = nsr_config_msg
+
+        self._vnfrs = []
+        self._vdur_console_handler = {}
+
+        self._nsd_uuid = None
+        self._nsr_uuid = None
+
+        self._created = False
+
+        self._monitor_task = None
+
+    @property
+    def nsd(self):
+        return rift2openmano.RiftNSD(self._nsd_msg)
+
+    @property
+    def vnfds(self):
+        return {v.rift_vnfd_id: v.vnfd for v in self._vnfrs}
+
+    @property
+    def vnfrs(self):
+        return self._vnfrs
+
+    @property
+    def openmano_nsd_yaml(self):
+        self._log.debug("Converting nsd %s from rift to openmano", self.nsd.id)
+        openmano_nsd = rift2openmano.rift2openmano_nsd(self.nsd, self.vnfds)
+        return yaml.safe_dump(openmano_nsd, default_flow_style=False)
+
+
+    @property
+    def openmano_instance_create_yaml(self):
+        self._log.debug("Creating instance-scenario-create input file for nsd %s with name %s", self.nsd.id, self._nsr_config_msg.name)
+        openmano_instance_create = {}
+        openmano_instance_create["name"] = self._nsr_config_msg.name
+        openmano_instance_create["description"] = self._nsr_config_msg.description
+        openmano_instance_create["scenario"] = self._nsd_uuid
+        if self._nsr_config_msg.has_field("om_datacenter"):
+            openmano_instance_create["datacenter"] = self._nsr_config_msg.om_datacenter
+        openmano_instance_create["networks"] = {}
+        for vld_msg in self._nsd_msg.vld:
+            if vld_msg.vim_network_name:
+                network = {}
+                network["name"] = vld_msg.name
+                network["netmap-use"] = vld_msg.vim_network_name
+                #network["datacenter"] = vld_msg.om_datacenter
+                openmano_instance_create["networks"][vld_msg.name] = network 
+             
+        return yaml.safe_dump(openmano_instance_create, default_flow_style=False)
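+
+    # For reference, a document produced by the property above might look like
+    # the following (names and uuid are hypothetical):
+    #
+    #     name: ping-pong-nsr
+    #     description: demo instance
+    #     scenario: 6e3b1c87-...        # openmano scenario (nsd) uuid
+    #     networks:
+    #       mgmt_vl:
+    #         name: mgmt_vl
+    #         netmap-use: private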
+
+
+    @asyncio.coroutine
+    def add_vnfr(self, vnfr):
+        vnfr = OpenmanoVnfr(self._log, self._loop, self._cli_api, vnfr)
+        yield from vnfr.create()
+        self._vnfrs.append(vnfr)
+
+    @asyncio.coroutine
+    def delete(self):
+        if not self._created:
+            self._log.debug("NSD wasn't created.  Skipping delete.")
+            return
+
+        self._log.debug("Deleting openmano nsr")
+
+        yield from self._loop.run_in_executor(
+               None,
+               self._cli_api.ns_delete,
+               self._nsd_uuid,
+               )
+
+        self._log.debug("Deleting openmano vnfrs")
+        for vnfr in self._vnfrs:
+            yield from vnfr.delete()
+
+    @asyncio.coroutine
+    def create(self):
+        self._log.debug("Creating openmano scenario")
+        name_uuid_map = yield from self._loop.run_in_executor(
+                None,
+                self._cli_api.ns_list,
+                )
+
+        if self._nsd_msg.name in name_uuid_map:
+            self._log.debug("Found existing openmano scenario")
+            self._nsd_uuid = name_uuid_map[self._nsd_msg.name]
+            return
+
+
+        # Use the nsd uuid as the scenario name to rebind to an existing
+        # scenario on reload and to support multiple instances of the same
+        # nsd
+        self._nsd_uuid, _ = yield from self._loop.run_in_executor(
+                None,
+                self._cli_api.ns_create,
+                self.openmano_nsd_yaml,
+                self._nsd_msg.name
+                )
+        fpath = dump_openmano_descriptor(
+           "{}_nsd".format(self._nsd_msg.name),
+           self.openmano_nsd_yaml,
+           )
+
+        self._log.debug("Dumped Openmano NS descriptor to: %s", fpath)
+
+        self._created = True
+
+    @asyncio.coroutine
+    def instance_monitor_task(self):
+        self._log.debug("Starting Instance monitoring task")
+
+        start_time = time.time()
+        active_vnfs = []
+
+        while True:
+            yield from asyncio.sleep(1, loop=self._loop)
+
+            try:
+                instance_resp_json = yield from self._loop.run_in_executor(
+                        None,
+                        self._http_api.get_instance,
+                        self._nsr_uuid,
+                        )
+
+                self._log.debug("Got instance response: %s for NSR ID %s",
+                        instance_resp_json,
+                        self._nsr_uuid)
+
+            except openmano_client.InstanceStatusError as e:
+                self._log.error("Could not get NS instance status: %s", str(e))
+                continue
+
+            def all_vms_active(vnf):
+                for vm in vnf["vms"]:
+                    vm_status = vm["status"]
+                    vm_uuid = vm["uuid"]
+                    if vm_status != "ACTIVE":
+                        self._log.debug("VM is not yet active: %s (status: %s)", vm_uuid, vm_status)
+                        return False
+
+                return True
+
+            def any_vm_active_nomgmtip(vnf):
+                for vm in vnf["vms"]:
+                    vm_status = vm["status"]
+                    vm_uuid = vm["uuid"]
+                    if vm_status != "ACTIVE":
+                        self._log.debug("VM is not yet active: %s (status: %s)", vm_uuid, vm_status)
+                        return False
+
+                return True
+
+            def any_vms_error(vnf):
+                for vm in vnf["vms"]:
+                    vm_status = vm["status"]
+                    vm_vim_info = vm["vim_info"]
+                    vm_uuid = vm["uuid"]
+                    if vm_status == "ERROR":
+                        self._log.error("VM Error: %s (vim_info: %s)", vm_uuid, vm_vim_info)
+                        return True
+
+                return False
+
+            def get_vnf_ip_address(vnf):
+                if "ip_address" in vnf:
+                    return vnf["ip_address"].strip()
+                return None
+
+            def get_ext_cp_info(vnf):
+                cp_info_list = []
+                for vm in vnf["vms"]:
+                    if "interfaces" not in vm:
+                        continue
+
+                    for intf in vm["interfaces"]:
+                        if "external_name" not in intf:
+                            continue
+
+                        if not intf["external_name"]:
+                            continue
+
+                        ip_address = intf["ip_address"]
+                        if ip_address is None:
+                            ip_address = "0.0.0.0"
+
+                        cp_info_list.append((intf["external_name"], ip_address))
+
+                return cp_info_list
+
+            def get_vnf_status(vnfr):
+                # When we create an openmano descriptor we use <name>__<idx>
+                # to come up with openmano constituent VNF name.  Use this
+                # knowledge to map the vnfr back.
+                openmano_vnfr_suffix = "__{}".format(
+                        vnfr.vnfr.vnfr_msg.member_vnf_index_ref
+                        )
+
+                for vnf in instance_resp_json["vnfs"]:
+                    if vnf["vnf_name"].endswith(openmano_vnfr_suffix):
+                        return vnf
+
+                self._log.warning("Could not find vnf status with name that ends with: %s",
+                                  openmano_vnfr_suffix)
+                return None
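+
+            # e.g. (hypothetical): member_vnf_index_ref 2 yields the suffix
+            # "__2", which matches an openmano vnf_name such as "ping_vnfd__2".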
+
+            for vnfr in self._vnfrs:
+                if vnfr in active_vnfs:
+                    # Skipping, so we don't re-publish the same VNF message.
+                    continue
+
+                vnfr_msg = vnfr.vnfr.vnfr_msg.deep_copy()
+                vnfr_msg.operational_status = "init"
+
+                try:
+                    vnf_status = get_vnf_status(vnfr)
+                    self._log.debug("Found VNF status: %s", vnf_status)
+                    if vnf_status is None:
+                        self._log.error("Could not find VNF status from openmano")
+                        vnfr_msg.operational_status = "failed"
+                        yield from self._publisher.publish_vnfr(None, vnfr_msg)
+                        return
+
+                    # If there was a VNF that has a errored VM, then just fail the VNF and stop monitoring.
+                    if any_vms_error(vnf_status):
+                        self._log.debug("VM was found to be in error state.  Marking as failed.")
+                        vnfr_msg.operational_status = "failed"
+                        yield from self._publisher.publish_vnfr(None, vnfr_msg)
+                        return
+
+                    if all_vms_active(vnf_status):
+                        vnf_ip_address = get_vnf_ip_address(vnf_status)
+
+                        if vnf_ip_address is None:
+                            self._log.warning("No IP address obtained "
+                                    "for VNF: {}, will retry.".format(
+                                        vnf_status['vnf_name']))
+                            continue
+
+                        self._log.debug("All VMs in VNF are active.  Marking as running.")
+                        vnfr_msg.operational_status = "running"
+
+                        self._log.debug("Got VNF ip address: %s", vnf_ip_address)
+                        vnfr_msg.mgmt_interface.ip_address = vnf_ip_address
+                        vnfr_msg.vnf_configuration.config_access.mgmt_ip_address = vnf_ip_address
+
+
+                        for vm in vnf_status["vms"]:
+                            if vm["uuid"] not in self._vdur_console_handler:
+                                vdur_console_handler = VnfrConsoleOperdataDtsHandler(self._dts, self._log, self._loop, 
+                                                    self, vnfr_msg.id,vm["uuid"],vm["name"])
+                                yield from vdur_console_handler.register()
+                                self._vdur_console_handler[vm["uuid"]] = vdur_console_handler
+                             
+                            vdur_msg = vnfr_msg.vdur.add()
+                            vdur_msg.vim_id = vm["vim_vm_id"]
+                            vdur_msg.id = vm["uuid"]
+
+                        # Add connection point information for the config manager
+                        cp_info_list = get_ext_cp_info(vnf_status)
+                        for (cp_name, cp_ip) in cp_info_list:
+                            cp = vnfr_msg.connection_point.add()
+                            cp.name = cp_name
+                            cp.short_name = cp_name
+                            cp.ip_address = cp_ip
+
+                        yield from self._publisher.publish_vnfr(None, vnfr_msg)
+                        active_vnfs.append(vnfr)
+
+                    if (time.time() - start_time) > OpenmanoNsr.TIMEOUT_SECS:
+                        self._log.error("NSR timed out before reaching running state")
+                        vnfr_msg.operational_status = "failed"
+                        yield from self._publisher.publish_vnfr(None, vnfr_msg)
+                        return
+
+                except Exception as e:
+                    vnfr_msg.operational_status = "failed"
+                    yield from self._publisher.publish_vnfr(None, vnfr_msg)
+                    self._log.exception("Caught exception publishing vnfr info: %s", str(e))
+                    return
+
+            if len(active_vnfs) == len(self._vnfrs):
+                self._log.info("All VNF's are active.  Exiting NSR monitoring task")
+                return
+
+    @asyncio.coroutine
+    def deploy(self):
+        if self._nsd_uuid is None:
+            raise ValueError("Cannot deploy an uncreated nsd")
+
+        self._log.debug("Deploying openmano scenario")
+
+        name_uuid_map = yield from self._loop.run_in_executor(
+                None,
+                self._cli_api.ns_instance_list,
+                )
+
+        if self._nsr_config_msg.name in name_uuid_map:
+            self._log.debug("Found existing instance with nsr name: %s", self._nsr_config_msg.name)
+            self._nsr_uuid = name_uuid_map[self._nsr_config_msg.name]
+        else:
+            self._nsr_uuid = yield from self._loop.run_in_executor(
+                    None,
+                    self._cli_api.ns_instance_scenario_create,
+                    self.openmano_instance_create_yaml)
+
+            fpath = dump_openmano_descriptor(
+               "{}_instance_sce_create".format(self._nsr_config_msg.name),
+               self.openmano_instance_create_yaml,
+               )
+
+            self._log.debug("Dumped Openmano NS Scenario Cretae to: %s", fpath)
+
+
+        self._monitor_task = asyncio.ensure_future(
+                self.instance_monitor_task(), loop=self._loop
+                )
+
+    @asyncio.coroutine
+    def terminate(self):
+
+        for _, handler in self._vdur_console_handler.items():
+            handler._regh.deregister()
+
+        if self._nsr_uuid is None:
+            self._log.warning("Cannot terminate an un-instantiated nsr")
+            return
+
+        if self._monitor_task is not None:
+            self._monitor_task.cancel()
+            self._monitor_task = None
+
+        self._log.debug("Terminating openmano nsr")
+        yield from self._loop.run_in_executor(
+               None,
+               self._cli_api.ns_terminate,
+               self._nsr_uuid,
+               )
+
+
+class OpenmanoNsPlugin(rwnsmplugin.NsmPluginBase):
+    """
+        Openmano implementation of the NsmPluginBase
+    """
+    def __init__(self, dts, log, loop, publisher, ro_account):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._publisher = publisher
+
+        self._cli_api = None
+        self._http_api = None
+        self._openmano_nsrs = {}
+
+        self._set_ro_account(ro_account)
+
+    def _set_ro_account(self, ro_account):
+        self._log.debug("Setting openmano plugin cloud account: %s", ro_account)
+        self._cli_api = openmano_client.OpenmanoCliAPI(
+                self.log,
+                ro_account.openmano.host,
+                ro_account.openmano.port,
+                ro_account.openmano.tenant_id,
+                )
+
+        self._http_api = openmano_client.OpenmanoHttpAPI(
+                self.log,
+                ro_account.openmano.host,
+                ro_account.openmano.port,
+                ro_account.openmano.tenant_id,
+                )
+
+    def create_nsr(self, nsr_config_msg, nsd_msg):
+        """
+        Create Network service record
+        """
+        openmano_nsr = OpenmanoNsr(
+                self._dts,
+                self._log,
+                self._loop,
+                self._publisher,
+                self._cli_api,
+                self._http_api,
+                nsd_msg,
+                nsr_config_msg
+                )
+        self._openmano_nsrs[nsr_config_msg.id] = openmano_nsr
+
+    @asyncio.coroutine
+    def deploy(self, nsr_msg):
+        openmano_nsr = self._openmano_nsrs[nsr_msg.ns_instance_config_ref]
+        yield from openmano_nsr.create()
+        yield from openmano_nsr.deploy()
+
+    @asyncio.coroutine
+    def instantiate_ns(self, nsr, xact):
+        """
+        Instantiate NSR with the passed nsr id
+        """
+        yield from nsr.instantiate(xact)
+
+    @asyncio.coroutine
+    def instantiate_vnf(self, nsr, vnfr):
+        """
+        Instantiate the VNF in the given NSR
+        """
+        openmano_nsr = self._openmano_nsrs[nsr.id]
+        yield from openmano_nsr.add_vnfr(vnfr)
+
+        # Mark the VNFR as running
+        # TODO: Create a task to monitor nsr/vnfr status
+        vnfr_msg = vnfr.vnfr_msg.deep_copy()
+        vnfr_msg.operational_status = "init"
+
+        self._log.debug("Attempting to publish openmano vnf: %s", vnfr_msg)
+        with self._dts.transaction() as xact:
+            yield from self._publisher.publish_vnfr(xact, vnfr_msg)
+
+    @asyncio.coroutine
+    def instantiate_vl(self, nsr, vlr):
+        """
+        Instantiate the virtual link
+        """
+        pass
+
+    @asyncio.coroutine
+    def terminate_ns(self, nsr):
+        """
+        Terminate the network service
+        """
+        nsr_id = nsr.id
+        openmano_nsr = self._openmano_nsrs[nsr_id]
+        yield from openmano_nsr.terminate()
+        yield from openmano_nsr.delete()
+
+        with self._dts.transaction() as xact:
+            for vnfr in openmano_nsr.vnfrs:
+                self._log.debug("Unpublishing VNFR: %s", vnfr.vnfr.vnfr_msg)
+                yield from self._publisher.unpublish_vnfr(xact, vnfr.vnfr.vnfr_msg)
+
+        del self._openmano_nsrs[nsr_id]
+
+    @asyncio.coroutine
+    def terminate_vnf(self, vnfr):
+        """
+        Terminate the VNF
+        """
+        pass
+
+    @asyncio.coroutine
+    def terminate_vl(self, vlr):
+        """
+        Terminate the virtual link
+        """
+        pass
+
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/publisher.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/publisher.py
new file mode 100644 (file)
index 0000000..6c4b123
--- /dev/null
@@ -0,0 +1,289 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import concurrent.futures
+import json
+
+from gi.repository import (
+    RwDts as rwdts,
+    RwTypes,
+    RwVnfdYang,
+    RwYang
+    )
+import rift.tasklets
+
+import requests
+
+
+class NsrOpDataDtsHandler(object):
+    """ The network service op data DTS handler """
+    XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr"
+
+    def __init__(self, dts, log, loop):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ Return the registration handle"""
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for Nsr op data publisher registration"""
+        self._log.debug("Registering Nsr op data path %s as publisher",
+                        NsrOpDataDtsHandler.XPATH)
+
+        hdl = rift.tasklets.DTS.RegistrationHandler()
+        with self._dts.group_create() as group:
+            self._regh = group.register(xpath=NsrOpDataDtsHandler.XPATH,
+                                        handler=hdl,
+                                        flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ)
+
+    @asyncio.coroutine
+    def create(self, xact, path, msg):
+        """
+        Create an NS record in DTS with the path and message
+        """
+        self._log.debug("Creating NSR xact = %s, %s:%s", xact, path, msg)
+        self.regh.create_element(path, msg)
+        self._log.debug("Created NSR xact = %s, %s:%s", xact, path, msg)
+
+    @asyncio.coroutine
+    def update(self, xact, path, msg, flags=rwdts.XactFlag.REPLACE):
+        """
+        Update an NS record in DTS with the path and message
+        """
+        self._log.debug("Updating NSR xact = %s, %s:%s regh = %s", xact, path, msg, self.regh)
+        self.regh.update_element(path, msg, flags)
+        self._log.debug("Updated NSR xact = %s, %s:%s", xact, path, msg)
+
+    @asyncio.coroutine
+    def delete(self, xact, path):
+        """
+        Delete an NS record in DTS at the given path
+        """
+        self._log.debug("Deleting NSR xact:%s, path:%s", xact, path)
+        self.regh.delete_element(path)
+        self._log.debug("Deleted NSR xact:%s, path:%s", xact, path)
+
+
+
+class VnfrPublisherDtsHandler(object):
+    """ Registers 'D,/vnfr:vnfr-catalog/vnfr:vnfr' DTS"""
+    XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+
+    def __init__(self, dts, log, loop):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ Return registration handle"""
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for Vvnfr create/update/delete/read requests from dts """
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            """ prepare callback from dts """
+            self._log.debug(
+                "Got vnfr on_prepare callback (xact_info: %s, action: %s): %s",
+                xact_info, action, msg
+                )
+            raise NotImplementedError(
+                "%s action on VirtualNetworkFunctionRecord not supported" % action)
+
+        self._log.debug("Registering for VNFR using xpath: %s",
+                        VnfrPublisherDtsHandler.XPATH,)
+
+        hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
+        with self._dts.group_create() as group:
+            self._regh = group.register(xpath=VnfrPublisherDtsHandler.XPATH,
+                                        handler=hdl,
+                                        flags=(rwdts.Flag.PUBLISHER |
+                                               rwdts.Flag.NO_PREP_READ |
+                                               rwdts.Flag.CACHE),)
+
+    @asyncio.coroutine
+    def create(self, xact, path, msg):
+        """
+        Create a VNFR record in DTS with path and message
+        """
+        self._log.debug("Creating VNFR xact = %s, %s:%s",
+                        xact, path, msg)
+        self.regh.create_element(path, msg)
+        self._log.debug("Created VNFR xact = %s, %s:%s",
+                        xact, path, msg)
+
+    @asyncio.coroutine
+    def update(self, xact, path, msg):
+        """
+        Update a VNFR record in DTS with path and message
+        """
+        self._log.debug("Updating VNFR xact = %s, %s:%s",
+                        xact, path, msg)
+        self.regh.update_element(path, msg)
+        self._log.debug("Updated VNFR xact = %s, %s:%s",
+                        xact, path, msg)
+
+    @asyncio.coroutine
+    def delete(self, xact, path):
+        """
+        Delete a VNFR record in DTS with path and message
+        """
+        self._log.debug("Deleting VNFR xact = %s, %s", xact, path)
+        self.regh.delete_element(path)
+        self._log.debug("Deleted VNFR xact = %s, %s", xact, path)
+
+
+class VlrPublisherDtsHandler(object):
+    """ registers 'D,/vlr:vlr-catalog/vlr:vlr """
+    XPATH = "D,/vlr:vlr-catalog/vlr:vlr"
+
+    def __init__(self, dts, log, loop):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ Return registration handle"""
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for vlr create/update/delete/read requests from dts """
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            """ prepare callback from dts """
+            self._log.debug(
+                "Got vlr on_prepare callback (xact_info: %s, action: %s): %s",
+                xact_info, action, msg
+                )
+            raise NotImplementedError(
+                "%s action on VirtualLinkRecord not supported" % action)
+
+        self._log.debug("Registering for VLR using xpath: %s",
+                        VlrPublisherDtsHandler.XPATH,)
+
+        hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
+        with self._dts.group_create() as group:
+            self._regh = group.register(xpath=VlrPublisherDtsHandler.XPATH,
+                                        handler=hdl,
+                                        flags=(rwdts.Flag.PUBLISHER |
+                                               rwdts.Flag.NO_PREP_READ |
+                                               rwdts.Flag.CACHE),)
+
+    @asyncio.coroutine
+    def create(self, xact, path, msg):
+        """
+        Create a VLR record in DTS with path and message
+        """
+        self._log.debug("Creating VLR xact = %s, %s:%s",
+                        xact, path, msg)
+        self.regh.create_element(path, msg)
+        self._log.debug("Created VLR xact = %s, %s:%s",
+                        xact, path, msg)
+
+    @asyncio.coroutine
+    def update(self, xact, path, msg):
+        """
+        Update a VLR record in DTS with path and message
+        """
+        self._log.debug("Updating VLR xact = %s, %s:%s",
+                        xact, path, msg)
+        self.regh.update_element(path, msg)
+        self._log.debug("Updated VLR xact = %s, %s:%s",
+                        xact, path, msg)
+
+    @asyncio.coroutine
+    def delete(self, xact, path):
+        """
+        Delete a VLR record in DTS with path and message
+        """
+        self._log.debug("Deleting VLR xact = %s, %s", xact, path)
+        self.regh.delete_element(path)
+        self._log.debug("Deleted VLR xact = %s, %s", xact, path)
+
+
+class VnfdPublisher(object):
+    AUTH = ('admin', 'admin')
+    HEADERS = {"content-type": "application/vnd.yang.data+json"}
+
+
+    def __init__(self, use_ssl, ssl_cert, ssl_key, loop):
+        self.use_ssl = use_ssl
+        self.ssl_cert = ssl_cert
+        self.ssl_key = ssl_key
+        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
+        self.loop = loop
+
+    @asyncio.coroutine
+    def update(self, vnfd):
+        def update(vnfd):
+            """
+            Update VNFD record using rest API, as the config data is handled
+            by uAgent and stored in CDB
+            """
+
+            scheme = "https" if self.use_ssl else "http"
+
+            url = "{}://127.0.0.1:8008/api/config/vnfd-catalog/vnfd/{}"
+
+            model = RwYang.Model.create_libncx()
+            model.load_module("rw-vnfd")
+            model.load_module("vnfd")
+
+            data = vnfd.to_json(model)
+
+            key = "vnfd:vnfd-catalog"
+            newdict = json.loads(data)
+            if key in newdict:
+                data = json.dumps(newdict[key])
+
+            options = {"data": data,
+                       "headers": VnfdPublisher.HEADERS,
+                       "auth": VnfdPublisher.AUTH}
+
+            if self.use_ssl:
+                options["verify"] = False
+                options["cert"] = (self.ssl_cert, self.ssl_key)
+
+            response = requests.put(
+                url.format(scheme, vnfd.id),
+                **options
+            )
+            return response
+
+        status = yield from self.loop.run_in_executor(
+            self.executor,
+            update,
+            vnfd
+            )
+
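+
+# Illustrative sketch (not part of the original change): how the publisher
+# above might be driven. The event loop and the "vnfd" object (a GI VNFD
+# message carrying an "id" field) are assumptions for the example.
+@asyncio.coroutine
+def _example_publish_vnfd(loop, vnfd):
+    """ Sketch: push an updated VNFD to the local RESTCONF config endpoint """
+    publisher = VnfdPublisher(use_ssl=False, ssl_cert=None, ssl_key=None,
+                              loop=loop)
+    # update() serializes the VNFD to JSON and PUTs it in a worker thread,
+    # keeping the event loop responsive.
+    yield from publisher.update(vnfd)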
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conman.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsm_conman.py
new file mode 100644 (file)
index 0000000..01c0dcb
--- /dev/null
@@ -0,0 +1,150 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import ncclient
+import ncclient.asyncio_manager
+import re
+import time
+
+import gi
+gi.require_version('RwYang', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('NsrYang', '1.0')
+
+from gi.repository import (
+    NsrYang as nsrY,
+    RwYang,
+    RwNsmYang as nsmY,
+    RwDts as rwdts,
+    RwTypes,
+    RwConmanYang as conmanY,
+)
+
+import rift.tasklets
+
+class ROConfigManager(object):
+    def __init__(self, log, loop, dts, parent):
+        self._log = log
+        self._loop = loop
+        self._dts = dts
+        self.nsm = parent
+        self._log.debug("Initialized ROConfigManager")
+
+    def is_ready(self):
+        return True
+
+    @property
+    def cm_state_xpath(self):
+        return ("/rw-conman:cm-state/rw-conman:cm-nsr")
+
+    @classmethod
+    def map_config_status(cls, status):
+        cfg_map = {
+            'init': nsrY.ConfigStates.INIT,
+            'received': nsrY.ConfigStates.CONFIGURING,
+            'cfg_delay': nsrY.ConfigStates.CONFIGURING,
+            'cfg_process': nsrY.ConfigStates.CONFIGURING,
+            'cfg_process-failed': nsrY.ConfigStates.CONFIGURING,
+            'cfg_sched': nsrY.ConfigStates.CONFIGURING,
+            'connecting': nsrY.ConfigStates.CONFIGURING,
+            'failed_connection': nsrY.ConfigStates.CONFIGURING,
+            'netconf_connected': nsrY.ConfigStates.CONFIGURING,
+            'netconf_ssh_connected': nsrY.ConfigStates.CONFIGURING,
+            'restconf_connected': nsrY.ConfigStates.CONFIGURING,
+            'cfg_send': nsrY.ConfigStates.CONFIGURING,
+            'cfg_failed': nsrY.ConfigStates.FAILED,
+            'ready_no_cfg': nsrY.ConfigStates.CONFIG_NOT_NEEDED,
+            'ready': nsrY.ConfigStates.CONFIGURED,
+        }
+
+        return cfg_map[status]
+
+    @asyncio.coroutine
+    def update_ns_cfg_state(self, cm_nsr):
+        if cm_nsr is None:
+            return
+
+        nsrid = cm_nsr.get('id')
+
+        try:
+
+            # Update the VNFRs' config status
+            gen = []
+            if 'cm_vnfr' in cm_nsr:
+                gen = (vnfr for vnfr in cm_nsr['cm_vnfr']
+                       if vnfr['id'] in self.nsm._vnfrs)
+
+            for vnfr in gen:
+                vnfrid = vnfr['id']
+                new_status = ROConfigManager.map_config_status(vnfr['state'])
+                self._log.debug("Updating config status of VNFR {} " \
+                                "in NSR {} to {}({})".
+                                format(vnfrid, nsrid, new_status,
+                                       vnfr['state']))
+                yield from \
+                    self.nsm.vnfrs[vnfrid].set_config_status(new_status)
+
+            # Update the NSR's config status
+            new_status = ROConfigManager.map_config_status(cm_nsr['state'])
+            self._log.debug("Updating config status of NSR {} to {}({})".
+                                format(nsrid, new_status, cm_nsr['state']))
+            yield from self.nsm.nsrs[nsrid].set_config_status(new_status, cm_nsr.get('state_details'))
+
+        except Exception as e:
+            self._log.error("Failed to process cm-state for nsr {}: {}".
+                            format(nsrid, e))
+            self._log.exception(e)
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for cm-state changes """
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, query_action, ks_path, msg):
+            """ cm-state changed """
+
+            #print("###>>> cm-state change ({}), msg_dict = {}".format(query_action, msg_dict))
+            self._log.debug("Received cm-state on_prepare (%s:%s:%s)",
+                            query_action,
+                            ks_path,
+                            msg)
+
+            if (query_action == rwdts.QueryAction.UPDATE or
+                query_action == rwdts.QueryAction.CREATE):
+                # Update Each NSR/VNFR state
+                msg_dict = msg.as_dict()
+                yield from self.update_ns_cfg_state(msg_dict)
+            elif query_action == rwdts.QueryAction.DELETE:
+                self._log.debug("DELETE action in on_prepare for cm-state, ignoring")
+            else:
+                raise NotImplementedError(
+                    "{} on cm-state is not supported".format(query_action))
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        try:
+            handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
+            self.dts_reg_hdl = yield from self._dts.register(self.cm_state_xpath,
+                                                             flags=rwdts.Flag.SUBSCRIBER,
+                                                             handler=handler)
+        except Exception as e:
+            self._log.error("Failed to register for cm-state changes as %s", str(e))
+            
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmplugin.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmplugin.py
new file mode 100755 (executable)
index 0000000..ec16259
--- /dev/null
@@ -0,0 +1,112 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import abc
+
+
+class NsmPluginBase(object):
+    """
+        Abstract base class for the NSM plugin.
+        There will be a single instance of this plugin for each plugin type.
+    """
+
+    def __init__(self, dts, log, loop, nsm, plugin_name, dts_publisher):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsm = nsm
+        self._plugin_name = plugin_name
+        self._dts_publisher = dts_publisher
+
+    @property
+    def dts(self):
+        return self._dts
+
+    @property
+    def log(self):
+        return self._log
+
+    @property
+    def loop(self):
+        return self._loop
+
+    @property
+    def nsm(self):
+        return self._nsm
+
+    def create_nsr(self, nsr):
+        """ Create an NSR """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def deploy(self, nsr_msg):
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def instantiate_ns(self, nsr, xact):
+        """ Instantiate the network service """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def instantiate_vnf(self, nsr, vnfr):
+        """ Instantiate the virtual network function """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def instantiate_vl(self, nsr, vl):
+        """ Instantiate the virtual link"""
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def get_nsr(self, nsr_path):
+        """ Get the NSR """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def get_vnfr(self, vnfr_path):
+        """ Get the VNFR """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def get_vlr(self, vlr_path):
+        """ Get the VLR """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def terminate_ns(self, nsr):
+        """Terminate the network service """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def terminate_vnf(self, vnfr):
+        """Terminate the VNF """
+        pass
+
+    @abc.abstractmethod
+    @asyncio.coroutine
+    def terminate_vl(self, vlr):
+        """Terminate the Virtual Link Record"""
+        pass
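+
+
+# Illustrative sketch (not part of the original change): the smallest
+# concrete plugin satisfying the NsmPluginBase contract. Note that
+# NsmPluginBase does not use abc.ABCMeta, so the @abc.abstractmethod
+# markers above are advisory only; a subclass like this simply chooses to
+# implement them all. All names below are assumptions for the example.
+class _ExampleNoopNsmPlugin(NsmPluginBase):
+    """ Sketch of a do-nothing NSM plugin implementing the abstract API """
+
+    @asyncio.coroutine
+    def deploy(self, nsr_msg):
+        self.log.debug("noop deploy for %s", nsr_msg)
+
+    @asyncio.coroutine
+    def instantiate_ns(self, nsr, xact):
+        self.log.debug("noop instantiate_ns")
+
+    @asyncio.coroutine
+    def instantiate_vnf(self, nsr, vnfr):
+        self.log.debug("noop instantiate_vnf")
+
+    @asyncio.coroutine
+    def instantiate_vl(self, nsr, vl):
+        self.log.debug("noop instantiate_vl")
+
+    @asyncio.coroutine
+    def get_nsr(self, nsr_path):
+        return None
+
+    @asyncio.coroutine
+    def get_vnfr(self, vnfr_path):
+        return None
+
+    @asyncio.coroutine
+    def get_vlr(self, vlr_path):
+        return None
+
+    @asyncio.coroutine
+    def terminate_ns(self, nsr):
+        self.log.debug("noop terminate_ns")
+
+    @asyncio.coroutine
+    def terminate_vnf(self, vnfr):
+        self.log.debug("noop terminate_vnf")
+
+    @asyncio.coroutine
+    def terminate_vl(self, vlr):
+        self.log.debug("noop terminate_vl")
+
+# Usage (hypothetical): plugin = _ExampleNoopNsmPlugin(dts, log, loop, nsm,
+#                                                      "noop", dts_publisher)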
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwnsmtasklet.py
new file mode 100755 (executable)
index 0000000..de21b5c
--- /dev/null
@@ -0,0 +1,4323 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
+import asyncio
+import ncclient
+import ncclient.asyncio_manager
+import os
+import shutil
+import sys
+import tempfile
+import time
+import uuid
+import yaml
+
+
+from collections import deque
+from collections import defaultdict
+from enum import Enum
+
+import gi
+gi.require_version('RwYang', '1.0')
+gi.require_version('RwNsdYang', '1.0')
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwVlrYang', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+from gi.repository import (
+    RwYang,
+    RwNsrYang,
+    NsrYang,
+    NsdYang,
+    RwVlrYang,
+    VnfrYang,
+    RwVnfrYang,
+    RwNsmYang,
+    RwsdnYang,
+    RwDts as rwdts,
+    RwTypes,
+    ProtobufC,
+)
+
+import rift.tasklets
+import rift.mano.ncclient
+import rift.mano.config_data.config
+import rift.mano.dts as mano_dts
+
+from . import rwnsm_conman as conman
+from . import cloud
+from . import publisher
+from . import xpath
+from . import config_value_pool
+from . import rwvnffgmgr
+from . import scale_group
+
+
+class NetworkServiceRecordState(Enum):
+    """ Network Service Record State """
+    INIT = 101
+    VL_INIT_PHASE = 102
+    VNF_INIT_PHASE = 103
+    VNFFG_INIT_PHASE = 104
+    RUNNING = 106
+    SCALING_OUT = 107
+    SCALING_IN = 108
+    TERMINATE = 109
+    TERMINATE_RCVD = 110
+    VL_TERMINATE_PHASE = 111
+    VNF_TERMINATE_PHASE = 112
+    VNFFG_TERMINATE_PHASE = 113
+    TERMINATED = 114
+    FAILED = 115
+    VL_INSTANTIATE = 116
+    VL_TERMINATE = 117
+
+
+class NetworkServiceRecordError(Exception):
+    """ Network Service Record Error """
+    pass
+
+
+class NetworkServiceDescriptorError(Exception):
+    """ Network Service Descriptor Error """
+    pass
+
+
+class VirtualNetworkFunctionRecordError(Exception):
+    """ Virtual Network Function Record Error """
+    pass
+
+
+class NetworkServiceDescriptorNotFound(Exception):
+    """ Cannot find Network Service Descriptor"""
+    pass
+
+
+class NetworkServiceDescriptorRefCountExists(Exception):
+    """ Network Service Descriptor reference count exists """
+    pass
+
+
+class NetworkServiceDescriptorUnrefError(Exception):
+    """ Failed to unref a network service descriptor """
+    pass
+
+
+class NsrInstantiationFailed(Exception):
+    """ Failed to instantiate network service """
+    pass
+
+
+class VnfInstantiationFailed(Exception):
+    """ Failed to instantiate virtual network function"""
+    pass
+
+
+class VnffgInstantiationFailed(Exception):
+    """ Failed to instantiate virtual network function"""
+    pass
+
+
+class VnfDescriptorError(Exception):
+    """Failed to instantiate virtual network function"""
+    pass
+
+
+class ScalingOperationError(Exception):
+    pass
+
+
+class ScaleGroupMissingError(Exception):
+    pass
+
+
+class PlacementGroupError(Exception):
+    pass
+
+
+class NsrNsdUpdateError(Exception):
+    pass
+
+
+class NsrVlUpdateError(NsrNsdUpdateError):
+    pass
+
+
+class VlRecordState(Enum):
+    """ VL Record State """
+    INIT = 101
+    INSTANTIATION_PENDING = 102
+    ACTIVE = 103
+    TERMINATE_PENDING = 104
+    TERMINATED = 105
+    FAILED = 106
+
+
+class VnffgRecordState(Enum):
+    """ VNFFG Record State """
+    INIT = 101
+    INSTANTIATION_PENDING = 102
+    ACTIVE = 103
+    TERMINATE_PENDING = 104
+    TERMINATED = 105
+    FAILED = 106
+
+
+class VnffgRecord(object):
+    """ Vnffg Records class"""
+    SFF_DP_PORT = 4790
+    SFF_MGMT_PORT = 5000
+
+    def __init__(self, dts, log, loop, vnffgmgr, nsr, nsr_name, vnffgd_msg, sdn_account_name):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._vnffgmgr = vnffgmgr
+        self._nsr = nsr
+        self._nsr_name = nsr_name
+        self._vnffgd_msg = vnffgd_msg
+        if sdn_account_name is None:
+            self._sdn_account_name = ''
+        else:
+            self._sdn_account_name = sdn_account_name
+
+        self._vnffgr_id = str(uuid.uuid4())
+        self._vnffgr_rsp_id = list()
+        self._vnffgr_state = VnffgRecordState.INIT
+
+    @property
+    def id(self):
+        """ VNFFGR id """
+        return self._vnffgr_id
+
+    @property
+    def state(self):
+        """ state of this VNF """
+        return self._vnffgr_state
+
+    def fetch_vnffgr(self):
+        """
+        Get VNFFGR message to be published
+        """
+
+        if self._vnffgr_state == VnffgRecordState.INIT:
+            vnffgr_dict = {"id": self._vnffgr_id,
+                           "nsd_id": self._nsr.nsd_id,
+                           "vnffgd_id_ref": self._vnffgd_msg.id,
+                           "vnffgd_name_ref": self._vnffgd_msg.name,
+                           "sdn_account": self._sdn_account_name,
+                           "operational_status": 'init',
+                           }
+            vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
+        elif self._vnffgr_state == VnffgRecordState.TERMINATED:
+            vnffgr_dict = {"id": self._vnffgr_id,
+                           "nsd_id": self._nsr.nsd_id,
+                           "vnffgd_id_ref": self._vnffgd_msg.id,
+                           "vnffgd_name_ref": self._vnffgd_msg.name,
+                           "sdn_account": self._sdn_account_name,
+                           "operational_status": 'terminated',
+                           }
+            vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
+        else:
+            try:
+                vnffgr = self._vnffgmgr.fetch_vnffgr(self._vnffgr_id)
+            except Exception:
+                self._log.exception("Fetching VNFFGR for VNFFG with id %s failed", self._vnffgr_id)
+                self._vnffgr_state = VnffgRecordState.FAILED
+                vnffgr_dict = {"id": self._vnffgr_id,
+                               "nsd_id": self._nsr.nsd_id,
+                               "vnffgd_id_ref": self._vnffgd_msg.id,
+                               "vnffgd_name_ref": self._vnffgd_msg.name,
+                               "sdn_account": self._sdn_account_name,
+                               "operational_status": 'failed',
+                               }
+                vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
+
+        return vnffgr
+
+    @asyncio.coroutine
+    def vnffgr_create_msg(self):
+        """ Virtual Link Record message for Creating VLR in VNS """
+        vnffgr_dict = {"id": self._vnffgr_id,
+                       "nsd_id": self._nsr.nsd_id,
+                       "vnffgd_id_ref": self._vnffgd_msg.id,
+                       "vnffgd_name_ref": self._vnffgd_msg.name,
+                       "sdn_account": self._sdn_account_name,
+                    }
+        vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
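+        # For each RSP in the descriptor, build a VNFFGR RSP entry and
+        # resolve every VNFD connection-point reference to the concrete VNFR
+        # connection point, waiting for each referenced VNFR to reach the
+        # 'running' state before reading its port and VM details.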
+        for rsp in self._vnffgd_msg.rsp:
+            vnffgr_rsp = vnffgr.rsp.add()
+            vnffgr_rsp.id = str(uuid.uuid4())
+            vnffgr_rsp.name = self._nsr.name + '.' + rsp.name
+            self._vnffgr_rsp_id.append(vnffgr_rsp.id)
+            vnffgr_rsp.vnffgd_rsp_id_ref = rsp.id
+            vnffgr_rsp.vnffgd_rsp_name_ref = rsp.name
+            for rsp_cp_ref in rsp.vnfd_connection_point_ref:
+                vnfd = [vnfr.vnfd for vnfr in self._nsr.vnfrs.values() if vnfr.vnfd.id == rsp_cp_ref.vnfd_id_ref]
+                self._log.debug("VNFD message during VNFFG instantiation is %s",vnfd)
+                if len(vnfd) > 0 and vnfd[0].has_field('service_function_type'):
+                    self._log.debug("Service Function Type for VNFD ID %s is %s",rsp_cp_ref.vnfd_id_ref, vnfd[0].service_function_type)
+                else:
+                    self._log.error("Service Function Type not available for VNFD ID %s; Skipping in chain",rsp_cp_ref.vnfd_id_ref)
+                    continue
+
+                vnfr_cp_ref = vnffgr_rsp.vnfr_connection_point_ref.add()
+                vnfr_cp_ref.member_vnf_index_ref = rsp_cp_ref.member_vnf_index_ref
+                vnfr_cp_ref.hop_number = rsp_cp_ref.order
+                vnfr_cp_ref.vnfd_id_ref = rsp_cp_ref.vnfd_id_ref
+                vnfr_cp_ref.service_function_type = vnfd[0].service_function_type
+                for nsr_vnfr in self._nsr.vnfrs.values():
+                   if (nsr_vnfr.vnfd.id == vnfr_cp_ref.vnfd_id_ref and
+                      nsr_vnfr.member_vnf_index == vnfr_cp_ref.member_vnf_index_ref):
+                       vnfr_cp_ref.vnfr_id_ref = nsr_vnfr.id
+                       vnfr_cp_ref.vnfr_name_ref = nsr_vnfr.name
+                       vnfr_cp_ref.vnfr_connection_point_ref = rsp_cp_ref.vnfd_connection_point_ref
+
+                       vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
+                       self._log.debug(" Received VNFR is %s", vnfr)
+                       while vnfr.operational_status != 'running':
+                           self._log.info("Received vnf op status is %s; retrying",vnfr.operational_status)
+                           if vnfr.operational_status == 'failed':
+                               self._log.error("Fetching VNFR for  %s failed", vnfr.id)
+                               raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" % (self.id, vnfr.id))
+                           yield from asyncio.sleep(2, loop=self._loop)
+                           vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
+                           self._log.debug("Received VNFR is %s", vnfr)
+
+                       vnfr_cp_ref.connection_point_params.mgmt_address = vnfr.mgmt_interface.ip_address
+                       for cp in vnfr.connection_point:
+                           if cp.name == vnfr_cp_ref.vnfr_connection_point_ref:
+                               vnfr_cp_ref.connection_point_params.port_id = cp.connection_point_id
+                               vnfr_cp_ref.connection_point_params.name = self._nsr.name + '.' + cp.name
+                               for vdu in vnfr.vdur:
+                                   for ext_intf in vdu.external_interface:
+                                       if ext_intf.name == vnfr_cp_ref.vnfr_connection_point_ref:
+                                           vnfr_cp_ref.connection_point_params.vm_id = vdu.vim_id
+                                           self._log.debug("VIM ID for CP %s in VNFR %s is %s",cp.name,nsr_vnfr.id,
+                                                            vnfr_cp_ref.connection_point_params.vm_id)
+                                           break
+
+                               vnfr_cp_ref.connection_point_params.address = cp.ip_address
+                               vnfr_cp_ref.connection_point_params.port = VnffgRecord.SFF_DP_PORT
+
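+        # Build classifier entries: bind each VNFFGD classifier to its
+        # rendered RSP and to the VNFR that hosts the classifier function.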
+        for vnffgd_classifier in self._vnffgd_msg.classifier:
+            _rsp = [rsp for rsp in vnffgr.rsp if rsp.vnffgd_rsp_id_ref == vnffgd_classifier.rsp_id_ref]
+            if len(_rsp) > 0:
+                rsp_id_ref = _rsp[0].id
+                rsp_name = _rsp[0].name
+            else:
+                self._log.error("RSP with ID %s not found during classifier creation for classifier id %s",vnffgd_classifier.rsp_id_ref,vnffgd_classifier.id)
+                continue
+            vnffgr_classifier = vnffgr.classifier.add()
+            vnffgr_classifier.id = vnffgd_classifier.id
+            vnffgr_classifier.name = self._nsr.name + '.' + vnffgd_classifier.name
+            _rsp[0].classifier_name = vnffgr_classifier.name
+            vnffgr_classifier.rsp_id_ref = rsp_id_ref
+            vnffgr_classifier.rsp_name = rsp_name
+            for nsr_vnfr in self._nsr.vnfrs.values():
+               if (nsr_vnfr.vnfd.id == vnffgd_classifier.vnfd_id_ref and
+                      nsr_vnfr.member_vnf_index == vnffgd_classifier.member_vnf_index_ref):
+                       vnffgr_classifier.vnfr_id_ref = nsr_vnfr.id
+                       vnffgr_classifier.vnfr_name_ref = nsr_vnfr.name
+                       vnffgr_classifier.vnfr_connection_point_ref = vnffgd_classifier.vnfd_connection_point_ref
+
+                       if nsr_vnfr.vnfd.service_function_chain == 'CLASSIFIER':
+                           vnffgr_classifier.sff_name = nsr_vnfr.name
+
+                       vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
+                       self._log.debug(" Received VNFR is %s", vnfr)
+                       while vnfr.operational_status != 'running':
+                           self._log.info("Received vnf op status is %s; retrying",vnfr.operational_status)
+                           if vnfr.operational_status == 'failed':
+                               self._log.error("Fetching VNFR for  %s failed", vnfr.id)
+                               raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" % (self.id, vnfr.id))
+                           yield from asyncio.sleep(2, loop=self._loop)
+                           vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
+                           self._log.debug("Received VNFR is %s", vnfr)
+
+                       for cp in vnfr.connection_point:
+                           if cp.name == vnffgr_classifier.vnfr_connection_point_ref:
+                               vnffgr_classifier.port_id = cp.connection_point_id
+                               vnffgr_classifier.ip_address = cp.ip_address
+                               for vdu in vnfr.vdur:
+                                   for ext_intf in vdu.external_interface:
+                                       if ext_intf.name == vnffgr_classifier.vnfr_connection_point_ref:
+                                           vnffgr_classifier.vm_id = vdu.vim_id
+                                           self._log.debug("VIM ID for CP %s in VNFR %s is %s",
+                                                           cp.name, nsr_vnfr.id,
+                                                           vnffgr_classifier.vm_id)
+                                           break
+
+        self._log.info("VNFFGR msg to be sent is %s", vnffgr)
+        return vnffgr
+
+    @asyncio.coroutine
+    def vnffgr_nsr_sff_list(self):
+        """ SFF List for VNFR """
+        sff_list = {}
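+        # Note: keyed by VNFD id; each value is an RwsdnYang.VNFFGSff
+        # describing one CLASSIFIER or SFF VNF in this NSR.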
+        sf_list = [nsr_vnfr.name for nsr_vnfr in self._nsr.vnfrs.values() if nsr_vnfr.vnfd.service_function_chain == 'SF']
+
+        for nsr_vnfr in self._nsr.vnfrs.values():
+            if (nsr_vnfr.vnfd.service_function_chain == 'CLASSIFIER' or nsr_vnfr.vnfd.service_function_chain == 'SFF'):
+                vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
+                self._log.debug(" Received VNFR is %s", vnfr)
+                while vnfr.operational_status != 'running':
+                    self._log.info("Received vnf op status is %s; retrying",vnfr.operational_status)
+                    if vnfr.operational_status == 'failed':
+                       self._log.error("Fetching VNFR for  %s failed", vnfr.id)
+                       raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" % (self.id, vnfr.id))
+                    yield from asyncio.sleep(2, loop=self._loop)
+                    vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
+                    self._log.debug("Received VNFR is %s", vnfr)
+
+                sff = RwsdnYang.VNFFGSff()
+                sff_list[nsr_vnfr.vnfd.id] = sff
+                sff.name = nsr_vnfr.name
+                sff.function_type = nsr_vnfr.vnfd.service_function_chain
+
+                sff.mgmt_address = vnfr.mgmt_interface.ip_address
+                sff.mgmt_port = VnffgRecord.SFF_MGMT_PORT
+                for cp in vnfr.connection_point:
+                    sff_dp = sff.dp_endpoints.add()
+                    sff_dp.name = self._nsr.name + '.' + cp.name
+                    sff_dp.address = cp.ip_address
+                    sff_dp.port = VnffgRecord.SFF_DP_PORT
+                if nsr_vnfr.vnfd.service_function_chain == 'SFF':
+                    for sf_name in sf_list:
+                        _sf = sff.vnfr_list.add()
+                        _sf.vnfr_name = sf_name
+
+        return sff_list
+
+    @asyncio.coroutine
+    def instantiate(self):
+        """ Instantiate this VNFFG """
+
+        self._log.info("Instaniating VNFFGR with vnffgd %s",
+                       self._vnffgd_msg)
+
+
+        vnffgr_request = yield from self.vnffgr_create_msg()
+        vnffg_sff_list = yield from self.vnffgr_nsr_sff_list()
+
+        try:
+            vnffgr = self._vnffgmgr.create_vnffgr(vnffgr_request,self._vnffgd_msg.classifier,vnffg_sff_list)
+        except Exception as e:
+            self._log.exception("VNFFG instantiation failed: %s", str(e))
+            self._vnffgr_state = VnffgRecordState.FAILED
+            raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFFGR %s failure" % (self.id, vnffgr_request.id))
+
+        self._vnffgr_state = VnffgRecordState.INSTANTIATION_PENDING
+
+        self._log.info("Instantiated VNFFGR :%s", vnffgr)
+        self._vnffgr_state = VnffgRecordState.ACTIVE
+
+        self._log.info("Invoking update_state to update NSR state for NSR ID: %s", self._nsr.id)
+        yield from self._nsr.update_state()
+
+    def vnffgr_in_vnffgrm(self):
+        """ Is there a VNFR record in VNFM """
+        if (self._vnffgr_state == VnffgRecordState.ACTIVE or
+                self._vnffgr_state == VnffgRecordState.INSTANTIATION_PENDING or
+                self._vnffgr_state == VnffgRecordState.FAILED):
+            return True
+
+        return False
+
+    @asyncio.coroutine
+    def terminate(self):
+        """ Terminate this VNFFGR """
+        if not self.vnffgr_in_vnffgrm():
+            self._log.error("Ignoring terminate request for id %s in state %s",
+                            self.id, self._vnffgr_state)
+            return
+
+        self._log.info("Terminating VNFFGR id:%s", self.id)
+        self._vnffgr_state = VnffgRecordState.TERMINATE_PENDING
+
+        self._vnffgmgr.terminate_vnffgr(self._vnffgr_id)
+
+        self._vnffgr_state = VnffgRecordState.TERMINATED
+        self._log.debug("Terminated VNFFGR id:%s", self.id)
+
+
+class VirtualLinkRecord(object):
+    """ Virtual Link Records class"""
+    @staticmethod
+    @asyncio.coroutine
+    def create_record(dts, log, loop, nsr_name, vld_msg, cloud_account_name, ip_profile, nsr_id, restart_mode=False):
+        """Creates a new VLR object based on the given data.
+
+        If restart mode is enabled, then we look for existing records in the
+        DTS and create the VLR record using the existing data (ID).
+
+        Returns:
+            VirtualLinkRecord
+        """
+        vlr_obj = VirtualLinkRecord(
+                      dts,
+                      log,
+                      loop,
+                      nsr_name,
+                      vld_msg,
+                      cloud_account_name,
+                      ip_profile,
+                      nsr_id,
+                      )
+
+        if restart_mode:
+            res_iter = yield from dts.query_read(
+                              "D,/vlr:vlr-catalog/vlr:vlr",
+                              rwdts.XactFlag.MERGE)
+
+            for fut in res_iter:
+                response = yield from fut
+                vlr = response.result
+
+                # Check if the record is already present, if so use the ID of
+                # the existing record. Since the name of the record is uniquely
+                # formed we can use it as a search key!
+                if vlr.name == vlr_obj.name:
+                    vlr_obj.reset_id(vlr.id)
+                    break
+
+        return vlr_obj
+
+    def __init__(self, dts, log, loop, nsr_name, vld_msg, cloud_account_name, ip_profile, nsr_id):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsr_name = nsr_name
+        self._vld_msg = vld_msg
+        self._cloud_account_name = cloud_account_name
+        self._assigned_subnet = None
+        self._nsr_id = nsr_id
+        self._ip_profile = ip_profile
+        self._vlr_id = str(uuid.uuid4())
+        self._state = VlRecordState.INIT
+        self._prev_state = None
+
+    @property
+    def xpath(self):
+        """ path for this object """
+        return "D,/vlr:vlr-catalog/vlr:vlr[vlr:id = '{}']".format(self._vlr_id)
+
+    @property
+    def id(self):
+        """ VLR id """
+        return self._vlr_id
+
+    @property
+    def nsr_name(self):
+        """ Get NSR name for this VL """
+        return self._nsr_name
+
+    @property
+    def vld_msg(self):
+        """ Virtual Link Desciptor """
+        return self._vld_msg
+
+    @property
+    def assigned_subnet(self):
+        """ Subnet assigned to this VL"""
+        return self._assigned_subnet
+
+    @property
+    def name(self):
+        """
+        Get the name for this VLR.
+        If the VLD specifies a vim-network-name it is used as-is;
+        otherwise the VLR name is "<nsr name>.<VLD name>".
+        """
+        if self.vld_msg.vim_network_name:
+            return self.vld_msg.vim_network_name
+        elif self.vld_msg.name == "multisite":
+            # This is a temporary hack to identify manually provisioned inter-site network
+            return self.vld_msg.name
+        else:
+            return self._nsr_name + "." + self.vld_msg.name
+
+    @property
+    def cloud_account_name(self):
+        """ Cloud account that this VLR should be created in """
+        return self._cloud_account_name
+
+    @staticmethod
+    def vlr_xpath(vlr):
+        """ Get the VLR path from VLR """
+        return (VirtualLinkRecord.XPATH + "[vlr:id = '{}']").format(vlr.id)
+
+    @property
+    def state(self):
+        """ VLR state """
+        return self._state
+
+    @state.setter
+    def state(self, value):
+        """ VLR set state """
+        self._state = value
+
+    @property
+    def prev_state(self):
+        """ VLR previous state """
+        return self._prev_state
+
+    @prev_state.setter
+    def prev_state(self, value):
+        """ VLR set previous state """
+        self._prev_state = value
+
+    @property
+    def vlr_msg(self):
+        """ Virtual Link Record message for Creating VLR in VNS """
+        vld_fields = ["short_name",
+                      "vendor",
+                      "description",
+                      "version",
+                      "type_yang",
+                      "vim_network_name",
+                      "provider_network"]
+
+        vld_copy_dict = {k: v for k, v in self.vld_msg.as_dict().items()
+                         if k in vld_fields}
+
+        vlr_dict = {"id": self._vlr_id,
+                    "nsr_id_ref": self._nsr_id,
+                    "vld_ref": self.vld_msg.id,
+                    "name": self.name,
+                    "cloud_account": self.cloud_account_name,
+                    }
+
+        if self._ip_profile and self._ip_profile.has_field('ip_profile_params'):
+            vlr_dict['ip_profile_params'] = self._ip_profile.ip_profile_params.as_dict()
+
+        vlr_dict.update(vld_copy_dict)
+        vlr = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.from_dict(vlr_dict)
+        return vlr
+
+    def reset_id(self, vlr_id):
+        self._vlr_id = vlr_id
+
+    def create_nsr_vlr_msg(self, vnfrs):
+        """ The VLR message"""
+        nsr_vlr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vlr()
+        nsr_vlr.vlr_ref = self._vlr_id
+        nsr_vlr.assigned_subnet = self.assigned_subnet
+        nsr_vlr.cloud_account = self.cloud_account_name
+
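+        # Cross-reference every VNFD connection point in the VLD with the
+        # instantiated VNFRs (matching VNFD id, member index and cloud
+        # account) so the NSR opdata records which VNFRs use this VLR.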
+        for conn in self.vld_msg.vnfd_connection_point_ref:
+            for vnfr in vnfrs:
+                if (vnfr.vnfd.id == conn.vnfd_id_ref and
+                        vnfr.member_vnf_index == conn.member_vnf_index_ref and
+                        self.cloud_account_name == vnfr.cloud_account_name):
+                    cp_entry = nsr_vlr.vnfr_connection_point_ref.add()
+                    cp_entry.vnfr_id = vnfr.id
+                    cp_entry.connection_point = conn.vnfd_connection_point_ref
+
+        return nsr_vlr
+
+    @asyncio.coroutine
+    def instantiate(self):
+        """ Instantiate this VL """
+
+        self._log.debug("Instaniating VLR key %s, vld %s",
+                        self.xpath, self._vld_msg)
+        vlr = None
+        self._state = VlRecordState.INSTANTIATION_PENDING
+        self._log.debug("Executing VL create path:%s msg:%s",
+                        self.xpath, self.vlr_msg)
+
+        with self._dts.transaction(flags=0) as xact:
+            block = xact.block_create()
+            block.add_query_create(self.xpath, self.vlr_msg)
+            self._log.debug("Executing VL create path:%s msg:%s",
+                            self.xpath, self.vlr_msg)
+            res_iter = yield from block.execute(now=True)
+            for ent in res_iter:
+                res = yield from ent
+                vlr = res.result
+
+            if vlr is None:
+                self._state = VlRecordState.FAILED
+                raise NsrInstantiationFailed("Failed NS %s instantiation due to empty response" % self._nsr_id)
+
+        if vlr.operational_status == 'failed':
+            self._log.debug("NS Id:%s VL creation failed for vlr id %s", self.id, vlr.id)
+            self._state = VlRecordState.FAILED
+            raise NsrInstantiationFailed("Failed VL %s instantiation (%s)" % (vlr.id, vlr.operational_status_details))
+
+        self._log.info("Instantiated VL with xpath %s and vlr:%s",
+                       self.xpath, vlr)
+        self._state = VlRecordState.ACTIVE
+        self._assigned_subnet = vlr.assigned_subnet
+
+    def vlr_in_vns(self):
+        """ Is there a VLR record in VNS """
+        if (self._state == VlRecordState.ACTIVE or
+            self._state == VlRecordState.INSTANTIATION_PENDING or
+            self._state == VlRecordState.TERMINATE_PENDING or
+            self._state == VlRecordState.FAILED):
+            return True
+
+        return False
+
+    @asyncio.coroutine
+    def terminate(self):
+        """ Terminate this VL """
+        if not self.vlr_in_vns():
+            self._log.debug("Ignoring terminate request for id %s in state %s",
+                            self.id, self._state)
+            return
+
+        self._log.debug("Terminating VL id:%s", self.id)
+        self._state = VlRecordState.TERMINATE_PENDING
+
+        with self._dts.transaction(flags=0) as xact:
+            block = xact.block_create()
+            block.add_query_delete(self.xpath)
+            yield from block.execute(flags=0, now=True)
+
+        self._state = VlRecordState.TERMINATED
+        self._log.debug("Terminated VL id:%s", self.id)
+
+
+class VnfRecordState(Enum):
+    """ Vnf Record State """
+    INIT = 101
+    INSTANTIATION_PENDING = 102
+    ACTIVE = 103
+    TERMINATE_PENDING = 104
+    TERMINATED = 105
+    FAILED = 106
+
+
+class VirtualNetworkFunctionRecord(object):
+    """ Virtual Network Function Record class"""
+    XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+
+    @staticmethod
+    @asyncio.coroutine
+    def create_record(dts, log, loop, vnfd, const_vnfd_msg, nsd_id, nsr_name,
+                cloud_account_name, nsr_id, group_name, group_instance_id,
+                placement_groups, restart_mode=False):
+        """Creates a new VNFR object based on the given data.
+
+        If restart mode is enabled, then we look for existing records in the
+        DTS and create the VNFR record using the existing data (ID).
+
+        Returns:
+            VirtualNetworkFunctionRecord
+        """
+        vnfr_obj = VirtualNetworkFunctionRecord(
+                          dts,
+                          log,
+                          loop,
+                          vnfd,
+                          const_vnfd_msg,
+                          nsd_id,
+                          nsr_name,
+                          cloud_account_name,
+                          nsr_id,
+                          group_name,
+                          group_instance_id,
+                          placement_groups,
+                          restart_mode=restart_mode)
+
+        if restart_mode:
+            res_iter = yield from dts.query_read(
+                              "D,/vnfr:vnfr-catalog/vnfr:vnfr",
+                              rwdts.XactFlag.MERGE)
+
+            for fut in res_iter:
+                response = yield from fut
+                vnfr = response.result
+
+                if vnfr.name == vnfr_obj.name:
+                    vnfr_obj.reset_id(vnfr.id)
+                    break
+
+        return vnfr_obj
+
+    def __init__(self,
+                 dts,
+                 log,
+                 loop,
+                 vnfd,
+                 const_vnfd_msg,
+                 nsd_id,
+                 nsr_name,
+                 cloud_account_name,
+                 nsr_id,
+                 group_name=None,
+                 group_instance_id=None,
+                 placement_groups=None,
+                 restart_mode=False):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._vnfd = vnfd
+        self._const_vnfd_msg = const_vnfd_msg
+        self._nsd_id = nsd_id
+        self._nsr_name = nsr_name
+        self._nsr_id = nsr_id
+        self._cloud_account_name = cloud_account_name
+        self._group_name = group_name
+        self._group_instance_id = group_instance_id
+        self._placement_groups = placement_groups if placement_groups is not None else []
+        self._config_status = NsrYang.ConfigStates.INIT
+
+        self._prev_state = VnfRecordState.INIT
+        self._state = VnfRecordState.INIT
+        self._state_failed_reason = None
+
+        self.config_store = rift.mano.config_data.config.ConfigStore(self._log)
+        self.configure()
+
+        self._vnfr_id = str(uuid.uuid4())
+        self._name = None
+        self._vnfr_msg = self.create_vnfr_msg()
+        self._log.debug("Set VNFR {} config type to {}".
+                        format(self.name, self.config_type))
+        self.restart_mode = restart_mode
+
+
+        if group_name is None and group_instance_id is not None:
+            raise ValueError("Group instance id must not be provided with an empty group name")
+
+    @property
+    def id(self):
+        """ VNFR id """
+        return self._vnfr_id
+
+    @property
+    def xpath(self):
+        """ VNFR xpath """
+        return "D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id = '{}']".format(self.id)
+
+    @property
+    def vnfr_msg(self):
+        """ VNFR message """
+        return self._vnfr_msg
+
+    @property
+    def const_vnfr_msg(self):
+        """ VNFR message """
+        return RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConstituentVnfrRef(vnfr_id=self.id,cloud_account=self.cloud_account_name)
+
+    @property
+    def vnfd(self):
+        """ vnfd """
+        return self._vnfd
+
+    @property
+    def cloud_account_name(self):
+        """ Cloud account that this VNF should be created in """
+        return self._cloud_account_name
+
+
+    @property
+    def active(self):
+        """ Is this VNF actve """
+        return True if self._state == VnfRecordState.ACTIVE else False
+
+    @property
+    def state(self):
+        """ state of this VNF """
+        return self._state
+
+    @property
+    def state_failed_reason(self):
+        """ Error message in case this VNF is in failed state """
+        return self._state_failed_reason
+
+    @property
+    def member_vnf_index(self):
+        """ Member VNF index """
+        return self._const_vnfd_msg.member_vnf_index
+
+    @property
+    def nsr_name(self):
+        """ NSR name"""
+        return self._nsr_name
+
+    @property
+    def name(self):
+        """ Name of this VNFR """
+        if self._name is not None:
+            return self._name
+
+        name_tags = [self._nsr_name]
+
+        if self._group_name is not None:
+            name_tags.append(self._group_name)
+
+        if self._group_instance_id is not None:
+            name_tags.append(str(self._group_instance_id))
+
+        name_tags.extend([self.vnfd.name, str(self.member_vnf_index)])
+
+        self._name = "__".join(name_tags)
+
+        return self._name
+
+    @staticmethod
+    def vnfr_xpath(vnfr):
+        """ Get the VNFR path from VNFR """
+        return (VirtualNetworkFunctionRecord.XPATH + "[vnfr:id = '{}']").format(vnfr.id)
+
+    @property
+    def config_type(self):
+        cfg_types = ['netconf', 'juju', 'script']
+        for method in cfg_types:
+            if self._vnfd.vnf_configuration.has_field(method):
+                return method
+        return 'none'
+
+    @property
+    def config_status(self):
+        """Return the config status as YANG ENUM string"""
+        self._log.debug("Map VNFR {} config status {} ({})".
+                        format(self.name, self._config_status, self.config_type))
+        if self.config_type == 'none':
+            return 'config_not_needed'
+        elif self._config_status == NsrYang.ConfigStates.CONFIGURED:
+            return 'configured'
+        elif self._config_status == NsrYang.ConfigStates.FAILED:
+            return 'failed'
+
+        return 'configuring'
+
+    def set_state(self, state):
+        """ set the state of this object """
+        self._prev_state = self._state
+        self._state = state
+
+    def reset_id(self, vnfr_id):
+        self._vnfr_id = vnfr_id
+        self._vnfr_msg = self.create_vnfr_msg()
+
+    def configure(self):
+        self.config_store.merge_vnfd_config(
+                    self._nsd_id,
+                    self._vnfd,
+                    self.member_vnf_index,
+                    )
+
+    def create_vnfr_msg(self):
+        """ VNFR message for this VNFR """
+        vnfd_fields = [
+                "short_name",
+                "vendor",
+                "description",
+                "version",
+                "type_yang",
+                ]
+        vnfd_copy_dict = {k: v for k, v in self._vnfd.as_dict().items() if k in vnfd_fields}
+        vnfr_dict = {
+                "id": self.id,
+                "nsr_id_ref": self._nsr_id,
+                "vnfd_ref": self.vnfd.id,
+                "name": self.name,
+                "cloud_account": self._cloud_account_name,
+                "config_status": self.config_status
+                }
+        vnfr_dict.update(vnfd_copy_dict)
+
+        vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
+        vnfr.member_vnf_index_ref = self.member_vnf_index
+        vnfr.vnf_configuration.from_dict(self._vnfd.vnf_configuration.as_dict())
+
+        if self._vnfd.mgmt_interface.has_field("port"):
+            vnfr.mgmt_interface.port = self._vnfd.mgmt_interface.port
+
+        for group_info in self._placement_groups:
+            group = vnfr.placement_groups_info.add()
+            group.from_dict(group_info.as_dict())
+
+        # UI expects the monitoring param field to exist
+        vnfr.monitoring_param = []
+
+        self._log.debug("Get vnfr_msg for VNFR {} : {}".format(self.name, vnfr))
+        return vnfr
+
+    @asyncio.coroutine
+    def update_vnfm(self):
+        self._log.debug("Send an update to VNFM for VNFR {} with {}".
+                        format(self.name, self.vnfr_msg))
+        yield from self._dts.query_update(
+                self.xpath,
+                rwdts.XactFlag.TRACE,
+                self.vnfr_msg
+                )
+
+    def get_config_status(self):
+        """Return the config status as YANG ENUM"""
+        return self._config_status
+
+    @asyncio.coroutine
+    def set_config_status(self, status):
+
+        def status_to_string(status):
+            status_dc = {
+                NsrYang.ConfigStates.INIT : 'init',
+                NsrYang.ConfigStates.CONFIGURING : 'configuring',
+                NsrYang.ConfigStates.CONFIG_NOT_NEEDED : 'config_not_needed',
+                NsrYang.ConfigStates.CONFIGURED : 'configured',
+                NsrYang.ConfigStates.FAILED : 'failed',
+            }
+
+            return status_dc[status]
+
+        self._log.debug("Update VNFR {} from {} ({}) to {}".
+                        format(self.name, self._config_status,
+                               self.config_type, status))
+        if self._config_status == NsrYang.ConfigStates.CONFIGURED:
+            self._log.error("Updating already configured VNFR {}".
+                            format(self.name))
+            return
+
+        if self._config_status != status:
+            try:
+                self._config_status = status
+                # Keep the cached VNFR message in sync; config_status in the
+                # message is a YANG enum string, so convert before assigning.
+                self.vnfr_msg.config_status = status_to_string(status)
+            except Exception as e:
+                self._log.error("Failed to update config status in vnfr_msg: %s", str(e))
+
+            self._log.debug("Updated VNFR {} status to {}".format(self.name, status))
+
+            if self._config_status != NsrYang.ConfigStates.INIT:
+                try:
+                    # Publish only after VNFM has the VNFR created
+                    yield from self.update_vnfm()
+                except Exception as e:
+                    self._log.error("Exception updating VNFM with new status {} of VNFR {}: {}".
+                                format(status, self.name, e))
+                    self._log.exception(e)
+
+    def is_configured(self):
+        if self.config_type == 'none':
+            return True
+
+        if self._config_status == NsrYang.ConfigStates.CONFIGURED:
+            return True
+
+        return False
+
+    @asyncio.coroutine
+    def instantiate(self, nsr):
+        """ Instantiate this VNFR"""
+
+        self._log.debug("Instaniating VNFR key %s, vnfd %s",
+                        self.xpath, self._vnfd)
+
+        self._log.debug("Create VNF with xpath %s and vnfr %s",
+                        self.xpath, self.vnfr_msg)
+
+        self.set_state(VnfRecordState.INSTANTIATION_PENDING)
+
+        def find_vlr_for_cp(conn):
+            """ Find VLR for the given connection point """
+            for vlr in nsr.vlrs:
+                for vnfd_cp in vlr.vld_msg.vnfd_connection_point_ref:
+                    if (vnfd_cp.vnfd_id_ref == self._vnfd.id and
+                            vnfd_cp.vnfd_connection_point_ref == conn.name and
+                            vnfd_cp.member_vnf_index_ref == self.member_vnf_index and
+                             vlr.cloud_account_name == self.cloud_account_name):
+                        self._log.debug("Found VLR for cp_name:%s and vnf-index:%d",
+                                        conn.name, self.member_vnf_index)
+                        return vlr
+            return None
+
+        # For every connection point in the VNFD fill in the identifier
+        for conn_p in self._vnfd.connection_point:
+            cpr = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint()
+            cpr.name = conn_p.name
+            cpr.type_yang = conn_p.type_yang
+            vlr_ref = find_vlr_for_cp(conn_p)
+            if vlr_ref is None:
+                msg = "Failed to find VLR for cp = %s" % conn_p.name
+                self._log.debug("%s", msg)
+#                raise VirtualNetworkFunctionRecordError(msg)
+                continue
+
+            cpr.vlr_ref = vlr_ref.id
+            self.vnfr_msg.connection_point.append(cpr)
+            self._log.debug("Connection point [%s] added, vnf id=%s vnfd id=%s",
+                            cpr, self.vnfr_msg.id, self.vnfr_msg.vnfd_ref)
+
+        if not self.restart_mode:
+            yield from self._dts.query_create(self.xpath,
+                                              0,   # no xact flags
+                                              self.vnfr_msg)
+        else:
+            yield from self._dts.query_update(self.xpath,
+                                              0,
+                                              self.vnfr_msg)
+
+        self._log.info("Created VNF with xpath %s and vnfr %s",
+                       self.xpath, self.vnfr_msg)
+
+        self._log.info("Instantiated VNFR with xpath %s and vnfd %s, vnfr %s",
+                       self.xpath, self._vnfd, self.vnfr_msg)
+
+    @asyncio.coroutine
+    def update_state(self, vnfr_msg):
+        """ Update this VNFR"""
+        if vnfr_msg.operational_status == "running":
+            if self.vnfr_msg.operational_status != "running":
+                yield from self.is_active()
+        elif vnfr_msg.operational_status == "failed":
+            yield from self.instantiation_failed(failed_reason=vnfr_msg.operational_status_details)
+
+    @asyncio.coroutine
+    def is_active(self):
+        """ This VNFR is active """
+        self._log.debug("VNFR %s is active", self._vnfr_id)
+        self.set_state(VnfRecordState.ACTIVE)
+
+    @asyncio.coroutine
+    def instantiation_failed(self, failed_reason=None):
+        """ This VNFR instantiation failed"""
+        self._log.error("VNFR %s instantiation failed", self._vnfr_id)
+        self.set_state(VnfRecordState.FAILED)
+        self._state_failed_reason = failed_reason
+
+    def vnfr_in_vnfm(self):
+        """ Is there a VNFR record in VNFM """
+        if (self._state == VnfRecordState.ACTIVE or
+                self._state == VnfRecordState.INSTANTIATION_PENDING or
+                self._state == VnfRecordState.FAILED):
+            return True
+
+        return False
+
+    @asyncio.coroutine
+    def terminate(self):
+        """ Terminate this VNF """
+        if not self.vnfr_in_vnfm():
+            self._log.debug("Ignoring terminate request for id %s in state %s",
+                            self.id, self._state)
+            return
+
+        self._log.debug("Terminating VNF id:%s", self.id)
+        self.set_state(VnfRecordState.TERMINATE_PENDING)
+        with self._dts.transaction(flags=0) as xact:
+            block = xact.block_create()
+            block.add_query_delete(self.xpath)
+            yield from block.execute(flags=0)
+        self.set_state(VnfRecordState.TERMINATED)
+        self._log.debug("Terminated VNF id:%s", self.id)
+
+
+class NetworkServiceStatus(object):
+    """ A class representing the Network service's status """
+    MAX_EVENTS_RECORDED = 10
+    """ Network service Status class"""
+    def __init__(self, dts, log, loop):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+
+        self._state = NetworkServiceRecordState.INIT
+        self._events = deque([])
+
+    @asyncio.coroutine
+    def create_notification(self, evt, evt_desc, evt_details):
+        xp = "N,/rw-nsr:nsm-notification"
+        notif = RwNsrYang.YangNotif_RwNsr_NsmNotification()
+        notif.event = evt
+        notif.description = evt_desc
+        if evt_details is not None:
+            notif.details = evt_details
+
+        yield from self._dts.query_create(xp, rwdts.XactFlag.ADVISE, notif)
+        self._log.info("Notification called by creating dts query: %s", notif)
+
+    def record_event(self, evt, evt_desc, evt_details):
+        """ Record an event """
+        self._log.debug("Recording event - evt %s, evt_descr %s len = %s",
+                        evt, evt_desc, len(self._events))
+        if len(self._events) >= NetworkServiceStatus.MAX_EVENTS_RECORDED:
+            self._events.popleft()
+        self._events.append((int(time.time()), evt, evt_desc,
+                             evt_details if evt_details is not None else None))
+
+        self._loop.create_task(self.create_notification(evt,evt_desc,evt_details))
+
+    def set_state(self, state):
+        """ set the state of this status object """
+        self._state = state
+
+    def yang_str(self):
+        """ Return the state as a yang enum string """
+        state_to_str_map = {"INIT": "init",
+                            "VL_INIT_PHASE": "vl_init_phase",
+                            "VNF_INIT_PHASE": "vnf_init_phase",
+                            "VNFFG_INIT_PHASE": "vnffg_init_phase",
+                            "SCALING_GROUP_INIT_PHASE": "scaling_group_init_phase",
+                            "RUNNING": "running",
+                            "SCALING_OUT": "scaling_out",
+                            "SCALING_IN": "scaling_in",
+                            "TERMINATE_RCVD": "terminate_rcvd",
+                            "TERMINATE": "terminate",
+                            "VL_TERMINATE_PHASE": "vl_terminate_phase",
+                            "VNF_TERMINATE_PHASE": "vnf_terminate_phase",
+                            "VNFFG_TERMINATE_PHASE": "vnffg_terminate_phase",
+                            "TERMINATED": "terminated",
+                            "FAILED": "failed",
+                            "VL_INSTANTIATE": "vl_instantiate",
+                            "VL_TERMINATE": "vl_terminate",
+        }
+        return state_to_str_map[self._state.name]
+
+    @property
+    def state(self):
+        """ State of this status object """
+        return self._state
+
+    @property
+    def msg(self):
+        """ Network Service Record as a message"""
+        event_list = []
+        idx = 1
+        for entry in self._events:
+            event = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_OperationalEvents()
+            event.id = idx
+            idx += 1
+            event.timestamp, event.event, event.description, event.details = entry
+            event_list.append(event)
+        return event_list
+
+
+class NetworkServiceRecord(object):
+    """ Network service record """
+    XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr"
+
+    def __init__(self, dts, log, loop, nsm, nsm_plugin, nsr_cfg_msg, sdn_account_name, restart_mode=False):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsm = nsm
+        self._nsr_cfg_msg = nsr_cfg_msg
+        self._nsm_plugin = nsm_plugin
+        self._sdn_account_name = sdn_account_name
+
+        self._nsd = None
+        self._nsr_msg = None
+        self._nsr_regh = None
+        self._vlrs = []
+        self._vnfrs = {}
+        self._vnfds = {}
+        self._vnffgrs = {}
+        self._param_pools = {}
+        self._scaling_groups = {}
+        self._create_time = int(time.time())
+        self._op_status = NetworkServiceStatus(dts, log, loop)
+        self._config_status = NsrYang.ConfigStates.CONFIGURING
+        self._config_status_details = None
+        self._job_id = 0
+        self.restart_mode = restart_mode
+        self.config_store = rift.mano.config_data.config.ConfigStore(self._log)
+        self._debug_running = False
+        self._is_active = False
+        self._vl_phase_completed = False
+        self._vnf_phase_completed = False
+
+
+        # Initialise the state to INIT.
+        # The NSR moves through the following transitions:
+        # 1. INIT -> VLS_READY once all the VLs in the NSD are created
+        # 2. VLS_READY -> VNFS_READY when all the VNFs in the NSD are created
+        # 3. VNFS_READY -> READY when the NSR is published
+
+        self.set_state(NetworkServiceRecordState.INIT)
+
+        self.substitute_input_parameters = InputParameterSubstitution(self._log)
+
+    @property
+    def nsm_plugin(self):
+        """ NSM Plugin """
+        return self._nsm_plugin
+
+    def set_state(self, state):
+        """ Set state for this NSR"""
+        self._log.debug("Setting state to %s", state)
+        # We are in an init phase and are moving to the next state.
+        # The new state could be FAILED or VNF_INIT_PHASE.
+        if self.state == NetworkServiceRecordState.VL_INIT_PHASE:
+            self._vl_phase_completed = True
+
+        if self.state == NetworkServiceRecordState.VNF_INIT_PHASE:
+            self._vnf_phase_completed = True
+
+        self._op_status.set_state(state)
+
+    @property
+    def id(self):
+        """ Get id for this NSR"""
+        return self._nsr_cfg_msg.id
+
+    @property
+    def name(self):
+        """ Name of this network service record """
+        return self._nsr_cfg_msg.name
+
+    @property
+    def cloud_account_name(self):
+        return self._nsr_cfg_msg.cloud_account
+
+    @property
+    def state(self):
+        """State of this NetworkServiceRecord"""
+        return self._op_status.state
+
+    @property
+    def active(self):
+        """ Is this NSR active ?"""
+        return True if self._op_status.state == NetworkServiceRecordState.RUNNING else False
+
+    @property
+    def vlrs(self):
+        """ VLRs associated with this NSR"""
+        return self._vlrs
+
+    @property
+    def vnfrs(self):
+        """ VNFRs associated with this NSR"""
+        return self._vnfrs
+
+    @property
+    def vnffgrs(self):
+        """ VNFFGRs associated with this NSR"""
+        return self._vnffgrs
+
+    @property
+    def scaling_groups(self):
+        """ Scaling groups associated with this NSR """
+        return self._scaling_groups
+
+    @property
+    def param_pools(self):
+        """ Parameter value pools associated with this NSR"""
+        return self._param_pools
+
+    @property
+    def nsr_cfg_msg(self):
+        return self._nsr_cfg_msg
+
+    @nsr_cfg_msg.setter
+    def nsr_cfg_msg(self, msg):
+        self._nsr_cfg_msg = msg
+
+    @property
+    def nsd_msg(self):
+        """ NSD Protobuf for this NSR """
+        if self._nsd is not None:
+            return self._nsd
+        self._nsd = self._nsr_cfg_msg.nsd
+        return self._nsd
+
+    @property
+    def nsd_id(self):
+        """ NSD ID for this NSR """
+        return self.nsd_msg.id
+
+    @property
+    def job_id(self):
+        ''' Get a new job id for a config primitive (increments on each access) '''
+        self._job_id += 1
+        return self._job_id
+
+    @property
+    def config_status(self):
+        """ Config status for NSR """
+        return self._config_status
+
+    def resolve_placement_group_cloud_construct(self, input_group):
+        """
+        Returns the cloud specific construct for placement group
+        """
+        copy_dict = ['name', 'requirement', 'strategy']
+
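+        # Merge the cloud-specific construct fields supplied in the NSR config
+        # (nsd_placement_group_maps) with the name/requirement/strategy fields
+        # defined on the NSD placement group itself.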
+        for group_info in self._nsr_cfg_msg.nsd_placement_group_maps:
+            if group_info.placement_group_ref == input_group.name:
+                group = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_PlacementGroupsInfo()
+                group_dict = {k: v for k, v in
+                              group_info.as_dict().items() if k != 'placement_group_ref'}
+                for param in copy_dict:
+                    group_dict.update({param: getattr(input_group, param)})
+                group.from_dict(group_dict)
+                return group
+        return None
+
+
+    def __str__(self):
+        return "NSR(name={}, nsd_id={}, cloud_account={})".format(
+                self.name, self.nsd_id, self.cloud_account_name
+                )
+
+    def _get_vnfd(self, vnfd_id, config_xact):
+        """  Fetch vnfd msg for the passed vnfd id """
+        return self._nsm.get_vnfd(vnfd_id, config_xact)
+
+    def _get_vnfd_cloud_account(self, vnfd_member_index):
+        """  Fetch Cloud Account for the passed vnfd id """
+        if self._nsr_cfg_msg.vnf_cloud_account_map:
+            vim_accounts = [vnf.cloud_account for vnf in self._nsr_cfg_msg.vnf_cloud_account_map
+                            if vnfd_member_index == vnf.member_vnf_index_ref]
+            if vim_accounts and vim_accounts[0]:
+                return vim_accounts[0]
+        return self.cloud_account_name
+
+    def _get_constituent_vnfd_msg(self, vnf_index):
+        for const_vnfd in self.nsd_msg.constituent_vnfd:
+            if const_vnfd.member_vnf_index == vnf_index:
+                return const_vnfd
+
+        raise ValueError("Constituent VNF index %s not found" % vnf_index)
+
+    def record_event(self, evt, evt_desc, evt_details=None, state=None):
+        """ Record an event """
+        self._op_status.record_event(evt, evt_desc, evt_details)
+        if state is not None:
+            self.set_state(state)
+
+    def scaling_trigger_str(self, trigger):
+        SCALING_TRIGGER_STRS = {
+            NsdYang.ScalingTrigger.PRE_SCALE_IN : 'pre-scale-in',
+            NsdYang.ScalingTrigger.POST_SCALE_IN : 'post-scale-in',
+            NsdYang.ScalingTrigger.PRE_SCALE_OUT : 'pre-scale-out',
+            NsdYang.ScalingTrigger.POST_SCALE_OUT : 'post-scale-out',
+        }
+        try:
+            return SCALING_TRIGGER_STRS[trigger]
+        except Exception as e:
+            self._log.error("Scaling trigger mapping error for {} : {}".
+                            format(trigger, e))
+            self._log.exception(e)
+            return "Unknown trigger"
+
+    @asyncio.coroutine
+    def instantiate_vls(self):
+        """
+        This function instantiates VLs for every VL in this Network Service
+        """
+        self._log.debug("Instantiating %d VLs in NSD id %s", len(self._vlrs),
+                        self.id)
+        for vlr in self._vlrs:
+            yield from self.nsm_plugin.instantiate_vl(self, vlr)
+            vlr.state = VlRecordState.ACTIVE
+
+    @asyncio.coroutine
+    def create(self, config_xact):
+        """ Create this network service"""
+        # Create virtual links  for all the external vnf
+        # connection points in this NS
+        yield from self.create_vls()
+
+        # Create VNFs in this network service
+        yield from self.create_vnfs(config_xact)
+
+        # Create VNFFG for network service
+        self.create_vnffgs()
+
+        # Create Scaling Groups for each scaling group in NSD
+        self.create_scaling_groups()
+
+        # Create Parameter Pools
+        self.create_param_pools()
+
+    @asyncio.coroutine
+    def apply_scale_group_config_script(self, script, group, scale_instance, trigger, vnfrs=None):
+        """ Apply config based on script for scale group """
+
+        @asyncio.coroutine
+        def add_vnfrs_data(vnfrs_list):
+            """ Add as a dict each of the VNFRs data """
+            vnfrs_data = []
+            for vnfr in vnfrs_list:
+                self._log.debug("Add VNFR {} data".format(vnfr))
+                vnfr_data = dict()
+                vnfr_data['name'] = vnfr.name
+                if trigger in [NsdYang.ScalingTrigger.PRE_SCALE_IN, NsdYang.ScalingTrigger.POST_SCALE_OUT]:
+                    # Get VNF management and other IPs, etc
+                    opdata = yield from self.fetch_vnfr(vnfr.xpath)
+                    self._log.debug("VNFR {} op data: {}".format(vnfr.name, opdata))
+                    try:
+                        vnfr_data['rw_mgmt_ip'] = opdata.mgmt_interface.ip_address
+                        vnfr_data['rw_mgmt_port'] = opdata.mgmt_interface.port
+                    except Exception as e:
+                        self._log.error("Unable to get management IP for vnfr {}:{}".
+                                        format(vnfr.name, e))
+
+                    try:
+                        vnfr_data['connection_points'] = []
+                        for cp in opdata.connection_point:
+                            con_pt = dict()
+                            con_pt['name'] = cp.name
+                            con_pt['ip_address'] = cp.ip_address
+                            vnfr_data['connection_points'].append(con_pt)
+                    except Exception as e:
+                        self._log.error("Exception getting connections points for VNFR {}: {}".
+                                        format(vnfr.name, e))
+
+                vnfrs_data.append(vnfr_data)
+                self._log.debug("VNFRs data: {}".format(vnfrs_data))
+
+            return vnfrs_data
+
+        def add_nsr_data(nsr):
+            nsr_data = dict()
+            nsr_data['name'] = nsr.name
+            return nsr_data
+
+        if script is None or len(script) == 0:
+            self._log.error("Script not provided for scale group config: {}".format(group.name))
+            return False
+
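+        # An absolute script path is used as-is; otherwise the script is
+        # expected to be installed under $RIFT_INSTALL/usr/bin.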
+        if script[0] == '/':
+            path = script
+        else:
+            path = os.path.join(os.environ['RIFT_INSTALL'], "usr/bin", script)
+        if not os.path.exists(path):
+            self._log.error("Config faled for scale group {}: Script does not exist at {}".
+                            format(group.name, path))
+            return False
+
+        # Build a YAML file with all parameters for the script to execute
+        # The data consists of 5 sections
+        # 1. Trigger
+        # 2. Scale group config
+        # 3. VNFRs in the scale group
+        # 4. VNFRs outside scale group
+        # 5. NSR data
+        data = dict()
+        data['trigger'] = group.trigger_map(trigger)
+        data['config'] = group.group_msg.as_dict()
+
+        if vnfrs:
+            data["vnfrs_in_group"] = yield from add_vnfrs_data(vnfrs)
+        else:
+            data["vnfrs_in_group"] = yield from add_vnfrs_data(scale_instance.vnfrs)
+
+        data["vnfrs_others"] = yield from add_vnfrs_data(self.vnfrs.values())
+        data["nsr"] = add_nsr_data(self)
+
+        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
+            tmp_file.write(yaml.dump(data, default_flow_style=True)
+                    .encode("UTF-8"))
+
+        self._log.debug("Creating a temp file: {} with input data: {}".
+                        format(tmp_file.name, data))
+
+        cmd = "{} {}".format(path, tmp_file.name)
+        self._log.debug("Running the CMD: {}".format(cmd))
+        proc = yield from asyncio.create_subprocess_shell(cmd, loop=self._loop)
+        rc = yield from proc.wait()
+        if rc:
+            self._log.error("The script {} for scale group {} config returned: {}".
+                            format(script, group.name, rc))
+            return False
+
+        # Success
+        return True
+
+
+    @asyncio.coroutine
+    def apply_scaling_group_config(self, trigger, group, scale_instance, vnfrs=None):
+        """ Apply the config for the scaling group based on trigger """
+        if group is None or scale_instance is None:
+            return False
+
+        @asyncio.coroutine
+        def update_config_status(success=True, err_msg=None):
+            self._log.debug("Update %s config status to %r : %s",
+                            scale_instance, success, err_msg)
+            if (scale_instance.config_status == "failed"):
+                # Do not update the config status if it is already in failed state
+                return
+
+            if scale_instance.config_status == "configured":
+                # Update only to failed state an already configured scale instance
+                if not success:
+                    scale_instance.config_status = "failed"
+                    scale_instance.config_err_msg = err_msg
+                    yield from self.update_state()
+            else:
+                # We are in configuring state
+                # Only after post scale out mark instance as configured
+                if trigger == NsdYang.ScalingTrigger.POST_SCALE_OUT:
+                    if success:
+                        scale_instance.config_status = "configured"
+                    else:
+                        scale_instance.config_status = "failed"
+                        scale_instance.config_err_msg = err_msg
+                    yield from self.update_state()
+
+        config = group.trigger_config(trigger)
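+        # A scaling group may define no config action for this trigger, in
+        # which case there is nothing to apply.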
+        if config is None:
+            return True
+
+        self._log.debug("Scaling group {} config: {}".format(group.name, config))
+        if config.has_field("ns_config_primitive_name_ref"):
+            config_name = config.ns_config_primitive_name_ref
+            nsd_msg = self.nsd_msg
+            config_primitive = None
+            for ns_cfg_prim in nsd_msg.service_primitive:
+                if ns_cfg_prim.name == config_name:
+                    config_primitive = ns_cfg_prim
+                    break
+
+            if config_primitive is None:
+                raise ValueError("Could not find ns_cfg_prim %s in nsr %s" % (config_name, self.name))
+
+            self._log.debug("Scaling group {} config primitive: {}".format(group.name, config_primitive))
+            if config_primitive.has_field("user_defined_script"):
+                rc = yield from self.apply_scale_group_config_script(config_primitive.user_defined_script,
+                                                                     group, scale_instance, trigger, vnfrs)
+                err_msg = None
+                if not rc:
+                    err_msg = "Failed config for trigger {} using config script '{}'". \
+                              format(self.scaling_trigger_str(trigger),
+                                     config_primitive.user_defined_script)
+                yield from update_config_status(success=rc, err_msg=err_msg)
+                return rc
+            else:
+                err_msg = "Failed config for trigger {} as config script is not specified". \
+                          format(self.scaling_trigger_str(trigger))
+                yield from update_config_status(success=False, err_msg=err_msg)
+                raise NotImplementedError("Only script based config support for scale group for now: {}".
+                                          format(group.name))
+        else:
+            err_msg = "Failed config for trigger {} as config primitive is not specified".\
+                      format(self.scaling_trigger_str(trigger))
+            yield from update_config_status(success=False, err_msg=err_msg)
+            self._log.error("Config primitive not specified for config action in scale group %s" %
+                            (group.name))
+        return False
+
+    def create_scaling_groups(self):
+        """ This function creates a NSScalingGroup for every scaling
+        group defined in he NSD"""
+
+        for scaling_group_msg in self.nsd_msg.scaling_group_descriptor:
+            self._log.debug("Found scaling_group %s in nsr id %s",
+                            scaling_group_msg.name, self.id)
+
+            group_record = scale_group.ScalingGroup(
+                    self._log,
+                    scaling_group_msg
+                    )
+
+            self._scaling_groups[group_record.name] = group_record
+
+    @asyncio.coroutine
+    def create_scale_group_instance(self, group_name, index, config_xact, is_default=False):
+        group = self._scaling_groups[group_name]
+        scale_instance = group.create_instance(index, is_default)
+
+        @asyncio.coroutine
+        def create_vnfs():
+            self._log.debug("Creating %u VNFs associated with NS id %s scaling group %s",
+                            len(self.nsd_msg.constituent_vnfd), self.id, self)
+
+            vnfrs = []
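+            # vnf_index_count_map gives, per member VNF index, how many VNFR
+            # instances this scaling group creates for one group instance.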
+            for vnf_index, count in group.vnf_index_count_map.items():
+                const_vnfd_msg = self._get_constituent_vnfd_msg(vnf_index)
+                vnfd_msg = self._get_vnfd(const_vnfd_msg.vnfd_id_ref, config_xact)
+
+                cloud_account_name = self._get_vnfd_cloud_account(const_vnfd_msg.member_vnf_index)
+                if cloud_account_name is None:
+                    cloud_account_name = self.cloud_account_name
+                for _ in range(count):
+                    vnfr = yield from self.create_vnf_record(vnfd_msg, const_vnfd_msg, cloud_account_name, group_name, index)
+                    scale_instance.add_vnfr(vnfr)
+                    vnfrs.append(vnfr)
+
+            return vnfrs
+
+        @asyncio.coroutine
+        def instantiate_instance():
+            self._log.debug("Creating %s VNFRS", scale_instance)
+            vnfrs = yield from create_vnfs()
+            yield from self.publish()
+
+            self._log.debug("Instantiating %s VNFRS for %s", len(vnfrs), scale_instance)
+            scale_instance.operational_status = "vnf_init_phase"
+            yield from self.update_state()
+
+            try:
+                rc = yield from self.apply_scaling_group_config(NsdYang.ScalingTrigger.PRE_SCALE_OUT,
+                                                                group, scale_instance, vnfrs)
+                if not rc:
+                    self._log.error("Pre scale out config for scale group {} ({}) failed".
+                                    format(group.name, index))
+                    scale_instance.operational_status = "failed"
+                else:
+                    yield from self.instantiate_vnfs(vnfrs)
+
+            except Exception as e:
+                self._log.exception("Failed to begin instantiatiation of vnfs for scale group {}: {}".
+                                    format(group.name, e))
+                self._log.exception(e)
+                scale_instance.operational_status = "failed"
+
+            yield from self.update_state()
+
+        yield from instantiate_instance()
+
+    @asyncio.coroutine
+    def delete_scale_group_instance(self, group_name, index):
+        group = self._scaling_groups[group_name]
+        scale_instance = group.get_instance(index)
+        if scale_instance.is_default:
+            raise ScalingOperationError("Cannot terminate a default scaling group instance")
+
+        scale_instance.operational_status = "terminate"
+        yield from self.update_state()
+
+        @asyncio.coroutine
+        def terminate_instance():
+            self._log.debug("Terminating %s VNFRS" % scale_instance)
+            rc = yield from self.apply_scaling_group_config(NsdYang.ScalingTrigger.PRE_SCALE_IN,
+                                                            group, scale_instance)
+            if not rc:
+                self._log.error("Pre scale in config for scale group {} ({}) failed".
+                                format(group.name, index))
+
+            # Going ahead with terminate, even if there is an error in pre-scale-in config
+            # as this could be result of scale out failure and we need to cleanup this group
+            yield from self.terminate_vnfrs(scale_instance.vnfrs)
+            group.delete_instance(index)
+
+            scale_instance.operational_status = "vnf_terminate_phase"
+            yield from self.update_state()
+
+        yield from terminate_instance()
+
+    @asyncio.coroutine
+    def _update_scale_group_instances_status(self):
+        @asyncio.coroutine
+        def post_scale_out_task(group, instance):
+            # Apply post scale out config once all VNFRs are active
+            rc = yield from self.apply_scaling_group_config(NsdYang.ScalingTrigger.POST_SCALE_OUT,
+                                                            group, instance)
+            instance.operational_status = "running"
+            if rc:
+                self._log.debug("Scale out for group {} and instance {} succeeded".
+                                format(group.name, instance.instance_id))
+            else:
+                self._log.error("Post scale out config for scale group {} ({}) failed".
+                                format(group.name, instance.instance_id))
+
+            yield from self.update_state()
+
+        group_instances = {group: group.instances for group in self._scaling_groups.values()}
+        for group, instances in group_instances.items():
+            self._log.debug("Updating %s instance status", group)
+            for instance in instances:
+                instance_vnf_state_list = [vnfr.state for vnfr in instance.vnfrs]
+                self._log.debug("Got vnfr instance states: %s", instance_vnf_state_list)
+                if instance.operational_status == "vnf_init_phase":
+                    if all([state == VnfRecordState.ACTIVE for state in instance_vnf_state_list]):
+                        instance.operational_status = "running"
+
+                        # Create a task for post scale out to allow us to sleep before attempting
+                        # to configure newly created VMs
+                        self._loop.create_task(post_scale_out_task(group, instance))
+
+                    elif any([state == VnfRecordState.FAILED for state in instance_vnf_state_list]):
+                        self._log.debug("Scale out for group {} and instance {} failed".
+                                        format(group.name, instance.instance_id))
+                        instance.operational_status = "failed"
+
+                elif instance.operational_status == "vnf_terminate_phase":
+                    if all([state == VnfRecordState.TERMINATED for state in instance_vnf_state_list]):
+                        instance.operational_status = "terminated"
+                        rc = yield from self.apply_scaling_group_config(NsdYang.ScalingTrigger.POST_SCALE_IN,
+                                                                         group, instance)
+                        if rc:
+                            self._log.debug("Scale in for group {} and instance {} succeeded".
+                                            format(group.name, instance.instance_id))
+                        else:
+                            self._log.error("Post scale in config for scale group {} ({}) failed".
+                                            format(group.name, instance.instance_id))
+
+    def create_vnffgs(self):
+        """ This function creates VNFFGs for every VNFFG in the NSD
+        associated with this NSR"""
+
+        for vnffgd in self.nsd_msg.vnffgd:
+            self._log.debug("Found vnffgd %s in nsr id %s", vnffgd, self.id)
+            vnffgr = VnffgRecord(self._dts,
+                                 self._log,
+                                 self._loop,
+                                 self._nsm._vnffgmgr,
+                                 self,
+                                 self.name,
+                                 vnffgd,
+                                 self._sdn_account_name
+                                 )
+            self._vnffgrs[vnffgr.id] = vnffgr
+
+    def resolve_vld_ip_profile(self, nsd_msg, vld):
+        if not vld.has_field('ip_profile_ref'):
+            return None
+        profile = [profile for profile in nsd_msg.ip_profiles if profile.name == vld.ip_profile_ref]
+        return profile[0] if profile else None
+
+    @asyncio.coroutine
+    def _create_vls(self, vld, cloud_account):
+        """Create a VLR in the cloud account specified using the given VLD
+        
+        Args:
+            vld : VLD yang obj
+            cloud_account : Cloud account name
+        
+        Returns:
+            VirtualLinkRecord
+        """
+        vlr = yield from VirtualLinkRecord.create_record(
+                self._dts,
+                self._log,
+                self._loop,
+                self.name,
+                vld,
+                cloud_account,
+                self.resolve_vld_ip_profile(self.nsd_msg, vld),
+                self.id,
+                restart_mode=self.restart_mode)
+
+        return vlr
+
+    def _extract_cloud_accounts_for_vl(self, vld):
+        """
+        Extracts the list of cloud accounts from the NS Config obj
+
+        Rules:
+        1. Cloud accounts based connection point (vnf_cloud_account_map)
+        Args:
+            vld : VLD yang object
+
+        Returns:
+            TYPE: Description
+        """
+        cloud_account_list = []
+
+        if self._nsr_cfg_msg.vnf_cloud_account_map:
+            # Handle case where cloud_account is None
+            vnf_cloud_map = {}
+            for vnf in self._nsr_cfg_msg.vnf_cloud_account_map:
+                if vnf.cloud_account is not None:
+                    vnf_cloud_map[vnf.member_vnf_index_ref] = vnf.cloud_account
+
+            for vnfc in vld.vnfd_connection_point_ref:
+                cloud_account = vnf_cloud_map.get(
+                        vnfc.member_vnf_index_ref,
+                        self.cloud_account_name)
+
+                cloud_account_list.append(cloud_account)
+
+        if self._nsr_cfg_msg.vl_cloud_account_map:
+            for vld_map in self._nsr_cfg_msg.vl_cloud_account_map:
+                if vld_map.vld_id_ref == vld.id:
+                    cloud_account_list.extend(vld_map.cloud_accounts)
+
+        # If no config has been provided then fall-back to the default
+        # account
+        if not cloud_account_list:
+            cloud_account_list = [self.cloud_account_name]
+
+        self._log.debug("VL {} cloud accounts: {}".
+                        format(vld.name, cloud_account_list))
+        return set(cloud_account_list)
+
+    @asyncio.coroutine
+    def create_vls(self):
+        """ This function creates VLs for every VLD in the NSD
+        associated with this NSR"""
+        for vld in self.nsd_msg.vld:
+            self._log.debug("Found vld %s in nsr id %s", vld, self.id)
+            cloud_account_list = self._extract_cloud_accounts_for_vl(vld)
+            for account in cloud_account_list:
+                vlr = yield from self._create_vls(vld, account)
+                self._vlrs.append(vlr)
+
+
+    @asyncio.coroutine
+    def create_vl_instance(self, vld):
+        self._log.debug("Create VL for {}: {}".format(self.id, vld.as_dict()))
+        # Check if the VL is already present
+        vlr = None
+        for vl in self._vlrs:
+            if vl.vld_msg.id == vld.id:
+                self._log.debug("The VLD %s already in NSR %s as VLR %s with status %s",
+                                vld.id, self.id, vl.id, vl.state)
+                vlr = vl
+                if vlr.state != VlRecordState.TERMINATED:
+                    err_msg = "VLR for VL %s in NSR %s already instantiated", \
+                              vld, self.id
+                    self._log.error(err_msg)
+                    raise NsrVlUpdateError(err_msg)
+                break
+
+        if vlr is None:
+            cloud_account_list = self._extract_cloud_accounts_for_vl(vld)
+            for account in cloud_account_list:
+                vlr = yield from self._create_vls(vld, account)
+                self._vlrs.append(vlr)
+
+        vlr.state = VlRecordState.INSTANTIATION_PENDING
+        yield from self.update_state()
+
+        try:
+            yield from self.nsm_plugin.instantiate_vl(self, vlr)
+            vlr.state = VlRecordState.ACTIVE
+
+        except Exception as e:
+            err_msg = "Error instantiating VL for NSR {} and VLD {}: {}". \
+                      format(self.id, vld.id, e)
+            self._log.error(err_msg)
+            self._log.exception(e)
+            vlr.state = VlRecordState.FAILED
+
+        yield from self.update_state()
+
+    @asyncio.coroutine
+    def delete_vl_instance(self, vld):
+        for vlr in self._vlrs:
+            if vlr.vld_msg.id == vld.id:
+                self._log.debug("Found VLR %s for VLD %s in NSR %s",
+                                vlr.id, vld.id, self.id)
+                vlr.state = VlRecordState.TERMINATE_PENDING
+                yield from self.update_state()
+
+                try:
+                    yield from self.nsm_plugin.terminate_vl(vlr)
+                    vlr.state = VlRecordState.TERMINATED
+                    self._vlrs.remove(vlr)
+
+                except Exception as e:
+                    err_msg = "Error terminating VL for NSR {} and VLD {}: {}". \
+                              format(self.id, vld.id, e)
+                    self._log.error(err_msg)
+                    self._log.exception(e)
+                    vlr.state = VlRecordState.FAILED
+
+                yield from self.update_state()
+                break
+
+    @asyncio.coroutine
+    def create_vnfs(self, config_xact):
+        """
+        This function creates VNFs for every VNF in the NSD
+        associated with this NSR
+        """
+        self._log.debug("Creating %u VNFs associated with this NS id %s",
+                        len(self.nsd_msg.constituent_vnfd), self.id)
+
+        for const_vnfd in self.nsd_msg.constituent_vnfd:
+            if not const_vnfd.start_by_default:
+                self._log.debug("start_by_default set to False in constituent VNF (%s). Skipping start.",
+                                const_vnfd.member_vnf_index)
+                continue
+
+            vnfd_msg = self._get_vnfd(const_vnfd.vnfd_id_ref, config_xact)
+            cloud_account_name = self._get_vnfd_cloud_account(const_vnfd.member_vnf_index)
+            if cloud_account_name is None:
+                cloud_account_name = self.cloud_account_name
+            yield from self.create_vnf_record(vnfd_msg, const_vnfd, cloud_account_name)
+
+
+    def get_placement_groups(self, vnfd_msg, const_vnfd):
+        placement_groups = []
+        for group in self.nsd_msg.placement_groups:
+            for member_vnfd in group.member_vnfd:
+                if (member_vnfd.vnfd_id_ref == vnfd_msg.id) and \
+                   (member_vnfd.member_vnf_index_ref == const_vnfd.member_vnf_index):
+                    group_info = self.resolve_placement_group_cloud_construct(group)
+                    if group_info is None:
+                        self._log.error("Could not resolve cloud-construct for placement group: %s", group.name)
+                        ### raise PlacementGroupError("Could not resolve cloud-construct for placement group: {}".format(group.name))
+                    else:
+                        self._log.info("Successfully resolved cloud construct for placement group: %s for VNF: %s (Member Index: %s)",
+                                       str(group_info),
+                                       vnfd_msg.name,
+                                       const_vnfd.member_vnf_index)
+                        placement_groups.append(group_info)
+        return placement_groups
+
+    @asyncio.coroutine
+    def create_vnf_record(self, vnfd_msg, const_vnfd, cloud_account_name, group_name=None, group_instance_id=None):
+        # Fetch the VNFD associated with this VNF
+        placement_groups = self.get_placement_groups(vnfd_msg, const_vnfd)
+        self._log.info("Cloud Account for VNF %d is %s",const_vnfd.member_vnf_index,cloud_account_name)
+        self._log.info("Launching VNF: %s (Member Index: %s) in NSD plancement Groups: %s",
+                       vnfd_msg.name,
+                       const_vnfd.member_vnf_index,
+                       [ group.name for group in placement_groups])
+        vnfr = yield from VirtualNetworkFunctionRecord.create_record(self._dts,
+                                            self._log,
+                                            self._loop,
+                                            vnfd_msg,
+                                            const_vnfd,
+                                            self.nsd_id,
+                                            self.name,
+                                            cloud_account_name,
+                                            self.id,
+                                            group_name,
+                                            group_instance_id,
+                                            placement_groups,
+                                            restart_mode=self.restart_mode,
+                                            )
+        if vnfr.id in self._vnfrs:
+            err = "VNF with VNFR id %s already in vnf list" % (vnfr.id,)
+            raise NetworkServiceRecordError(err)
+
+        self._vnfrs[vnfr.id] = vnfr
+        self._nsm.vnfrs[vnfr.id] = vnfr
+
+        yield from vnfr.set_config_status(NsrYang.ConfigStates.INIT)
+
+        self._log.debug("Added VNFR %s to NSM VNFR list with id %s",
+                        vnfr.name,
+                        vnfr.id)
+
+        return vnfr
+
+    def create_param_pools(self):
+        for param_pool in self.nsd_msg.parameter_pool:
+            self._log.debug("Found parameter pool %s in nsr id %s", param_pool, self.id)
+
+            start_value = param_pool.range.start_value
+            end_value = param_pool.range.end_value
+            if end_value < start_value:
+                raise NetworkServiceRecordError(
+                        "Parameter pool {} has invalid range (start: {}, end: {})".format(
+                            param_pool.name, start_value, end_value
+                            )
+                        )
+
+            self._param_pools[param_pool.name] = config_value_pool.ParameterValuePool(
+                    self._log,
+                    param_pool.name,
+                    range(start_value, end_value)
+                    )
+
+    @asyncio.coroutine
+    def fetch_vnfr(self, vnfr_path):
+        """ Fetch VNFR record """
+        vnfr = None
+        self._log.debug("Fetching VNFR with key %s while instantiating %s",
+                        vnfr_path, self.id)
+        res_iter = yield from self._dts.query_read(vnfr_path, rwdts.XactFlag.MERGE)
+
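+        # The read may return more than one result; the last one read is kept.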
+        for ent in res_iter:
+            res = yield from ent
+            vnfr = res.result
+
+        return vnfr
+
+    @asyncio.coroutine
+    def instantiate_vnfs(self, vnfrs):
+        """
+        This function instantiates VNFs for every VNF in this Network Service
+        """
+        self._log.debug("Instantiating %u VNFs in NS %s", len(vnfrs), self.id)
+        for vnf in vnfrs:
+            self._log.debug("Instantiating VNF: %s in NS %s", vnf, self.id)
+            yield from self.nsm_plugin.instantiate_vnf(self, vnf)
+
+    @asyncio.coroutine
+    def instantiate_vnffgs(self):
+        """
+        This function instantiates VNFFGs for every VNFFG in this Network Service
+        """
+        self._log.debug("Instantiating %u VNFFGs in NS %s",
+                        len(self.nsd_msg.vnffgd), self.id)
+        for _, vnfr in self.vnfrs.items():
+            while vnfr.state in [VnfRecordState.INSTANTIATION_PENDING, VnfRecordState.INIT]:
+                self._log.debug("Received vnfr state for vnfr %s is %s; retrying", vnfr.name, vnfr.state)
+                yield from asyncio.sleep(2, loop=self._loop)
+            if vnfr.state == VnfRecordState.ACTIVE:
+                self._log.debug("Received vnfr state for vnfr %s is %s", vnfr.name, vnfr.state)
+                continue
+            else:
+                self._log.debug("Received vnfr state for vnfr %s is %s; failing vnffg creation",
+                                vnfr.name, vnfr.state)
+                self._vnffgr_state = VnffgRecordState.FAILED
+                return
+
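+        # The wait below is a fixed settling delay rather than a readiness
+        # check; VNFFG orchestration assumes the VMs' networking is up by then.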
+        self._log.info("Waiting for 90 seconds for VMs to come up")
+        yield from asyncio.sleep(90, loop=self._loop)
+        self._log.info("Starting VNFFG orchestration")
+        for vnffg in self._vnffgrs.values():
+            self._log.debug("Instantiating VNFFG: %s in NS %s", vnffg, self.id)
+            yield from vnffg.instantiate()
+
+    @asyncio.coroutine
+    def instantiate_scaling_instances(self, config_xact):
+        """ Instantiate any default scaling instances in this Network Service """
+        for group in self._scaling_groups.values():
+            for i in range(group.min_instance_count):
+                self._log.debug("Instantiating %s default scaling instance %s", group, i)
+                yield from self.create_scale_group_instance(
+                        group.name, i, config_xact, is_default=True
+                        )
+
+            for group_msg in self._nsr_cfg_msg.scaling_group:
+                if group_msg.scaling_group_name_ref != group.name:
+                    continue
+
+                for instance in group_msg.instance:
+                    self._log.debug("Reloading %s scaling instance %s", group_msg, instance.id)
+                    yield from self.create_scale_group_instance(
+                            group.name, instance.id, config_xact, is_default=False
+                            )
+
+    def has_scaling_instances(self):
+        """ Return boolean indicating if the network service has default scaling groups """
+        for group in self._scaling_groups.values():
+            if group.min_instance_count > 0:
+                return True
+
+        for group_msg in self._nsr_cfg_msg.scaling_group:
+            if len(group_msg.instance) > 0:
+                return True
+
+        return False
+
+    @asyncio.coroutine
+    def publish(self):
+        """ This function publishes this NSR """
+        self._nsr_msg = self.create_msg()
+
+        self._log.debug("Publishing the NSR with xpath %s and nsr %s",
+                        self.nsr_xpath,
+                        self._nsr_msg)
+
+        if self._debug_running:
+            self._log.debug("Publishing NSR in RUNNING state!")
+            #raise()
+
+        with self._dts.transaction() as xact:
+            yield from self._nsm.nsr_handler.update(xact, self.nsr_xpath, self._nsr_msg)
+            if self._op_status.state == NetworkServiceRecordState.RUNNING:
+                self._debug_running = True
+
+    @asyncio.coroutine
+    def unpublish(self, xact):
+        """ Unpublish this NSR object """
+        self._log.debug("Unpublishing Network service id %s", self.id)
+        yield from self._nsm.nsr_handler.delete(xact, self.nsr_xpath)
+
+    @property
+    def nsr_xpath(self):
+        """ Returns the xpath associated with this NSR """
+        return ("D,/nsr:ns-instance-opdata" +
+                "/nsr:nsr[nsr:ns-instance-config-ref = '{}']").format(self.id)
+
+    @staticmethod
+    def xpath_from_nsr(nsr):
+        """ Returns the xpath associated with this NSR  op data"""
+        return (NetworkServiceRecord.XPATH +
+                "[nsr:ns-instance-config-ref = '{}']").format(nsr.id)
+
+    @property
+    def nsd_xpath(self):
+        """ Return NSD config xpath."""
+        return "C,/nsd:nsd-catalog/nsd:nsd[nsd:id = '{}']".format(self.nsd_id)
+
+    @asyncio.coroutine
+    def instantiate(self, config_xact):
+        """"Instantiates a NetworkServiceRecord.
+
+        This function instantiates a Network service
+        which involves the following steps,
+
+        * Instantiate every VL in NSD by sending create VLR request to DTS.
+        * Instantiate every VNF in NSD by sending create VNF reuqest to DTS.
+        * Publish the NSR details to DTS
+
+        Arguments:
+            nsr:  The NSR configuration request containing nsr-id and nsd
+            config_xact: The configuration transaction which initiated the instatiation
+
+        Raises:
+            NetworkServiceRecordError if the NSR creation fails
+
+        Returns:
+            No return value
+        """
+
+        self._log.debug("Instantiating NS - %s xact - %s", self, config_xact)
+
+        # Move the state to INIT
+        self.set_state(NetworkServiceRecordState.INIT)
+
+        event_descr = "Instantiation Request Received NSR Id:%s" % self.id
+        self.record_event("instantiating", event_descr)
+
+        # Find the NSD
+        self._nsd = self._nsr_cfg_msg.nsd
+
+        try:
+            # Update ref count if nsd present in catalog
+            self._nsm.get_nsd_ref(self.nsd_id)
+
+        except NetworkServiceDescriptorError:
+            # This could be an NSD not in the nsd-catalog
+            pass
+
+        # Merge any config and initial config primitive values
+        self.config_store.merge_nsd_config(self.nsd_msg)
+        self._log.debug("Merged NSD: {}".format(self.nsd_msg.as_dict()))
+
+        event_descr = "Fetched NSD with descriptor id %s" % self.nsd_id
+        self.record_event("nsd-fetched", event_descr)
+
+        if self._nsd is None:
+            msg = "Failed to fetch NSD with nsd-id [%s] for nsr-id %s"
+            self._log.debug(msg, self.nsd_id, self.id)
+            raise NetworkServiceRecordError(self)
+
+        self._log.debug("Got nsd result %s", self._nsd)
+
+        # Substitute any input parameters
+        self.substitute_input_parameters(self._nsd, self._nsr_cfg_msg)
+
+        # Create the record
+        yield from self.create(config_xact)
+
+        # Publish the NSR to DTS
+        yield from self.publish()
+
+        @asyncio.coroutine
+        def do_instantiate():
+            """
+                Instantiate network service
+            """
+            self._log.debug("Instantiating VLs nsr id [%s] nsd id [%s]",
+                            self.id, self.nsd_id)
+
+            # instantiate the VLs
+            event_descr = ("Instantiating %s external VLs for NSR id %s" %
+                           (len(self.nsd_msg.vld), self.id))
+            self.record_event("begin-external-vls-instantiation", event_descr)
+
+            self.set_state(NetworkServiceRecordState.VL_INIT_PHASE)
+
+            yield from self.instantiate_vls()
+
+            # Publish the NSR to DTS
+            yield from self.publish()
+
+            event_descr = ("Finished instantiating %s external VLs for NSR id %s" %
+                           (len(self.nsd_msg.vld), self.id))
+            self.record_event("end-external-vls-instantiation", event_descr)
+
+            self.set_state(NetworkServiceRecordState.VNF_INIT_PHASE)
+
+            self._log.debug("Instantiating VNFs  ...... nsr[%s], nsd[%s]",
+                            self.id, self.nsd_id)
+
+            # instantiate the VNFs
+            event_descr = ("Instantiating %s VNFS for NSR id %s" %
+                           (len(self.nsd_msg.constituent_vnfd), self.id))
+
+            self.record_event("begin-vnf-instantiation", event_descr)
+
+            yield from self.instantiate_vnfs(self._vnfrs.values())
+
+            self._log.debug(" Finished instantiating %d VNFs for NSR id %s",
+                            len(self.nsd_msg.constituent_vnfd), self.id)
+
+            event_descr = ("Finished instantiating %s VNFs for NSR id %s" %
+                           (len(self.nsd_msg.constituent_vnfd), self.id))
+            self.record_event("end-vnf-instantiation", event_descr)
+
+            if len(self.vnffgrs) > 0:
+                #self.set_state(NetworkServiceRecordState.VNFFG_INIT_PHASE)
+                event_descr = ("Instantiating %s VNFFGS for NSR id %s" %
+                               (len(self.nsd_msg.vnffgd), self.id))
+
+                self.record_event("begin-vnffg-instantiation", event_descr)
+
+                yield from self.instantiate_vnffgs()
+
+                event_descr = ("Finished instantiating %s VNFFGDs for NSR id %s" %
+                               (len(self.nsd_msg.vnffgd), self.id))
+                self.record_event("end-vnffg-instantiation", event_descr)
+
+            if self.has_scaling_instances():
+                event_descr = ("Instantiating %s Scaling Groups for NSR id %s" %
+                               (len(self._scaling_groups), self.id))
+
+                self.record_event("begin-scaling-group-instantiation", event_descr)
+                yield from self.instantiate_scaling_instances(config_xact)
+                self.record_event("end-scaling-group-instantiation", event_descr)
+
+            # Give the plugin a chance to deploy the network service now that all
+            # virtual links and vnfs are instantiated
+            yield from self.nsm_plugin.deploy(self._nsr_msg)
+
+            self._log.debug("Publishing  NSR...... nsr[%s], nsd[%s]",
+                            self.id, self.nsd_id)
+
+            # Publish the NSR to DTS
+            yield from self.publish()
+
+            self._log.debug("Published  NSR...... nsr[%s], nsd[%s]",
+                            self.id, self.nsd_id)
+
+        def on_instantiate_done(fut):
+            # If the do_instantiate fails, then publish NSR with failed result
+            if fut.exception() is not None:
+                self._log.error("NSR instantiation failed for NSR id %s: %s", self.id, str(fut.exception()))
+                self._loop.create_task(self.instantiation_failed(failed_reason=str(fut.exception())))
+
+        instantiate_task = self._loop.create_task(do_instantiate())
+        instantiate_task.add_done_callback(on_instantiate_done)
+
+    @asyncio.coroutine
+    def set_config_status(self, status, status_details=None):
+        if self.config_status != status:
+            self._log.debug("Updating NSR {} status for {} to {}".
+                            format(self.name, self.config_status, status))
+            self._config_status = status
+            self._config_status_details = status_details
+
+            if self._config_status == NsrYang.ConfigStates.FAILED:
+                self.record_event("config-failed", "NS configuration failed",
+                        evt_details=self._config_status_details)
+
+            yield from self.publish()
+
+    @asyncio.coroutine
+    def is_active(self):
+        """ This NS is active """
+        self.set_state(NetworkServiceRecordState.RUNNING)
+        if self._is_active:
+            return
+
+        # Publish the NSR to DTS
+        self._log.debug("Network service %s is active ", self.id)
+        self._is_active = True
+
+        event_descr = "NSR in running state for NSR id %s" % self.id
+        self.record_event("ns-running", event_descr)
+
+        yield from self.publish()
+
+    @asyncio.coroutine
+    def instantiation_failed(self, failed_reason=None):
+        """ The NS instantiation failed"""
+        self._log.error("Network service id:%s, name:%s instantiation failed",
+                        self.id, self.name)
+        self.set_state(NetworkServiceRecordState.FAILED)
+
+        event_descr = "Instantiation of NS %s failed" % self.id
+        self.record_event("ns-failed", event_descr, evt_details=failed_reason)
+
+        # Publish the NSR to DTS
+        yield from self.publish()
+
+    @asyncio.coroutine
+    def terminate_vnfrs(self, vnfrs):
+        """ Terminate VNFRS in this network service """
+        self._log.debug("Terminating VNFs in network service %s", self.id)
+        for vnfr in vnfrs:
+            yield from self.nsm_plugin.terminate_vnf(vnfr)
+
+    @asyncio.coroutine
+    def terminate(self):
+        """ Terminate a NetworkServiceRecord."""
+        def terminate_vnffgrs():
+            """ Terminate VNFFGRS in this network service """
+            self._log.debug("Terminating VNFFGRs in network service %s", self.id)
+            for vnffgr in self.vnffgrs.values():
+                yield from vnffgr.terminate()
+
+        def terminate_vlrs():
+            """ Terminate VLRs in this netork service """
+            self._log.debug("Terminating VLs in network service %s", self.id)
+            for vlr in self.vlrs:
+                yield from self.nsm_plugin.terminate_vl(vlr)
+                vlr.state = VlRecordState.TERMINATED
+
+        self._log.debug("Terminating network service id %s", self.id)
+
+        # Move the state to TERMINATE
+        self.set_state(NetworkServiceRecordState.TERMINATE)
+        event_descr = "Terminate being processed for NS Id:%s" % self.id
+        self.record_event("terminate", event_descr)
+
+        # Move the state to VNFFG_TERMINATE_PHASE
+        self._log.debug("Terminating VNFFGs in NS ID: %s", self.id)
+        self.set_state(NetworkServiceRecordState.VNFFG_TERMINATE_PHASE)
+        event_descr = "Terminating VNFFGS in NS Id:%s" % self.id
+        self.record_event("terminating-vnffgss", event_descr)
+        yield from terminate_vnffgrs()
+
+        # Move the state to VNF_TERMINATE_PHASE
+        self.set_state(NetworkServiceRecordState.VNF_TERMINATE_PHASE)
+        event_descr = "Terminating VNFS in NS Id:%s" % self.id
+        self.record_event("terminating-vnfs", event_descr)
+        yield from self.terminate_vnfrs(self.vnfrs.values())
+
+        # Move the state to VL_TERMINATE_PHASE
+        self.set_state(NetworkServiceRecordState.VL_TERMINATE_PHASE)
+        event_descr = "Terminating VLs in NS Id:%s" % self.id
+        self.record_event("terminating-vls", event_descr)
+        yield from terminate_vlrs()
+
+        yield from self.nsm_plugin.terminate_ns(self)
+
+        # Move the state to TERMINATED
+        self.set_state(NetworkServiceRecordState.TERMINATED)
+        event_descr = "Terminated NS Id:%s" % self.id
+        self.record_event("terminated", event_descr)
+
+    def enable(self):
+        """"Enable a NetworkServiceRecord."""
+        pass
+
+    def disable(self):
+        """"Disable a NetworkServiceRecord."""
+        pass
+
+    def map_config_status(self):
+        self._log.debug("Config status for ns {} is {}".
+                        format(self.name, self._config_status))
+        if self._config_status == NsrYang.ConfigStates.CONFIGURING:
+            return 'configuring'
+        if self._config_status == NsrYang.ConfigStates.FAILED:
+            return 'failed'
+        return 'configured'
+
+    def vl_phase_completed(self):
+        """ Are VLs created in this NS?"""
+        return self._vl_phase_completed
+
+    def vnf_phase_completed(self):
+        """ Are VLs created in this NS?"""
+        return self._vnf_phase_completed
+
+    def create_msg(self):
+        """ The network serice record as a message """
+        nsr_dict = {"ns_instance_config_ref": self.id}
+        nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict(nsr_dict)
+        #nsr.cloud_account = self.cloud_account_name
+        nsr.sdn_account = self._sdn_account_name
+        nsr.name_ref = self.name
+        nsr.nsd_ref = self.nsd_id
+        nsr.nsd_name_ref = self.nsd_msg.name
+        nsr.operational_events = self._op_status.msg
+        nsr.operational_status = self._op_status.yang_str()
+        nsr.config_status = self.map_config_status()
+        nsr.config_status_details = self._config_status_details
+        nsr.create_time = self._create_time
+
+        for cfg_prim in self.nsd_msg.service_primitive:
+            cfg_prim = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ServicePrimitive.from_dict(
+                    cfg_prim.as_dict())
+            nsr.service_primitive.append(cfg_prim)
+
+        for init_cfg in self.nsd_msg.initial_config_primitive:
+            prim = NsrYang.NsrInitialConfigPrimitive.from_dict(
+                init_cfg.as_dict())
+            nsr.initial_config_primitive.append(prim)
+
+        if self.vl_phase_completed():
+            for vlr in self.vlrs:
+                nsr.vlr.append(vlr.create_nsr_vlr_msg(self.vnfrs.values()))
+
+        if self.vnf_phase_completed():
+            for vnfr_id in self.vnfrs:
+                nsr.constituent_vnfr_ref.append(self.vnfrs[vnfr_id].const_vnfr_msg)
+            for vnffgr in self.vnffgrs.values():
+                nsr.vnffgr.append(vnffgr.fetch_vnffgr())
+            for scaling_group in self._scaling_groups.values():
+                nsr.scaling_group_record.append(scaling_group.create_record_msg())
+
+        return nsr
+
+    def all_vnfs_active(self):
+        """ Are all VNFS in this NS active? """
+        for _, vnfr in self.vnfrs.items():
+            if vnfr.active is not True:
+                return False
+        return True
+
+    @asyncio.coroutine
+    def update_state(self):
+        """ Re-evaluate this  NS's state """
+        curr_state = self._op_status.state
+
+        if curr_state == NetworkServiceRecordState.TERMINATED:
+            self._log.debug("NS (%s) in terminated state, not updating state", self.id)
+            return
+
+        new_state = NetworkServiceRecordState.RUNNING
+        self._log.info("Received update_state for nsr: %s, curr-state: %s",
+                       self.id, curr_state)
+
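+        # Readiness is evaluated in stages: VNFRs first, then VLs, then
+        # VNFFGRs, and finally the scaling group instance states.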
+        # Check all the VNFRs are present
+        for _, vnfr in self.vnfrs.items():
+            if vnfr.state in [VnfRecordState.ACTIVE, VnfRecordState.TERMINATED]:
+                pass
+            elif vnfr.state == VnfRecordState.FAILED:
+                if vnfr._prev_state != vnfr.state:
+                    event_descr = "Instantiation of VNF %s failed" % vnfr.id
+                    event_error_details = vnfr.state_failed_reason
+                    self.record_event("vnf-failed", event_descr, evt_details=event_error_details)
+                    vnfr.set_state(VnfRecordState.FAILED)
+                else:
+                    self._log.info("VNF state did not change, curr=%s, prev=%s",
+                                   vnfr.state, vnfr._prev_state)
+                new_state = NetworkServiceRecordState.FAILED
+                break
+            else:
+                self._log.info("VNF %s in NSR %s is still not active; current state is: %s",
+                               vnfr.id, self.id, vnfr.state)
+                new_state = curr_state
+
+        # If new state is RUNNING; check all VLs
+        if new_state == NetworkServiceRecordState.RUNNING:
+            for vl in self.vlrs:
+
+                if vl.state in [VlRecordState.ACTIVE, VlRecordState.TERMINATED]:
+                    pass
+                elif vl.state == VlRecordState.FAILED:
+                    if vl.prev_state != vl.state:
+                        event_descr = "Instantiation of VL %s failed" % vl.id
+                        event_error_details = vl.state_failed_reason
+                        self.record_event("vl-failed", event_descr, evt_details=event_error_details)
+                        vl.prev_state = vl.state
+                    else:
+                        self._log.debug("VL %s already in failed state")
+                else:
+                    if vl.state in [VlRecordState.INSTANTIATION_PENDING, VlRecordState.INIT]:
+                        new_state = NetworkServiceRecordState.VL_INSTANTIATE
+                        break
+
+                    if vl.state in [VlRecordState.TERMINATE_PENDING]:
+                        new_state = NetworkServiceRecordState.VL_TERMINATE
+                        break
+
+        # If new state is RUNNING; check VNFFGRs are also active
+        if new_state == NetworkServiceRecordState.RUNNING:
+            for _, vnffgr in self.vnffgrs.items():
+                self._log.info("Checking vnffgr state for nsr %s is: %s",
+                               self.id, vnffgr.state)
+                if vnffgr.state == VnffgRecordState.ACTIVE:
+                    pass
+                elif vnffgr.state == VnffgRecordState.FAILED:
+                    event_descr = "Instantiation of VNFFGR %s failed" % vnffgr.id
+                    self.record_event("vnffg-failed", event_descr)
+                    new_state = NetworkServiceRecordState.FAILED
+                    break
+                else:
+                    self._log.info("VNFFGR %s in NSR %s is still not active; current state is: %s",
+                                    vnffgr.id, self.id, vnffgr.state)
+                    new_state = curr_state
+
+        # Update all the scaling group instance operational status to
+        # reflect the state of all VNFR within that instance
+        yield from self._update_scale_group_instances_status()
+
+        for _, group in self._scaling_groups.items():
+            if group.state == scale_group.ScaleGroupState.SCALING_OUT:
+                new_state = NetworkServiceRecordState.SCALING_OUT
+                break
+            elif group.state == scale_group.ScaleGroupState.SCALING_IN:
+                new_state = NetworkServiceRecordState.SCALING_IN
+                break
+
+        if new_state != curr_state:
+            self._log.debug("Changing state of Network service %s from %s to %s",
+                            self.id, curr_state, new_state)
+            if new_state == NetworkServiceRecordState.RUNNING:
+                yield from self.is_active()
+            elif new_state == NetworkServiceRecordState.FAILED:
+                # If the NS is already active and we entered scaling_in, scaling_out,
+                # do not mark the NS as failing if scaling operation failed.
+                if curr_state in [NetworkServiceRecordState.SCALING_OUT,
+                                  NetworkServiceRecordState.SCALING_IN] and self._is_active:
+                    new_state = NetworkServiceRecordState.RUNNING
+                    self.set_state(new_state)
+                else:
+                    yield from self.instantiation_failed()
+            else:
+                self.set_state(new_state)
+
+        yield from self.publish()
+
+
+class InputParameterSubstitution(object):
+    """
+    This class is responsible for substituting input parameters into an NSD.
+    """
+
+    def __init__(self, log):
+        """Create an instance of InputParameterSubstitution
+
+        Arguments:
+            log - a logger for this object to use
+
+        """
+        self.log = log
+
+    def __call__(self, nsd, nsr_config):
+        """Substitutes input parameters from the NSR config into the NSD
+
+        This call modifies the provided NSD with the input parameters that are
+        contained in the NSR config.
+
+        Arguments:
+            nsd        - a GI NSD object
+            nsr_config - a GI NSR config object
+
+        """
+        if nsd is None or nsr_config is None:
+            return
+
+        # Create a lookup of the xpath elements that this descriptor allows
+        # to be modified
+        optional_input_parameters = set()
+        for input_parameter in nsd.input_parameter_xpath:
+            optional_input_parameters.add(input_parameter.xpath)
+
+        # Apply the input parameters to the descriptor
+        if nsr_config.input_parameter:
+            for param in nsr_config.input_parameter:
+                if param.xpath not in optional_input_parameters:
+                    msg = "tried to set an invalid input parameter ({})"
+                    self.log.error(msg.format(param.xpath))
+                    continue
+
+                self.log.debug(
+                        "input-parameter:{} = {}".format(
+                            param.xpath,
+                            param.value,
+                            )
+                        )
+
+                try:
+                    xpath.setxattr(nsd, param.xpath, param.value)
+
+                except Exception as e:
+                    self.log.exception(e)
+
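+# Illustrative usage (a sketch; the actual call site is elsewhere in this
+# tasklet):
+#
+#   substitute = InputParameterSubstitution(log)
+#   substitute(nsd, nsr_config)   # modifies the GI NSD object in place
+#
+# Only xpaths listed under the NSD's input-parameter-xpath are applied; any
+# other xpath in the NSR config is logged as an error and skipped.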
+
+class NetworkServiceDescriptor(object):
+    """
+    Network service descriptor class
+    """
+
+    def __init__(self, dts, log, loop, nsd, nsm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+
+        self._nsd = nsd
+        self._ref_count = 0
+
+        self._nsm = nsm
+
+    @property
+    def id(self):
+        """ Returns nsd id """
+        return self._nsd.id
+
+    @property
+    def name(self):
+        """ Returns name of nsd """
+        return self._nsd.name
+
+    @property
+    def ref_count(self):
+        """ Returns reference count"""
+        return self._ref_count
+
+    def in_use(self):
+        """ Returns whether nsd is in use or not """
+        return self.ref_count > 0
+
+    def ref(self):
+        """ Take a reference on this object """
+        self._ref_count += 1
+
+    def unref(self):
+        """ Release reference on this object """
+        if self.ref_count < 1:
+            msg = ("Unref on a NSD object - nsd id %s, ref_count = %s" %
+                   (self.id, self.ref_count))
+            self._log.critical(msg)
+            raise NetworkServiceDescriptorError(msg)
+        self._ref_count -= 1
+
+    @property
+    def msg(self):
+        """ Return the message associated with this NetworkServiceDescriptor"""
+        return self._nsd
+
+    @staticmethod
+    def path_for_id(nsd_id):
+        """ Return path for the passed nsd_id"""
+        return "C,/nsd:nsd-catalog/nsd:nsd[nsd:id = '{}'".format(nsd_id)
+
+    def path(self):
+        """ Return the message associated with this NetworkServiceDescriptor"""
+        return NetworkServiceDescriptor.path_for_id(self.id)
+
+    def update(self, nsd):
+        """ Update the NSD descriptor """
+        self._nsd = nsd
+
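+# Illustrative ref-counting contract (assumed usage; see NsManager below):
+#
+#   nsd = nsm.get_nsd_ref(nsd_id)    # ref_count += 1 while an NSR uses the NSD
+#   ...                              # NSR lifetime
+#   nsd.unref()                      # released again on NSR termination
+#
+# nsd_in_use()/in_use() gate deletion, and unref() raises if the count would
+# go negative.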
+
+class NsdDtsHandler(object):
+    """ The network service descriptor DTS handler """
+    XPATH = "C,/nsd:nsd-catalog/nsd:nsd"
+
+    def __init__(self, dts, log, loop, nsm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsm = nsm
+
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ Return registration handle """
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for Nsd create/update/delete/read requests from dts """
+
+        def on_apply(dts, acg, xact, action, scratch):
+            """Apply the  configuration"""
+            is_recovery = xact.xact is None and action == rwdts.AppconfAction.INSTALL
+            self._log.debug("Got nsd apply cfg (xact:%s) (action:%s)",
+                            xact, action)
+            # Create/Update an NSD record
+            for cfg in self._regh.get_xact_elements(xact):
+                # Only interested in those NSD cfgs whose ID was received in prepare callback
+                if cfg.id in scratch.get('nsds', []) or is_recovery:
+                    self._nsm.update_nsd(cfg)
+
+            scratch.pop('nsds', None)
+
+            return RwTypes.RwStatus.SUCCESS
+
+        @asyncio.coroutine
+        def delete_nsd_libs(nsd_id):
+            """ Remove any files uploaded with NSD and stored under $RIFT_ARTIFACTS/libs/<id> """
+            try:
+                rift_artifacts_dir = os.environ['RIFT_ARTIFACTS']
+                nsd_dir = os.path.join(rift_artifacts_dir, 'launchpad/libs', nsd_id)
+
+                if os.path.exists(nsd_dir):
+                    shutil.rmtree(nsd_dir, ignore_errors=True)
+            except Exception as e:
+                self._log.error("Exception in cleaning up NSD libs {}: {}".
+                                format(nsd_id, e))
+                self._log.exception(e)
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+            """ Prepare callback from DTS for NSD config """
+
+            self._log.info("Got nsd prepare - config received nsd id %s, msg %s",
+                           msg.id, msg)
+
+            fref = ProtobufC.FieldReference.alloc()
+            fref.goto_whole_message(msg.to_pbcm())
+
+            if fref.is_field_deleted():
+                # Delete an NSD record
+                self._log.debug("Deleting NSD with id %s", msg.id)
+                if self._nsm.nsd_in_use(msg.id):
+                    self._log.debug("Cannot delete NSD in use - %s", msg.id)
+                    err = "Cannot delete an NSD in use - %s" % msg.id
+                    raise NetworkServiceDescriptorRefCountExists(err)
+
+                yield from delete_nsd_libs(msg.id)
+                self._nsm.delete_nsd(msg.id)
+            else:
+                # Add this NSD to scratch to create/update in apply callback
+                nsds = scratch.setdefault('nsds', [])
+                nsds.append(msg.id)
+                # acg._scratch['nsds'].append(msg.id)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        self._log.debug(
+            "Registering for NSD config using xpath: %s",
+            NsdDtsHandler.XPATH,
+            )
+
+        acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
+        with self._dts.appconf_group_create(handler=acg_hdl) as acg:
+            # Need a list in scratch to store NSDs to create/update later
+            # acg._scratch['nsds'] = list()
+            self._regh = acg.register(
+                xpath=NsdDtsHandler.XPATH,
+                flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+                on_prepare=on_prepare)
+
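+# Illustrative note on the prepare/apply handshake used by NsdDtsHandler above
+# and VnfdDtsHandler below: on_prepare stashes each received descriptor id in
+# the per-transaction scratch (scratch["nsds"] / scratch["vnfds"]), and
+# on_apply re-reads the xact elements, acting only on ids recorded in prepare.
+# NSD deletes are serviced directly in prepare (including library cleanup),
+# while VNFD deletes are deferred to apply via scratch["deleted_vnfds"].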
+
+class VnfdDtsHandler(object):
+    """ DTS handler for VNFD config changes """
+    XPATH = "C,/vnfd:vnfd-catalog/vnfd:vnfd"
+
+    def __init__(self, dts, log, loop, nsm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsm = nsm
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ DTS registration handle """
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for VNFD configuration"""
+
+        @asyncio.coroutine
+        def on_apply(dts, acg, xact, action, scratch):
+            """Apply the  configuration"""
+            self._log.debug("Got NSM VNFD apply (xact: %s) (action: %s)(scr: %s)",
+                            xact, action, scratch)
+
+            # Create/Update a VNFD record
+            for cfg in self._regh.get_xact_elements(xact):
+                # Only interested in those VNFD cfgs whose ID was received in prepare callback
+                if cfg.id in scratch.get('vnfds', []):
+                    self._nsm.update_vnfd(cfg)
+
+            for cfg in self._regh.elements:
+                if cfg.id in scratch.get('deleted_vnfds', []):
+                    yield from self._nsm.delete_vnfd(cfg.id)
+
+            scratch.pop('vnfds', None)
+            scratch.pop('deleted_vnfds', None)
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+            """ on prepare callback """
+            self._log.debug("Got on prepare for VNFD (path: %s) (action: %s) (msg: %s)",
+                            ks_path.to_xpath(RwNsmYang.get_schema()), xact_info.query_action, msg)
+
+            fref = ProtobufC.FieldReference.alloc()
+            fref.goto_whole_message(msg.to_pbcm())
+
+            # Handle deletes in prepare_callback, but adds/updates in apply_callback
+            if fref.is_field_deleted():
+                self._log.debug("Adding msg to deleted field")
+                deleted_vnfds = scratch.setdefault('deleted_vnfds', [])
+                deleted_vnfds.append(msg.id)
+            else:
+                # Add this VNFD to scratch to create/update in apply callback
+                vnfds = scratch.setdefault('vnfds', [])
+                vnfds.append(msg.id)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        self._log.debug(
+            "Registering for VNFD config using xpath: %s",
+            VnfdDtsHandler.XPATH,
+            )
+        acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
+        with self._dts.appconf_group_create(handler=acg_hdl) as acg:
+            # Need a list in scratch to store VNFDs to create/update later
+            # acg._scratch['vnfds'] = list()
+            # acg._scratch['deleted_vnfds'] = list()
+            self._regh = acg.register(
+                xpath=VnfdDtsHandler.XPATH,
+                flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
+                on_prepare=on_prepare)
+
+
+class NsrRpcDtsHandler(object):
+    """ The network service instantiation RPC DTS handler """
+    EXEC_NSR_CONF_XPATH = "I,/nsr:start-network-service"
+    EXEC_NSR_CONF_O_XPATH = "O,/nsr:start-network-service"
+    NETCONF_IP_ADDRESS = "127.0.0.1"
+    NETCONF_PORT = 2022
+    NETCONF_USER = "admin"
+    NETCONF_PW = "admin"
+
+    def __init__(self, dts, log, loop, nsm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsm = nsm
+        self._nsd = None
+
+        self._ns_regh = None
+
+        self._manager = None
+
+        self._model = RwYang.Model.create_libncx()
+        self._model.load_schema_ypbc(RwNsrYang.get_schema())
+
+    @property
+    def nsm(self):
+        """ Return the NS manager instance """
+        return self._nsm
+
+    @staticmethod
+    def wrap_netconf_config_xml(xml):
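+        # e.g. wrap_netconf_config_xml("<nsr/>") returns
+        # '<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0"><nsr/></config>'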
+        xml = '<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">{}</config>'.format(xml)
+        return xml
+
+    @asyncio.coroutine
+    def _connect(self, timeout_secs=240):
+
+        start_time = time.time()
+        while (time.time() - start_time) < timeout_secs:
+
+            try:
+                self._log.debug("Attemping NsmTasklet netconf connection.")
+
+                manager = yield from ncclient.asyncio_manager.asyncio_connect(
+                        loop=self._loop,
+                        host=NsrRpcDtsHandler.NETCONF_IP_ADDRESS,
+                        port=NsrRpcDtsHandler.NETCONF_PORT,
+                        username=NsrRpcDtsHandler.NETCONF_USER,
+                        password=NsrRpcDtsHandler.NETCONF_PW,
+                        allow_agent=False,
+                        look_for_keys=False,
+                        hostkey_verify=False,
+                        )
+
+                return manager
+
+            except ncclient.transport.errors.SSHError as e:
+                self._log.warning("Netconf connection to launchpad %s failed: %s",
+                                  NsrRpcDtsHandler.NETCONF_IP_ADDRESS, str(e))
+
+            yield from asyncio.sleep(5, loop=self._loop)
+
+        raise NsrInstantiationFailed("Failed to connect to Launchpad within %s seconds" %
+                                      timeout_secs)
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for NS monitoring read from dts """
+        @asyncio.coroutine
+        def on_ns_config_prepare(xact_info, action, ks_path, msg):
+            """ prepare callback from dts start-network-service"""
+            assert action == rwdts.QueryAction.RPC
+            rpc_ip = msg
+            rpc_op = NsrYang.YangOutput_Nsr_StartNetworkService.from_dict({
+                    "nsr_id":str(uuid.uuid4())
+                })
+
+            if not ('name' in rpc_ip and 'nsd_ref' in rpc_ip and 'cloud_account' in rpc_ip):
+                self._log.error("Mandatory parameter name, nsd_ref or "
+                                "cloud_account not found in "
+                                "start-network-service input: {}".format(rpc_ip))
+
+
+            self._log.debug("start-network-service RPC input: {}".format(rpc_ip))
+
+            try:
+                # Add used value to the pool
+                self._log.debug("RPC output: {}".format(rpc_op))
+                nsd_copy = self.nsm.get_nsd(rpc_ip.nsd_ref)
+
+                if not self._manager:
+                    self._manager = yield from self._connect()
+
+                self._log.debug("Configuring ns-instance-config with name  %s nsd-ref: %s",
+                        rpc_ip.name, rpc_ip.nsd_ref)
+
+                ns_instance_config_dict = {"id":rpc_op.nsr_id, "admin_status":"ENABLED"}
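+                # Copy only those RPC input fields that are also valid fields
+                # on ns-instance-config/nsr (e.g. name, nsd_ref, cloud_account).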
+                ns_instance_config_copy_dict = {k:v for k, v in rpc_ip.as_dict().items()
+                                                if k in RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr().fields}
+                ns_instance_config_dict.update(ns_instance_config_copy_dict)
+
+                ns_instance_config = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr.from_dict(ns_instance_config_dict)
+                ns_instance_config.nsd = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_Nsd()
+                ns_instance_config.nsd.from_dict(nsd_copy.msg.as_dict())
+
+                xml = ns_instance_config.to_xml_v2(self._model)
+                netconf_xml = self.wrap_netconf_config_xml(xml)
+
+                self._log.debug("Sending configure ns-instance-config xml to %s: %s",
+                        netconf_xml, NsrRpcDtsHandler.NETCONF_IP_ADDRESS)
+
+                response = yield from self._manager.edit_config(
+                           target="running",
+                           config=netconf_xml,
+                           )
+                self._log.debug("Received edit config response: %s", str(response))
+
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK,
+                                        NsrRpcDtsHandler.EXEC_NSR_CONF_O_XPATH,
+                                        rpc_op)
+            except Exception as e:
+                self._log.error("Exception processing the "
+                                "start-network-service: {}".format(e))
+                self._log.exception(e)
+                xact_info.respond_xpath(rwdts.XactRspCode.NACK,
+                                        NsrRpcDtsHandler.EXEC_NSR_CONF_O_XPATH)
+
+
+        hdl_ns = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_ns_config_prepare,)
+
+        with self._dts.group_create() as group:
+            self._ns_regh = group.register(xpath=NsrRpcDtsHandler.EXEC_NSR_CONF_XPATH,
+                                           handler=hdl_ns,
+                                           flags=rwdts.Flag.PUBLISHER,
+                                          )
+
+
+class NsrDtsHandler(object):
+    """ The network service DTS handler """
+    NSR_XPATH = "C,/nsr:ns-instance-config/nsr:nsr"
+    SCALE_INSTANCE_XPATH = "C,/nsr:ns-instance-config/nsr:nsr/nsr:scaling-group/nsr:instance"
+
+    def __init__(self, dts, log, loop, nsm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsm = nsm
+
+        self._nsr_regh = None
+        self._scale_regh = None
+
+    @property
+    def nsm(self):
+        """ Return the NS manager instance """
+        return self._nsm
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for Nsr create/update/delete/read requests from dts """
+
+        def nsr_id_from_keyspec(ks):
+            nsr_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
+            nsr_id = nsr_path_entry.key00.id
+            return nsr_id
+
+        def group_name_from_keyspec(ks):
+            group_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
+            group_name = group_path_entry.key00.scaling_group_name_ref
+            return group_name
+
+        def is_instance_in_reg_elements(nsr_id, group_name, instance_id):
+            """ Return boolean indicating if scaling group instance was already commited previously.
+
+            By looking at the existing elements in this registration handle (elements not part
+            of this current xact), we can tell if the instance was configured previously without
+            keeping any application state.
+            """
+            for instance_cfg, keyspec in self._nsr_regh.get_xact_elements(include_keyspec=True):
+                elem_nsr_id = nsr_id_from_keyspec(keyspec)
+                elem_group_name = group_name_from_keyspec(keyspec)
+
+                if elem_nsr_id != nsr_id or group_name != elem_group_name:
+                    continue
+
+                if instance_cfg.id == instance_id:
+                    return True
+
+            return False
+
+        def get_scale_group_instance_delta(nsr_id, group_name, xact):
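+            # Example: instance ids {1, 2} in this xact vs {2, 3} already
+            # committed yields {"added": [1], "deleted": [3]}; id 2 is unchanged.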
+            delta = {"added": [], "deleted": []}
+            for instance_cfg, keyspec in self._scale_regh.get_xact_elements(xact, include_keyspec=True):
+                elem_nsr_id = nsr_id_from_keyspec(keyspec)
+                if elem_nsr_id != nsr_id:
+                    continue
+
+                elem_group_name = group_name_from_keyspec(keyspec)
+                if elem_group_name != group_name:
+                    continue
+
+                delta["added"].append(instance_cfg.id)
+
+            for instance_cfg, keyspec in self._scale_regh.get_xact_elements(include_keyspec=True):
+                elem_nsr_id = nsr_id_from_keyspec(keyspec)
+                if elem_nsr_id != nsr_id:
+                    continue
+
+                elem_group_name = group_name_from_keyspec(keyspec)
+                if elem_group_name != group_name:
+                    continue
+
+                if instance_cfg.id in delta["added"]:
+                    delta["added"].remove(instance_cfg.id)
+                else:
+                    delta["deleted"].append(instance_cfg.id)
+
+            return delta
+
+        @asyncio.coroutine
+        def update_nsr_nsd(nsr_id, xact, scratch):
+
+            @asyncio.coroutine
+            def get_nsr_vl_delta(nsr_id, xact, scratch):
+                delta = {"added": [], "deleted": []}
+                for instance_cfg, keyspec in self._nsr_regh.get_xact_elements(xact, include_keyspec=True):
+                    elem_nsr_id = nsr_id_from_keyspec(keyspec)
+                    if elem_nsr_id != nsr_id:
+                        continue
+
+                    if 'vld' in instance_cfg.nsd:
+                        for vld in instance_cfg.nsd.vld:
+                            delta["added"].append(vld)
+
+                for instance_cfg, keyspec in self._nsr_regh.get_xact_elements(include_keyspec=True):
+                    self._log.debug("NSR update: %s", instance_cfg)
+                    elem_nsr_id = nsr_id_from_keyspec(keyspec)
+                    if elem_nsr_id != nsr_id:
+                        continue
+
+                    if 'vld' in instance_cfg.nsd:
+                        for vld in instance_cfg.nsd.vld:
+                            if vld in delta["added"]:
+                                delta["added"].remove(vld)
+                            else:
+                                delta["deleted"].append(vld)
+
+                return delta
+
+            vl_delta = yield from get_nsr_vl_delta(nsr_id, xact, scratch)
+            self._log.debug("Got NSR:%s VL instance delta: %s", nsr_id, vl_delta)
+
+            for vld in vl_delta["added"]:
+                yield from self._nsm.nsr_instantiate_vl(nsr_id, vld)
+
+            for vld in vl_delta["deleted"]:
+                yield from self._nsm.nsr_terminate_vl(nsr_id, vld)
+
+        def get_add_delete_update_cfgs(dts_member_reg, xact, key_name, scratch):
+            # Unfortunately, it is currently difficult to figure out what has exactly
+            # changed in this xact without Pbdelta support (RIFT-4916)
+            # As a workaround, we can fetch the pre and post xact elements and
+            # perform a comparison to figure out adds/deletes/updates
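+            # Example: the xact holds {A, B'} while the cache holds {B, C}:
+            # added = [A], deleted = [C], updated = [B'] (only if B' != B).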
+            xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
+            curr_cfgs = list(dts_member_reg.elements)
+
+            xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
+            curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
+
+            # Find Adds
+            added_keys = set(xact_key_map) - set(curr_key_map)
+            added_cfgs = [xact_key_map[key] for key in added_keys]
+
+            # Find Deletes
+            deleted_keys = set(curr_key_map) - set(xact_key_map)
+            deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
+
+            # Find Updates
+            updated_keys = set(curr_key_map) & set(xact_key_map)
+            updated_cfgs = [xact_key_map[key] for key in updated_keys
+                            if xact_key_map[key] != curr_key_map[key]]
+
+            return added_cfgs, deleted_cfgs, updated_cfgs
+
+        def on_apply(dts, acg, xact, action, scratch):
+            """Apply the  configuration"""
+            self._log.debug("Got nsr apply (xact: %s) (action: %s)(scr: %s)",
+                            xact, action, scratch)
+
+            def handle_create_nsr(msg, restart_mode=False):
+                """ Handle create NSR requests """
+                # Do some validations
+                if not msg.has_field("nsd"):
+                    err = "NSD not provided"
+                    self._log.error(err)
+                    raise NetworkServiceRecordError(err)
+
+                self._log.debug("Creating NetworkServiceRecord %s  from nsr config  %s",
+                               msg.id, msg.as_dict())
+                nsr = self.nsm.create_nsr(msg, restart_mode=restart_mode)
+                return nsr
+
+            def handle_delete_nsr(msg):
+                @asyncio.coroutine
+                def delete_instantiation(ns_id):
+                    """ Delete instantiation """
+                    with self._dts.transaction() as xact:
+                        yield from self._nsm.terminate_ns(ns_id, xact)
+
+                # Handle delete NSR requests
+                self._log.info("Delete req for  NSR Id: %s received", msg.id)
+                # Terminate the NSR instance
+                nsr = self._nsm.get_ns_by_nsr_id(msg.id)
+
+                nsr.set_state(NetworkServiceRecordState.TERMINATE_RCVD)
+                event_descr = "Terminate rcvd for NS Id:%s" % msg.id
+                nsr.record_event("terminate-rcvd", event_descr)
+
+                self._loop.create_task(delete_instantiation(msg.id))
+
+            @asyncio.coroutine
+            def begin_instantiation(nsr):
+                # Begin instantiation
+                self._log.info("Beginning NS instantiation: %s", nsr.id)
+                yield from self._nsm.instantiate_ns(nsr.id, xact)
+
+            self._log.debug("Got nsr apply (xact: %s) (action: %s)(scr: %s)",
+                            xact, action, scratch)
+
+            if action == rwdts.AppconfAction.INSTALL and xact.id is None:
+                for element in self._nsr_regh.elements:
+                    nsr = handle_create_nsr(element, restart_mode=True)
+                    self._loop.create_task(begin_instantiation(nsr))
+
+
+            (added_msgs, deleted_msgs, updated_msgs) = get_add_delete_update_cfgs(self._nsr_regh,
+                                                                                  xact,
+                                                                                  "id",
+                                                                                  scratch)
+            self._log.debug("Added: %s, Deleted: %s, Updated: %s", added_msgs,
+                            deleted_msgs, updated_msgs)
+
+            for msg in added_msgs:
+                if msg.id not in self._nsm.nsrs:
+                    self._log.info("Create NSR received in on_apply to instantiate NS:%s", msg.id)
+                    nsr = handle_create_nsr(msg)
+                    self._loop.create_task(begin_instantiation(nsr))
+
+            for msg in deleted_msgs:
+                self._log.info("Delete NSR received in on_apply to terminate NS:%s", msg.id)
+                try:
+                    handle_delete_nsr(msg)
+                except Exception:
+                    self._log.exception("Failed to terminate NS:%s", msg.id)
+
+            for msg in updated_msgs:
+                self._log.info("Update NSR received in on_apply: %s", msg)
+
+                self._nsm.nsr_update_cfg(msg.id, msg)
+
+                if 'nsd' in msg:
+                    self._loop.create_task(update_nsr_nsd(msg.id, xact, scratch))
+
+                for group in msg.scaling_group:
+                    instance_delta = get_scale_group_instance_delta(msg.id, group.scaling_group_name_ref, xact)
+                    self._log.debug("Got NSR:%s scale group instance delta: %s", msg.id, instance_delta)
+
+                    for instance_id in instance_delta["added"]:
+                        self._nsm.scale_nsr_out(msg.id, group.scaling_group_name_ref, instance_id, xact)
+
+                    for instance_id in instance_delta["deleted"]:
+                        self._nsm.scale_nsr_in(msg.id, group.scaling_group_name_ref, instance_id)
+
+
+            return RwTypes.RwStatus.SUCCESS
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+            """ Prepare calllback from DTS for NSR """
+
+            xpath = ks_path.to_xpath(RwNsrYang.get_schema())
+            action = xact_info.query_action
+            self._log.debug(
+                    "Got Nsr prepare callback (xact: %s) (action: %s) (info: %s), %s:%s)",
+                    xact, action, xact_info, xpath, msg
+                    )
+
+            @asyncio.coroutine
+            def delete_instantiation(ns_id):
+                """ Delete instantiation """
+                yield from self._nsm.terminate_ns(ns_id, None)
+
+            def handle_delete_nsr():
+                """ Handle delete NSR requests """
+                self._log.info("Delete req for  NSR Id: %s received", msg.id)
+                # Terminate the NSR instance
+                nsr = self._nsm.get_ns_by_nsr_id(msg.id)
+
+                nsr.set_state(NetworkServiceRecordState.TERMINATE_RCVD)
+                event_descr = "Terminate rcvd for NS Id:%s" % msg.id
+                nsr.record_event("terminate-rcvd", event_descr)
+
+                self._loop.create_task(delete_instantiation(msg.id))
+
+            fref = ProtobufC.FieldReference.alloc()
+            fref.goto_whole_message(msg.to_pbcm())
+
+            if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE, rwdts.QueryAction.DELETE]:
+                # if this is an NSR create
+                if action != rwdts.QueryAction.DELETE and msg.id not in self._nsm.nsrs:
+                    # Ensure the Cloud account/datacenter has been specified
+                    if not msg.has_field("cloud_account") and not msg.has_field("om_datacenter"):
+                        raise NsrInstantiationFailed("Cloud account or datacenter not specified in NSR")
+
+                    # Check if nsd is specified
+                    if not msg.has_field("nsd"):
+                        raise NsrInstantiationFailed("NSD not specified in NSR")
+
+                else:
+                    nsr = self._nsm.nsrs[msg.id]
+
+                    if msg.has_field("nsd"):
+                        if nsr.state != NetworkServiceRecordState.RUNNING:
+                            raise NsrVlUpdateError("Unable to update VL when NSR not in running state")
+                        if 'vld' not in msg.nsd or len(msg.nsd.vld) == 0:
+                            raise NsrVlUpdateError("NS config NSD should have at least 1 VLD defined")
+
+                    if msg.has_field("scaling_group"):
+                        if nsr.state != NetworkServiceRecordState.RUNNING:
+                            raise ScalingOperationError("Unable to perform scaling action when NS is not in running state")
+
+                        if len(msg.scaling_group) > 1:
+                            raise ScalingOperationError("Only a single scaling group can be configured at a time")
+
+                        for group_msg in msg.scaling_group:
+                            num_new_group_instances = len(group_msg.instance)
+                            if num_new_group_instances > 1:
+                                raise ScalingOperationError("Only a single scaling instance can be modified at a time")
+
+                            elif num_new_group_instances == 1:
+                                scale_group = nsr.scaling_groups[group_msg.scaling_group_name_ref]
+                                if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+                                    if len(scale_group.instances) == scale_group.max_instance_count:
+                                        raise ScalingOperationError("Max instances for %s reached" % scale_group)
+
+            acg.handle.prepare_complete_ok(xact_info.handle)
+
+
+        self._log.debug("Registering for NSR config using xpath: %s",
+                        NsrDtsHandler.NSR_XPATH)
+
+        acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
+        with self._dts.appconf_group_create(handler=acg_hdl) as acg:
+            self._nsr_regh = acg.register(xpath=NsrDtsHandler.NSR_XPATH,
+                                      flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+                                      on_prepare=on_prepare)
+
+            self._scale_regh = acg.register(
+                                      xpath=NsrDtsHandler.SCALE_INSTANCE_XPATH,
+                                      flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+                                      )
+
+
+class NsrOpDataDtsHandler(object):
+    """ The network service op data DTS handler """
+    XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr"
+
+    def __init__(self, dts, log, loop, nsm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsm = nsm
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ Return the registration handle"""
+        return self._regh
+
+    @property
+    def nsm(self):
+        """ Return the NS manager instance """
+        return self._nsm
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for Nsr op data publisher registration"""
+        self._log.debug("Registering Nsr op data path %s as publisher",
+                        NsrOpDataDtsHandler.XPATH)
+
+        hdl = rift.tasklets.DTS.RegistrationHandler()
+        handlers = rift.tasklets.Group.Handler()
+        with self._dts.group_create(handler=handlers) as group:
+            self._regh = group.register(xpath=NsrOpDataDtsHandler.XPATH,
+                                        handler=hdl,
+                                        flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ | rwdts.Flag.DATASTORE)
+
+    @asyncio.coroutine
+    def create(self, path, msg):
+        """
+        Create an NS record in DTS with the path and message
+        """
+        self._log.debug("Creating NSR %s:%s", path, msg)
+        self.regh.create_element(path, msg)
+        self._log.debug("Created NSR, %s:%s", path, msg)
+
+    @asyncio.coroutine
+    def update(self, path, msg, flags=rwdts.XactFlag.REPLACE):
+        """
+        Update an NS record in DTS with the path and message
+        """
+        self._log.debug("Updating NSR, %s:%s regh = %s", path, msg, self.regh)
+        self.regh.update_element(path, msg, flags)
+        self._log.debug("Updated NSR, %s:%s", path, msg)
+
+    @asyncio.coroutine
+    def delete(self, path):
+        """
+        Delete an NS record in DTS at the given path
+        """
+        self._log.debug("Deleting NSR path:%s", path)
+        self.regh.delete_element(path)
+        self._log.debug("Deleted NSR path:%s", path)
+
+
+class VnfrDtsHandler(object):
+    """ The virtual network service DTS handler """
+    XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+
+    def __init__(self, dts, log, loop, nsm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsm = nsm
+
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ Return registration handle """
+        return self._regh
+
+    @property
+    def nsm(self):
+        """ Return the NS manager instance """
+        return self._nsm
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for vnfr create/update/delete/ advises from dts """
+
+        def on_commit(xact_info):
+            """ The transaction has been committed """
+            self._log.debug("Got vnfr commit (xact_info: %s)", xact_info)
+            return rwdts.MemberRspCode.ACTION_OK
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            """ prepare callback from dts """
+            xpath = ks_path.to_xpath(RwNsrYang.get_schema())
+            self._log.debug(
+                "Got vnfr on_prepare cb (xact_info: %s, action: %s): %s:%s",
+                xact_info, action, ks_path, msg
+                )
+
+            schema = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema()
+            path_entry = schema.keyspec_to_entry(ks_path)
+            if path_entry.key00.id not in self._nsm._vnfrs:
+                self._log.error("%s request for non existent record path %s",
+                                action, xpath)
+                xact_info.respond_xpath(rwdts.XactRspCode.NA, xpath)
+                return
+
+            if action == rwdts.QueryAction.CREATE or action == rwdts.QueryAction.UPDATE:
+                yield from self._nsm.update_vnfr(msg)
+            elif action == rwdts.QueryAction.DELETE:
+                self._log.debug("Deleting VNFR with id %s", path_entry.key00.id)
+                self._nsm.delete_vnfr(path_entry.key00.id)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath)
+
+        self._log.debug("Registering for VNFR using xpath: %s",
+                        VnfrDtsHandler.XPATH,)
+
+        hdl = rift.tasklets.DTS.RegistrationHandler(on_commit=on_commit,
+                                                    on_prepare=on_prepare,)
+        with self._dts.group_create() as group:
+            self._regh = group.register(xpath=VnfrDtsHandler.XPATH,
+                                        handler=hdl,
+                                        flags=(rwdts.Flag.SUBSCRIBER),)
+
+
+class NsdRefCountDtsHandler(object):
+    """ The NSD Ref Count DTS handler """
+    XPATH = "D,/nsr:ns-instance-opdata/rw-nsr:nsd-ref-count"
+
+    def __init__(self, dts, log, loop, nsm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsm = nsm
+
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ Return registration handle """
+        return self._regh
+
+    @property
+    def nsm(self):
+        """ Return the NS manager instance """
+        return self._nsm
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for NSD ref count read from dts """
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            """ prepare callback from dts """
+            xpath = ks_path.to_xpath(RwNsrYang.get_schema())
+
+            if action == rwdts.QueryAction.READ:
+                schema = RwNsrYang.YangData_Nsr_NsInstanceOpdata_NsdRefCount.schema()
+                path_entry = schema.keyspec_to_entry(ks_path)
+                nsd_list = yield from self._nsm.get_nsd_refcount(path_entry.key00.nsd_id_ref)
+                for xpath, msg in nsd_list:
+                    xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.MORE,
+                                            xpath=xpath,
+                                            msg=msg)
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+            else:
+                raise NetworkServiceRecordError("Not supported operation %s" % action)
+
+        hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
+        with self._dts.group_create() as group:
+            self._regh = group.register(xpath=NsdRefCountDtsHandler.XPATH,
+                                        handler=hdl,
+                                        flags=rwdts.Flag.PUBLISHER,)
+
+
+class NsManager(object):
+    """ The Network Service Manager class"""
+    def __init__(self, dts, log, loop,
+                 nsr_handler, vnfr_handler, vlr_handler, ro_plugin_selector,
+                 vnffgmgr, vnfd_pub_handler, cloud_account_handler):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsr_handler = nsr_handler
+        self._vnfr_pub_handler = vnfr_handler
+        self._vlr_pub_handler = vlr_handler
+        self._vnffgmgr = vnffgmgr
+        self._vnfd_pub_handler = vnfd_pub_handler
+        self._cloud_account_handler = cloud_account_handler
+
+        self._ro_plugin_selector = ro_plugin_selector
+        self._ncclient = rift.mano.ncclient.NcClient(
+              host="127.0.0.1",
+              port=2022,
+              username="admin",
+              password="admin",
+              loop=self._loop)
+
+        self._nsrs = {}
+        self._nsds = {}
+        self._vnfds = {}
+        self._vnfrs = {}
+
+        self.cfgmgr_obj = conman.ROConfigManager(log, loop, dts, self)
+
+        # TODO: All these handlers should move to tasklet level.
+        # Passing self is often an indication of bad design
+        self._nsd_dts_handler = NsdDtsHandler(dts, log, loop, self)
+        self._vnfd_dts_handler = VnfdDtsHandler(dts, log, loop, self)
+        self._dts_handlers = [self._nsd_dts_handler,
+                              VnfrDtsHandler(dts, log, loop, self),
+                              NsdRefCountDtsHandler(dts, log, loop, self),
+                              NsrDtsHandler(dts, log, loop, self),
+                              ScalingRpcHandler(log, dts, loop, self.scale_rpc_callback),
+                              NsrRpcDtsHandler(dts, log, loop, self),
+                              self._vnfd_dts_handler,
+                              self.cfgmgr_obj,
+                              ]
+
+
+    @property
+    def log(self):
+        """ Log handle """
+        return self._log
+
+    @property
+    def loop(self):
+        """ Loop """
+        return self._loop
+
+    @property
+    def dts(self):
+        """ DTS handle """
+        return self._dts
+
+    @property
+    def nsr_handler(self):
+        """" NSR handler """
+        return self._nsr_handler
+
+    @property
+    def so_obj(self):
+        """" So Obj handler """
+        return self._so_obj
+
+    @property
+    def nsrs(self):
+        """ NSRs in this NSM"""
+        return self._nsrs
+
+    @property
+    def nsds(self):
+        """ NSDs in this NSM"""
+        return self._nsds
+
+    @property
+    def vnfds(self):
+        """ VNFDs in this NSM"""
+        return self._vnfds
+
+    @property
+    def vnfrs(self):
+        """ VNFRs in this NSM"""
+        return self._vnfrs
+
+    @property
+    def nsr_pub_handler(self):
+        """ NSR publication handler """
+        return self._nsr_handler
+
+    @property
+    def vnfr_pub_handler(self):
+        """ VNFR publication handler """
+        return self._vnfr_pub_handler
+
+    @property
+    def vlr_pub_handler(self):
+        """ VLR publication handler """
+        return self._vlr_pub_handler
+
+    @property
+    def vnfd_pub_handler(self):
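+        """ VNFD publication handler """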
+        return self._vnfd_pub_handler
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register all static DTS handlers """
+        for dts_handle in self._dts_handlers:
+            yield from dts_handle.register()
+
+
+    def get_ns_by_nsr_id(self, nsr_id):
+        """ get NSR by nsr id """
+        if nsr_id not in self._nsrs:
+            raise NetworkServiceRecordError("NSR id %s not found" % nsr_id)
+
+        return self._nsrs[nsr_id]
+
+    def scale_nsr_out(self, nsr_id, scale_group_name, instance_id, config_xact):
+        self.log.debug("Scale out NetworkServiceRecord (nsr_id: %s) (scaling group: %s) (instance_id: %s)",
+                       nsr_id,
+                       scale_group_name,
+                       instance_id
+                       )
+        nsr = self._nsrs[nsr_id]
+        if nsr.state != NetworkServiceRecordState.RUNNING:
+            raise ScalingOperationError("Cannot perform scaling operation if NSR is not in running state")
+
+        self._loop.create_task(nsr.create_scale_group_instance(scale_group_name, instance_id, config_xact))
+
+    def scale_nsr_in(self, nsr_id, scale_group_name, instance_id):
+        self.log.debug("Scale in NetworkServiceRecord (nsr_id: %s) (scaling group: %s) (instance_id: %s)",
+                       nsr_id,
+                       scale_group_name,
+                       instance_id,
+                       )
+        nsr = self._nsrs[nsr_id]
+        if nsr.state != NetworkServiceRecordState.RUNNING:
+            raise ScalingOperationError("Cannot perform scaling operation if NSR is not in running state")
+
+        self._loop.create_task(nsr.delete_scale_group_instance(scale_group_name, instance_id))
+
+    def scale_rpc_callback(self, xact, msg, action):
+        """Callback handler for RPC calls
+        Args:
+            xact : Transaction Handler
+            msg : RPC input
+            action : Scaling Action
+        """
+        ScalingGroupInstance = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance
+        ScalingGroup = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup
+
+        xpath = ('C,/nsr:ns-instance-config/nsr:nsr[nsr:id="{}"]').format(
+                          msg.nsr_id_ref)
+        instance = ScalingGroupInstance.from_dict({"id": msg.instance_id})
+
+        @asyncio.coroutine
+        def get_nsr_scaling_group():
+            results = yield from self._dts.query_read(xpath, rwdts.XactFlag.MERGE)
+
+            for result in results:
+                res = yield from result
+                nsr_config = res.result
+
+            for scaling_group in nsr_config.scaling_group:
+                if scaling_group.scaling_group_name_ref == msg.scaling_group_name_ref:
+                    break
+            else:
+                scaling_group = nsr_config.scaling_group.add()
+                scaling_group.scaling_group_name_ref = msg.scaling_group_name_ref
+
+            return (nsr_config, scaling_group)
+
+        @asyncio.coroutine
+        def update_config(nsr_config):
+            xml = self._ncclient.convert_to_xml(RwNsrYang, nsr_config)
+            xml = '<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">{}</config>'.format(xml)
+            yield from self._ncclient.connect()
+            yield from self._ncclient.manager.edit_config(target="running", config=xml, default_operation="replace")
+
+        @asyncio.coroutine
+        def scale_out():
+            nsr_config, scaling_group = yield from get_nsr_scaling_group()
+            scaling_group.instance.append(instance)
+            yield from update_config(nsr_config)
+
+        @asyncio.coroutine
+        def scale_in():
+            nsr_config, scaling_group = yield from get_nsr_scaling_group()
+            scaling_group.instance.remove(instance)
+            yield from update_config(nsr_config)
+
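+        # Illustrative flow: a scale-out RPC for group "sg" with instance-id 1
+        # appends instance {"id": 1} to that group under the NSR's
+        # ns-instance-config entry and pushes the full config back via netconf
+        # edit_config (see update_config above).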
+        if action == ScalingRpcHandler.ACTION.SCALE_OUT:
+            self._loop.create_task(scale_out())
+        else:
+            self._loop.create_task(scale_in())
+
+        # Opdata based calls, disabled for now!
+        # if action == ScalingRpcHandler.ACTION.SCALE_OUT:
+        #     self.scale_nsr_out(
+        #           msg.nsr_id_ref,
+        #           msg.scaling_group_name_ref,
+        #           msg.instance_id,
+        #           xact)
+        # else:
+        #     self.scale_nsr_in(
+        #           msg.nsr_id_ref,
+        #           msg.scaling_group_name_ref,
+        #           msg.instance_id)
+
+    def nsr_update_cfg(self, nsr_id, msg):
+        nsr = self._nsrs[nsr_id]
+        nsr.nsr_cfg_msg = msg
+
+    @asyncio.coroutine
+    def nsr_instantiate_vl(self, nsr_id, vld):
+        self.log.debug("NSR {} create VL {}".format(nsr_id, vld))
+        nsr = self._nsrs[nsr_id]
+        if nsr.state != NetworkServiceRecordState.RUNNING:
+            raise NsrVlUpdateError("Cannot perform VL instantiate if NSR is not in running state")
+
+        # Not calling in a separate task as this is called from a separate task
+        yield from nsr.create_vl_instance(vld)
+
+    @asyncio.coroutine
+    def nsr_terminate_vl(self, nsr_id, vld):
+        self.log.debug("NSR {} delete VL {}".format(nsr_id, vld.id))
+        nsr = self._nsrs[nsr_id]
+        if nsr.state != NetworkServiceRecordState.RUNNING:
+            raise NsrVlUpdateError("Cannot perform VL terminate if NSR is not in running state")
+
+        # Not calling in a separate task as this is called from a separate task
+        yield from nsr.delete_vl_instance(vld)
+
+    def create_nsr(self, nsr_msg, restart_mode=False):
+        """ Create an NSR instance """
+        if nsr_msg.id in self._nsrs:
+            msg = "NSR id %s already exists" % nsr_msg.id
+            self._log.error(msg)
+            raise NetworkServiceRecordError(msg)
+
+        self._log.info("Create NetworkServiceRecord nsr id %s from nsd_id %s",
+                       nsr_msg.id,
+                       nsr_msg.nsd.id)
+
+        nsm_plugin = self._ro_plugin_selector.ro_plugin
+        sdn_account_name = self._cloud_account_handler.get_cloud_account_sdn_name(nsr_msg.cloud_account)
+
+        nsr = NetworkServiceRecord(self._dts,
+                                   self._log,
+                                   self._loop,
+                                   self,
+                                   nsm_plugin,
+                                   nsr_msg,
+                                   sdn_account_name,
+                                   restart_mode=restart_mode
+                                   )
+        self._nsrs[nsr_msg.id] = nsr
+        nsm_plugin.create_nsr(nsr_msg, nsr_msg.nsd)
+
+        return nsr
+
+    def delete_nsr(self, nsr_id):
+        """
+        Delete NSR with the passed nsr id
+        """
+        del self._nsrs[nsr_id]
+
+    @asyncio.coroutine
+    def instantiate_ns(self, nsr_id, config_xact):
+        """ Instantiate an NS instance """
+        self._log.debug("Instantiating Network service id %s", nsr_id)
+        if nsr_id not in self._nsrs:
+            err = "NSR id %s not found " % nsr_id
+            self._log.error(err)
+            raise NetworkServiceRecordError(err)
+
+        nsr = self._nsrs[nsr_id]
+        yield from nsr.nsm_plugin.instantiate_ns(nsr, config_xact)
+
+    @asyncio.coroutine
+    def update_vnfr(self, vnfr):
+        """Create/Update an VNFR """
+
+        vnfr_state = self._vnfrs[vnfr.id].state
+        self._log.debug("Updating VNFR with state %s: vnfr %s", vnfr_state, vnfr)
+
+        yield from self._vnfrs[vnfr.id].update_state(vnfr)
+        nsr = self.find_nsr_for_vnfr(vnfr.id)
+        yield from nsr.update_state()
+
+    def find_nsr_for_vnfr(self, vnfr_id):
+        """ Find the NSR which )has the passed vnfr id"""
+        for nsr in list(self.nsrs.values()):
+            for vnfr in list(nsr.vnfrs.values()):
+                if vnfr.id == vnfr_id:
+                    return nsr
+        return None
+
+    def delete_vnfr(self, vnfr_id):
+        """ Delete VNFR  with the passed id"""
+        del self._vnfrs[vnfr_id]
+
+    def get_nsd_ref(self, nsd_id):
+        """ Get network service descriptor for the passed nsd_id
+            with a reference"""
+        nsd = self.get_nsd(nsd_id)
+        nsd.ref()
+        return nsd
+
+    @asyncio.coroutine
+    def get_nsr_config(self, nsd_id):
+        xpath = "C,/nsr:ns-instance-config"
+        results = yield from self._dts.query_read(xpath, rwdts.XactFlag.MERGE)
+
+        for result in results:
+            entry = yield from result
+            ns_instance_config = entry.result
+
+            for nsr in ns_instance_config.nsr:
+                if nsr.nsd.id == nsd_id:
+                    return nsr
+
+        return None
+
+    @asyncio.coroutine
+    def nsd_unref_by_nsr_id(self, nsr_id):
+        """ Unref the network service descriptor based on NSR id """
+        self._log.debug("NSR Unref called for Nsr Id:%s", nsr_id)
+        if nsr_id in self._nsrs:
+            nsr = self._nsrs[nsr_id]
+
+            try:
+                nsd = self.get_nsd(nsr.nsd_id)
+                self._log.debug("Releasing ref on NSD %s held by NSR %s - Curr %d",
+                                nsd.id, nsr.id, nsd.ref_count)
+                nsd.unref()
+            except NetworkServiceDescriptorError:
+                # We store a copy of NSD in NSR and the NSD in nsd-catalog
+                # could be deleted
+                pass
+
+        else:
+            self._log.error("Cannot find NSR with id %s", nsr_id)
+            raise NetworkServiceDescriptorUnrefError("No NSR with id %s" % nsr_id)
+
+    @asyncio.coroutine
+    def nsd_unref(self, nsd_id):
+        """ Unref the network service descriptor associated with the id """
+        nsd = self.get_nsd(nsd_id)
+        nsd.unref()
+
+    def get_nsd(self, nsd_id):
+        """ Get network service descriptor for the passed nsd_id"""
+        if nsd_id not in self._nsds:
+            self._log.error("Cannot find NSD id:%s", nsd_id)
+            raise NetworkServiceDescriptorError("Cannot find NSD id:%s" % nsd_id)
+
+        return self._nsds[nsd_id]
+
+    def create_nsd(self, nsd_msg):
+        """ Create a network service descriptor """
+        self._log.debug("Create network service descriptor - %s", nsd_msg)
+        if nsd_msg.id in self._nsds:
+            self._log.error("Cannot create NSD %s -NSD ID already exists", nsd_msg)
+            raise NetworkServiceDescriptorError("NSD already exists - %s" % nsd_msg.id)
+
+        nsd = NetworkServiceDescriptor(
+                self._dts,
+                self._log,
+                self._loop,
+                nsd_msg,
+                self
+                )
+        self._nsds[nsd_msg.id] = nsd
+
+        return nsd
+
+    def update_nsd(self, nsd):
+        """ update the Network service descriptor """
+        self._log.debug("Update network service descriptor - %s", nsd)
+        if nsd.id not in self._nsds:
+            self._log.debug("No NSD found - creating NSD id = %s", nsd.id)
+            self.create_nsd(nsd)
+        else:
+            self._log.debug("Updating NSD id = %s, nsd = %s", nsd.id, nsd)
+            self._nsds[nsd.id].update(nsd)
+
+    def delete_nsd(self, nsd_id):
+        """ Delete the Network service descriptor with the passed id """
+        self._log.debug("Deleting the network service descriptor - %s", nsd_id)
+        if nsd_id not in self._nsds:
+            self._log.debug("Delete NSD failed - cannot find nsd-id %s", nsd_id)
+            raise NetworkServiceDescriptorNotFound("Cannot find %s" % nsd_id)
+
+        if self._nsds[nsd_id].in_use():
+            self._log.debug("Cannot delete NSD id %s, reference exists %s",
+                            nsd_id,
+                            self._nsds[nsd_id].ref_count)
+            raise NetworkServiceDescriptorRefCountExists(
+                "Cannot delete :%s, ref_count:%s" %
+                (nsd_id, self._nsds[nsd_id].ref_count))
+
+        del self._nsds[nsd_id]
+
+    def get_vnfd_config(self, xact):
+        vnfd_dts_reg = self._vnfd_dts_handler.regh
+        for cfg in vnfd_dts_reg.get_xact_elements(xact):
+            if cfg.id not in self._vnfds:
+                self.create_vnfd(cfg)
+
+    def get_vnfd(self, vnfd_id, xact):
+        """ Get virtual network function descriptor for the passed vnfd_id"""
+        if vnfd_id not in self._vnfds:
+            self._log.error("Cannot find VNFD id:%s", vnfd_id)
+            self.get_vnfd_config(xact)
+
+            if vnfd_id not in self._vnfds:
+                self._log.error("Cannot find VNFD id:%s", vnfd_id)
+                raise VnfDescriptorError("Cannot find VNFD id:%s" % vnfd_id)
+
+        return self._vnfds[vnfd_id]
+
+    def create_vnfd(self, vnfd):
+        """ Create a virtual network function descriptor """
+        self._log.debug("Create virtual network function descriptor - %s", vnfd)
+        if vnfd.id in self._vnfds:
+            self._log.error("Cannot create VNFD %s -VNFD ID already exists", vnfd)
+            raise VnfDescriptorError("VNFD already exists - %s" % vnfd.id)
+
+        self._vnfds[vnfd.id] = vnfd
+        return self._vnfds[vnfd.id]
+
+    def update_vnfd(self, vnfd):
+        """ Update the virtual network function descriptor """
+        self._log.debug("Update virtual network function descriptor- %s", vnfd)
+
+        # Hack to remove duplicates from leaf-lists - to be fixed by RIFT-6511
+        for ivld in vnfd.internal_vld:
+            ivld.internal_connection_point_ref = list(set(ivld.internal_connection_point_ref))
+
+        if vnfd.id not in self._vnfds:
+            self._log.debug("No VNFD found - creating VNFD id = %s", vnfd.id)
+            self.create_vnfd(vnfd)
+        else:
+            self._log.debug("Updating VNFD id = %s, vnfd = %s", vnfd.id, vnfd)
+            self._vnfds[vnfd.id] = vnfd
+
+    @asyncio.coroutine
+    def delete_vnfd(self, vnfd_id):
+        """ Delete the virtual network function descriptor with the passed id """
+        self._log.debug("Deleting the virtual network function descriptor - %s", vnfd_id)
+        if vnfd_id not in self._vnfds:
+            self._log.debug("Delete VNFD failed - cannot find vnfd-id %s", vnfd_id)
+            raise VnfDescriptorError("Cannot find %s" % vnfd_id)
+
+        del self._vnfds[vnfd_id]
+
+    def nsd_in_use(self, nsd_id):
+        """ Is the NSD with the passed id in use """
+        self._log.debug("Is this NSD in use - msg:%s", nsd_id)
+        if nsd_id in self._nsds:
+            return self._nsds[nsd_id].in_use()
+        return False
+
+    @asyncio.coroutine
+    def publish_nsr(self, xact, path, msg):
+        """ Publish a NSR """
+        self._log.debug("Publish NSR with path %s, msg %s",
+                        path, msg)
+        yield from self.nsr_handler.update(xact, path, msg)
+
+    @asyncio.coroutine
+    def unpublish_nsr(self, xact, path):
+        """ Un Publish an NSR """
+        self._log.debug("Publishing delete NSR with path %s", path)
+        yield from self.nsr_handler.delete(path, xact)
+
+    def vnfr_is_ready(self, vnfr_id):
+        """ VNFR with the id is ready """
+        self._log.debug("VNFR id %s ready", vnfr_id)
+        if vnfr_id not in self._vnfds:
+            err = "Did not find VNFR ID with id %s" % vnfr_id
+            self._log.critical("err")
+            raise VirtualNetworkFunctionRecordError(err)
+        self._vnfrs[vnfr_id].is_ready()
+
+    @asyncio.coroutine
+    def get_nsd_refcount(self, nsd_id):
+        """ Get the nsd_list from this NSM"""
+
+        def nsd_refcount_xpath(nsd_id):
+            """ xpath for ref count entry """
+            return (NsdRefCountDtsHandler.XPATH +
+                    "[rw-nsr:nsd-id-ref = '{}']").format(nsd_id)
+
+        nsd_list = []
+        if nsd_id is None or nsd_id == "":
+            for nsd in self._nsds.values():
+                nsd_msg = RwNsrYang.YangData_Nsr_NsInstanceOpdata_NsdRefCount()
+                nsd_msg.nsd_id_ref = nsd.id
+                nsd_msg.instance_ref_count = nsd.ref_count
+                nsd_list.append((nsd_refcount_xpath(nsd.id), nsd_msg))
+        elif nsd_id in self._nsds:
+            nsd_msg = RwNsrYang.YangData_Nsr_NsInstanceOpdata_NsdRefCount()
+            nsd_msg.nsd_id_ref = self._nsds[nsd_id].id
+            nsd_msg.instance_ref_count = self._nsds[nsd_id].ref_count
+            nsd_list.append((nsd_refcount_xpath(nsd_id), nsd_msg))
+
+        return nsd_list
+
+    @asyncio.coroutine
+    def terminate_ns(self, nsr_id, xact):
+        """
+        Terminate network service for the given NSR Id
+        """
+
+        # Terminate the instances/networks associated with this nw service
+        self._log.debug("Terminating the network service %s", nsr_id)
+        yield from self._nsrs[nsr_id].terminate()
+
+        # Unref the NSD
+        yield from self.nsd_unref_by_nsr_id(nsr_id)
+
+        # Unpublish the NSR record
+        self._log.debug("Unpublishing the network service %s", nsr_id)
+        yield from self._nsrs[nsr_id].unpublish(xact)
+
+        # Finally delete the NS instance from this NS Manager
+        self._log.debug("Deleting the network service %s", nsr_id)
+        self.delete_nsr(nsr_id)
+
+
+class NsmRecordsPublisherProxy(object):
+    """ This class provides a publisher interface that allows plugin objects
+        to publish NSR/VNFR/VLR"""
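+
+    # Expected use by a plugin (illustrative sketch; vnfr_msg is a
+    # hypothetical VNFR protobuf message):
+    #
+    #   yield from proxy.publish_vnfr(xact, vnfr_msg)    # create/update
+    #   yield from proxy.unpublish_vnfr(xact, vnfr_msg)  # retract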
+
+    def __init__(self, dts, log, loop, nsr_pub_hdlr, vnfr_pub_hdlr, vlr_pub_hdlr):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsr_pub_hdlr = nsr_pub_hdlr
+        self._vlr_pub_hdlr = vlr_pub_hdlr
+        self._vnfr_pub_hdlr = vnfr_pub_hdlr
+
+    @asyncio.coroutine
+    def publish_nsr(self, xact, nsr):
+        """ Publish an NSR """
+        path = NetworkServiceRecord.xpath_from_nsr(nsr)
+        return (yield from self._nsr_pub_hdlr.update(xact, path, nsr))
+
+    @asyncio.coroutine
+    def unpublish_nsr(self, xact, nsr):
+        """ Unpublish an NSR """
+        path = NetworkServiceRecord.xpath_from_nsr(nsr)
+        return (yield from self._nsr_pub_hdlr.delete(xact, path))
+
+    @asyncio.coroutine
+    def publish_vnfr(self, xact, vnfr):
+        """ Publish an VNFR """
+        path = VirtualNetworkFunctionRecord.vnfr_xpath(vnfr)
+        return (yield from self._vnfr_pub_hdlr.update(xact, path, vnfr))
+
+    @asyncio.coroutine
+    def unpublish_vnfr(self, xact, vnfr):
+        """ Unpublish a VNFR """
+        path = VirtualNetworkFunctionRecord.vnfr_xpath(vnfr)
+        return (yield from self._vnfr_pub_hdlr.delete(xact, path))
+
+    @asyncio.coroutine
+    def publish_vlr(self, xact, vlr):
+        """ Publish a VLR """
+        path = VirtualLinkRecord.vlr_xpath(vlr)
+        return (yield from self._vlr_pub_hdlr.update(xact, path, vlr))
+
+    @asyncio.coroutine
+    def unpublish_vlr(self, xact, vlr):
+        """ Unpublish a VLR """
+        path = VirtualLinkRecord.vlr_xpath(vlr)
+        return (yield from self._vlr_pub_hdlr.delete(xact, path))
+
+
+class ScalingRpcHandler(mano_dts.DtsHandler):
+    """ The Network service Monitor DTS handler """
+    SCALE_IN_INPUT_XPATH = "I,/nsr:exec-scale-in"
+    SCALE_IN_OUTPUT_XPATH = "O,/nsr:exec-scale-in"
+
+    SCALE_OUT_INPUT_XPATH = "I,/nsr:exec-scale-out"
+    SCALE_OUT_OUTPUT_XPATH = "O,/nsr:exec-scale-out"
+
+    ACTION = Enum('ACTION', 'SCALE_IN SCALE_OUT')
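+
+    # Flow (sketch): a client issues nsr:exec-scale-in/out, on_*_prepare runs
+    # the registered callback with (xact, msg, ACTION.*) and ACKs the RPC with
+    # the instance-id in the output. last_instance_id remembers the most
+    # recent id handed out per scaling group, so a scale-out request that
+    # omits instance-id is allocated the next one (1, 2, ... per group).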
+
+    def __init__(self, log, dts, loop, callback=None):
+        super().__init__(log, dts, loop)
+        self.callback = callback
+        self.last_instance_id = defaultdict(int)
+
+    @asyncio.coroutine
+    def register(self):
+
+        @asyncio.coroutine
+        def on_scale_in_prepare(xact_info, action, ks_path, msg):
+            assert action == rwdts.QueryAction.RPC
+
+            try:
+                if self.callback:
+                    self.callback(xact_info.xact, msg, self.ACTION.SCALE_IN)
+
+                rpc_op = NsrYang.YangOutput_Nsr_ExecScaleIn.from_dict({
+                      "instance_id": msg.instance_id})
+
+                xact_info.respond_xpath(
+                    rwdts.XactRspCode.ACK,
+                    self.__class__.SCALE_IN_OUTPUT_XPATH,
+                    rpc_op)
+
+            except Exception as e:
+                self.log.exception(e)
+                xact_info.respond_xpath(
+                    rwdts.XactRspCode.NACK,
+                    self.__class__.SCALE_IN_OUTPUT_XPATH)
+
+        @asyncio.coroutine
+        def on_scale_out_prepare(xact_info, action, ks_path, msg):
+            assert action == rwdts.QueryAction.RPC
+
+            try:
+                scaling_group = msg.scaling_group_name_ref
+                if not msg.instance_id:
+                    last_instance_id = self.last_instance_id[scaling_group]
+                    msg.instance_id = last_instance_id + 1
+                    self.last_instance_id[scaling_group] += 1
+
+                if self.callback:
+                    self.callback(xact_info.xact, msg, self.ACTION.SCALE_OUT)
+
+                rpc_op = NsrYang.YangOutput_Nsr_ExecScaleOut.from_dict({
+                      "instance_id": msg.instance_id})
+
+                xact_info.respond_xpath(
+                    rwdts.XactRspCode.ACK,
+                    self.__class__.SCALE_OUT_OUTPUT_XPATH,
+                    rpc_op)
+
+            except Exception as e:
+                self.log.exception(e)
+                xact_info.respond_xpath(
+                      rwdts.XactRspCode.NACK,
+                      self.__class__.SCALE_OUT_OUTPUT_XPATH)
+
+        scale_in_hdl = rift.tasklets.DTS.RegistrationHandler(
+              on_prepare=on_scale_in_prepare)
+        scale_out_hdl = rift.tasklets.DTS.RegistrationHandler(
+              on_prepare=on_scale_out_prepare)
+
+        with self.dts.group_create() as group:
+            group.register(
+                  xpath=self.__class__.SCALE_IN_INPUT_XPATH,
+                  handler=scale_in_hdl,
+                  flags=rwdts.Flag.PUBLISHER)
+            group.register(
+                  xpath=self.__class__.SCALE_OUT_INPUT_XPATH,
+                  handler=scale_out_hdl,
+                  flags=rwdts.Flag.PUBLISHER)
+
+
+class NsmTasklet(rift.tasklets.Tasklet):
+    """
+    The network service manager tasklet
+    """
+    def __init__(self, *args, **kwargs):
+        super(NsmTasklet, self).__init__(*args, **kwargs)
+        self.rwlog.set_category("rw-mano-log")
+        self.rwlog.set_subcategory("nsm")
+
+        self._dts = None
+        self._nsm = None
+
+        self._ro_plugin_selector = None
+        self._vnffgmgr = None
+
+        self._nsr_handler = None
+        self._vnfr_pub_handler = None
+        self._vlr_pub_handler = None
+        self._vnfd_pub_handler = None
+        self._scale_cfg_handler = None
+
+        self._records_publisher_proxy = None
+
+    def start(self):
+        """ The task start callback """
+        super(NsmTasklet, self).start()
+        self.log.info("Starting NsmTasklet")
+
+        self.log.debug("Registering with dts")
+        self._dts = rift.tasklets.DTS(self.tasklet_info,
+                                      RwNsmYang.get_schema(),
+                                      self.loop,
+                                      self.on_dts_state_change)
+
+        self.log.debug("Created DTS Api GI Object: %s", self._dts)
+
+    def stop(self):
+        try:
+            self._dts.deinit()
+        except Exception:
+            print("Caught Exception in NSM stop:", sys.exc_info()[0])
+            raise
+
+    def on_instance_started(self):
+        """ Task instance started callback """
+        self.log.debug("Got instance started callback")
+
+    @asyncio.coroutine
+    def init(self):
+        """ Task init callback """
+        self.log.debug("Got instance started callback")
+
+        self.log.debug("creating config account handler")
+
+        self._nsr_pub_handler = publisher.NsrOpDataDtsHandler(self._dts, self.log, self.loop)
+        yield from self._nsr_pub_handler.register()
+
+        self._vnfr_pub_handler = publisher.VnfrPublisherDtsHandler(self._dts, self.log, self.loop)
+        yield from self._vnfr_pub_handler.register()
+
+        self._vlr_pub_handler = publisher.VlrPublisherDtsHandler(self._dts, self.log, self.loop)
+        yield from self._vlr_pub_handler.register()
+
+        manifest = self.tasklet_info.get_pb_manifest()
+        use_ssl = manifest.bootstrap_phase.rwsecurity.use_ssl
+        ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
+        ssl_key = manifest.bootstrap_phase.rwsecurity.key
+
+        self._vnfd_pub_handler = publisher.VnfdPublisher(use_ssl, ssl_cert, ssl_key, self.loop)
+
+        self._records_publisher_proxy = NsmRecordsPublisherProxy(
+                self._dts,
+                self.log,
+                self.loop,
+                self._nsr_pub_handler,
+                self._vnfr_pub_handler,
+                self._vlr_pub_handler,
+                )
+
+        # Register the NSM to receive the nsm plugin
+        # when cloud account is configured
+        self._ro_plugin_selector = cloud.ROAccountPluginSelector(
+                self._dts,
+                self.log,
+                self.loop,
+                self._records_publisher_proxy,
+                )
+        yield from self._ro_plugin_selector.register()
+
+        self._cloud_account_handler = cloud.CloudAccountConfigSubscriber(
+                self._log,
+                self._dts,
+                self.log_hdl)
+
+        yield from self._cloud_account_handler.register()
+
+        self._vnffgmgr = rwvnffgmgr.VnffgMgr(self._dts, self.log, self.log_hdl, self.loop)
+        yield from self._vnffgmgr.register()
+
+        self._nsm = NsManager(
+                self._dts,
+                self.log,
+                self.loop,
+                self._nsr_pub_handler,
+                self._vnfr_pub_handler,
+                self._vlr_pub_handler,
+                self._ro_plugin_selector,
+                self._vnffgmgr,
+                self._vnfd_pub_handler,
+                self._cloud_account_handler
+                )
+
+        yield from self._nsm.register()
+
+    @asyncio.coroutine
+    def run(self):
+        """ Task run callback """
+        pass
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Take action according to current dts state to transition
+        application into the corresponding application state
+
+        Arguments
+            state - current dts state
+        """
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
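+        # e.g. INIT runs init() (which registers every handler) and advances
+        # DTS to REGN_COMPLETE; CONFIG has no handler here and simply
+        # advances to RUN; RUN invokes run().
+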
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self.log.debug("Changing state to %s", next_state)
+            self._dts.handle.set_state(next_state)
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwvnffgmgr.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/rwvnffgmgr.py
new file mode 100755 (executable)
index 0000000..0ebe9df
--- /dev/null
@@ -0,0 +1,422 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+
+from gi.repository import (
+    RwDts as rwdts,
+    RwsdnYang,
+    RwTypes,
+    ProtobufC,
+)
+
+from gi.repository.RwTypes import RwStatus
+import rw_peas
+import rift.tasklets
+
+class SdnGetPluginError(Exception):
+    """ Error while fetching SDN plugin """
+    pass
+
+
+class SdnGetInterfaceError(Exception):
+    """ Error while fetching SDN interface"""
+    pass
+
+
+class SdnAccountError(Exception):
+    """ Error while creating/deleting/updating SDN Account"""
+    pass
+
+class VnffgrDoesNotExist(Exception):
+    """ Error while fetching SDN interface"""
+    pass
+
+class VnffgrAlreadyExist(Exception):
+    """ Vnffgr already exists Error"""
+    pass
+
+class VnffgrCreationFailed(Exception):
+    """ Error while creating VNFFGR"""
+    pass
+
+
+class VnffgrUpdateFailed(Exception):
+    """ Error while updating VNFFGR"""
+    pass
+
+class VnffgMgr(object):
+    """ Implements the interface to backend plugins to fetch topology """
+    def __init__(self, dts, log, log_hdl, loop):
+        self._account = {}
+        self._dts = dts
+        self._log = log
+        self._log_hdl = log_hdl
+        self._loop = loop
+        self._sdn = {}
+        self._sdn_handler = SDNAccountDtsHandler(self._dts,self._log,self)
+        self._vnffgr_list = {}
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self._sdn_handler.register()
+
+    def set_sdn_account(self,account):
+        if (account.name in self._account):
+            self._log.error("SDN Account is already set")
+        else:
+            sdn_account           = RwsdnYang.SDNAccount()
+            sdn_account.from_dict(account.as_dict())
+            sdn_account.name = account.name
+            self._account[account.name] = sdn_account
+            self._log.debug("Account set is %s , %s",type(self._account), self._account)
+
+    def del_sdn_account(self, name):
+        self._log.debug("Account deleted is %s , %s", type(self._account), name)
+        del self._account[name]
+
+    def update_sdn_account(self,account):
+        self._log.debug("Account updated is %s , %s", type(self._account), account)
+        if account.name in self._account:
+            sdn_account = self._account[account.name]
+
+            sdn_account.from_dict(
+                account.as_dict(),
+                ignore_missing_keys=True,
+                )
+            self._account[account.name] = sdn_account
+
+    def get_sdn_account(self, name):
+        """
+        Creates an object for class RwsdnYang.SdnAccount()
+        """
+        if (name in self._account):
+            return self._account[name]
+        else:
+            self._log.error("SDN account is not configured")
+
+
+    def get_sdn_plugin(self,name):
+        """
+        Loads rw.sdn plugin via libpeas
+        """
+        if (name in self._sdn):
+            return self._sdn[name]
+        account = self.get_sdn_account(name)
+        plugin_name = getattr(account, account.account_type).plugin_name
+        self._log.debug("SDN plugin being created")
+        plugin = rw_peas.PeasPlugin(plugin_name, 'RwSdn-1.0')
+        engine, info, extension = plugin()
+
+        self._sdn[name] = plugin.get_interface("Topology")
+        try:
+            rc = self._sdn[name].init(self._log_hdl)
+            assert rc == RwStatus.SUCCESS
+        except Exception:
+            self._log.exception("SDN plugin instantiation failed")
+        else:
+            self._log.debug("SDN plugin successfully instantiated")
+        return self._sdn[name]
+
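+    # Typical use within this class (sketch; mirrors update_vnffgrs below):
+    #
+    #   plugin = self.get_sdn_plugin(name)
+    #   rc, rs = plugin.get_vnffg_rendered_paths(self._account[name])
+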
+    def fetch_vnffgr(self,vnffgr_id):
+        if vnffgr_id not in self._vnffgr_list:
+            self._log.error("VNFFGR with id %s not present in VNFFGMgr", vnffgr_id)
+            msg = "VNFFGR with id {} not present in VNFFGMgr".format(vnffgr_id)
+            raise VnffgrDoesNotExist(msg)
+        self.update_vnffgrs(self._vnffgr_list[vnffgr_id].sdn_account)
+        vnffgr = self._vnffgr_list[vnffgr_id].deep_copy()
+        self._log.debug("VNFFGR for id %s is %s",vnffgr_id,vnffgr)
+        return vnffgr
+
+    def create_vnffgr(self, vnffgr,classifier_list,sff_list):
+        """
+        """
+        self._log.debug("Received VNFFG chain Create msg %s",vnffgr)
+        if vnffgr.id in self._vnffgr_list:
+            self._log.error("VNFFGR with id %s already present in VNFFGMgr", vnffgr.id)
+            vnffgr.operational_status = 'failed'
+            msg = "VNFFGR with id {} already present in VNFFGMgr".format(vnffgr.id)
+            raise VnffgrAlreadyExist(msg)
+
+        self._vnffgr_list[vnffgr.id] = vnffgr
+        vnffgr.operational_status = 'init'
+        if len(self._account) == 0:
+            self._log.error("SDN Account not configured")
+            vnffgr.operational_status = 'failed'
+            return
+        if vnffgr.sdn_account:
+            sdn_acct_name = vnffgr.sdn_account
+        else:
+            self._log.error("SDN Account is not associated to create VNFFGR")
+            # TODO Fail the VNFFGR creation if SDN account is not associated
+            #vnffgr.operational_status = 'failed'
+            #msg = "SDN Account is not associated to create VNFFGR"
+            #raise VnffgrCreationFailed(msg)
+            sdn_account = [sdn_account.name for _,sdn_account in self._account.items()]
+            sdn_acct_name = sdn_account[0]
+            vnffgr.sdn_account = sdn_acct_name
+        sdn_plugin = self.get_sdn_plugin(sdn_acct_name)
+
+        for rsp in vnffgr.rsp:
+            vnffg = RwsdnYang.VNFFGChain()
+            vnffg.name = rsp.name
+            vnffg.classifier_name = rsp.classifier_name
+
+            vnfr_list = list()
+            for index,cp_ref in enumerate(rsp.vnfr_connection_point_ref):
+                cpath = vnffg.vnf_chain_path.add()
+                cpath.order=cp_ref.hop_number
+                cpath.service_function_type = cp_ref.service_function_type
+                cpath.nsh_aware=True
+                cpath.transport_type = 'vxlan-gpe'
+
+                vnfr=cpath.vnfr_ids.add()
+                vnfr.vnfr_id = cp_ref.vnfr_id_ref
+                vnfr.vnfr_name = cp_ref.vnfr_name_ref
+                vnfr.mgmt_address = cp_ref.connection_point_params.mgmt_address
+                vnfr.mgmt_port = 5000
+                vnfr_list.append(vnfr)
+            
+                vdu = vnfr.vdu_list.add()
+                vdu.name = cp_ref.connection_point_params.name
+                vdu.port_id = cp_ref.connection_point_params.port_id
+                vdu.vm_id = cp_ref.connection_point_params.vm_id
+                vdu.address = cp_ref.connection_point_params.address
+                vdu.port =  cp_ref.connection_point_params.port
+
+            for sff in sff_list.values():
+                _sff = vnffg.sff.add()
+                _sff.from_dict(sff.as_dict())
+                if sff.function_type == 'SFF':
+                    for vnfr in vnfr_list:
+                        vnfr.sff_name = sff.name
+                self._log.debug("Recevied SFF %s, Created SFF is %s",sff, _sff)
+
+            self._log.debug("VNFFG chain msg is %s",vnffg)
+            rc,rs = sdn_plugin.create_vnffg_chain(self._account[sdn_acct_name],vnffg)
+            if rc != RwTypes.RwStatus.SUCCESS:
+                vnffgr.operational_status = 'failed'
+                msg = "Instantiation of VNFFGR with id {} failed".format(vnffgr.id)
+                raise VnffgrCreationFailed(msg)
+
+            self._log.info("VNFFG chain created successfully for rsp with id %s",rsp.id)
+
+
+        meta = {}
+        if len(classifier_list) == 2:
+            meta[vnffgr.classifier[0].id] = '0x' + ''.join(
+                "%02X" % int(octet) for octet in vnffgr.classifier[1].ip_address.split('.'))
+            meta[vnffgr.classifier[1].id] = '0x' + ''.join(
+                "%02X" % int(octet) for octet in vnffgr.classifier[0].ip_address.split('.'))
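+            # e.g. '192.168.1.10' -> '0xC0A8010A': one zero-padded hex octet
+            # per dotted-quad byte, later carried to the classifier in ctx1.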
+            
+        self._log.debug("VNFFG Meta VNFFG chain is {}".format(meta))
+        
+        for classifier in classifier_list:
+            vnffgr_cl = [_classifier  for _classifier in vnffgr.classifier if classifier.id == _classifier.id]
+            if len(vnffgr_cl) > 0:
+                cl_rsp_name = vnffgr_cl[0].rsp_name
+            else:
+                self._log.error("No RSP wiht name %s found; Skipping classifier %s creation",classifier.rsp_id_ref,classifier.name)
+                continue
+            vnffgcl = RwsdnYang.VNFFGClassifier()
+            vnffgcl.name = classifier.name
+            vnffgcl.rsp_name = cl_rsp_name
+            vnffgcl.port_id = vnffgr_cl[0].port_id
+            vnffgcl.vm_id = vnffgr_cl[0].vm_id
+            # Get the symmetric classifier endpoint ip and set it in nsh ctx1
+            
+            vnffgcl.vnffg_metadata.ctx1 = meta.get(vnffgr_cl[0].id, '0')
+            vnffgcl.vnffg_metadata.ctx2 = '0'
+            vnffgcl.vnffg_metadata.ctx3 = '0'
+            vnffgcl.vnffg_metadata.ctx4 = '0'
+            if vnffgr_cl[0].has_field('sff_name'):
+                vnffgcl.sff_name = vnffgr_cl[0].sff_name
+            for index,match_rule in enumerate(classifier.match_attributes):
+                acl = vnffgcl.match_attributes.add()
+                #acl.name = vnffgcl.name + str(index)
+                acl.name = match_rule.id
+                acl.ip_proto  = match_rule.ip_proto
+                acl.source_ip_address = match_rule.source_ip_address + '/32'
+                acl.source_port = match_rule.source_port
+                acl.destination_ip_address = match_rule.destination_ip_address + '/32'
+                acl.destination_port = match_rule.destination_port
+
+            self._log.debug(" Creating VNFFG Classifier Classifier %s for RSP: %s",vnffgcl.name,vnffgcl.rsp_name)
+            rc,rs = sdn_plugin.create_vnffg_classifier(self._account[sdn_acct_name],vnffgcl)
+            if rc != RwTypes.RwStatus.SUCCESS:
+                self._log.error("VNFFG Classifier cretaion failed for Classifier %s for RSP ID: %s",classifier.name,classifier.rsp_id_ref)
+                #vnffgr.operational_status = 'failed'
+                #msg = "Instantiation of VNFFGR with id {} failed".format(vnffgr.id)
+                #raise VnffgrCreationFailed(msg)
+
+        vnffgr.operational_status = 'running'
+        self.update_vnffgrs(vnffgr.sdn_account)
+        return vnffgr
+
+    def update_vnffgrs(self,sdn_acct_name):
+        """
+        Update VNFFGR by reading data from SDN Plugin
+        """
+        sdn_plugin = self.get_sdn_plugin(sdn_acct_name)
+        rc,rs = sdn_plugin.get_vnffg_rendered_paths(self._account[sdn_acct_name])
+        if rc != RwTypes.RwStatus.SUCCESS:
+            msg = "Reading of VNFFGR from SDN Plugin failed"
+            raise VnffgrUpdateFailed(msg)
+
+        vnffgr_list = [_vnffgr for _vnffgr in self._vnffgr_list.values()  if _vnffgr.sdn_account == sdn_acct_name and _vnffgr.operational_status == 'running']
+
+        for _vnffgr in vnffgr_list:
+            for _vnffgr_rsp in _vnffgr.rsp:
+                vnffg_rsp_list = [vnffg_rsp for vnffg_rsp in rs.vnffg_rendered_path if vnffg_rsp.name == _vnffgr_rsp.name]
+                if vnffg_rsp_list is not None and len(vnffg_rsp_list) > 0:
+                    vnffg_rsp = vnffg_rsp_list[0]
+                    if len(vnffg_rsp.rendered_path_hop) != len(_vnffgr_rsp.vnfr_connection_point_ref):
+                        _vnffgr.operational_status = 'failed'
+                        self._log.error("Received hop count %d doesnt match the VNFFGD hop count %d", len(vnffg_rsp.rendered_path_hop),
+                                         len(_vnffgr_rsp.vnfr_connection_point_ref))
+                        msg = "Fetching of VNFFGR with id {} failed".format(_vnffgr.id)
+                        raise VnffgrUpdateFailed(msg)
+                    _vnffgr_rsp.path_id =  vnffg_rsp.path_id
+                    for index, rendered_hop in enumerate(vnffg_rsp.rendered_path_hop):
+                        for  vnfr_cp_ref in _vnffgr_rsp.vnfr_connection_point_ref:
+                            if rendered_hop.vnfr_name == vnfr_cp_ref.vnfr_name_ref:
+                               vnfr_cp_ref.hop_number = rendered_hop.hop_number
+                               vnfr_cp_ref.service_index = rendered_hop.service_index
+                               vnfr_cp_ref.service_function_forwarder.name = rendered_hop.service_function_forwarder.name
+                               vnfr_cp_ref.service_function_forwarder.ip_address = rendered_hop.service_function_forwarder.ip_address
+                               vnfr_cp_ref.service_function_forwarder.port = rendered_hop.service_function_forwarder.port
+                else:
+                    _vnffgr.operational_status = 'failed'
+                    self._log.error("VNFFGR RSP with name %s in VNFFG %s not found",_vnffgr_rsp.name, _vnffgr.id)
+                    msg = "Fetching of VNFFGR with name {} failed".format(_vnffgr_rsp.name)
+                    raise VnffgrUpdateFailed(msg)
+
+
+    def terminate_vnffgr(self,vnffgr_id,sdn_account_name = None):
+        """
+        Delete the VNFFG chain
+        """
+        if vnffgr_id not in self._vnffgr_list:
+            self._log.error("VNFFGR with id %s not present in VNFFGMgr during termination", vnffgr_id)
+            msg = "VNFFGR with id {} not present in VNFFGMgr during termination".format(vnffgr_id)
+            return
+            #raise VnffgrDoesNotExist(msg)
+        self._log.info("Received VNFFG chain terminate for id %s",vnffgr_id)
+        if sdn_account_name is None:
+            sdn_account = [sdn_account.name for _,sdn_account in self._account.items()]
+            sdn_account_name = sdn_account[0]
+        sdn_plugin = self.get_sdn_plugin(sdn_account_name)
+        sdn_plugin.terminate_vnffg_chain(self._account[sdn_account_name],vnffgr_id)
+        sdn_plugin.terminate_vnffg_classifier(self._account[sdn_account_name],vnffgr_id)
+        del self._vnffgr_list[vnffgr_id]
+
+class SDNAccountDtsHandler(object):
+    XPATH = "C,/rw-sdn:sdn-account"
+
+    def __init__(self, dts, log, parent):
+        self._dts = dts
+        self._log = log
+        self._parent = parent
+
+        self._sdn_account = {}
+
+    def _set_sdn_account(self, account):
+        self._log.info("Setting sdn account: {}".format(account))
+        if account.name in self._sdn_account:
+            self._log.error("SDN Account with name %s already exists. Ignoring config", account.name)
+            return
+        self._sdn_account[account.name] = account
+        self._parent.set_sdn_account(account)
+
+    def _del_sdn_account(self, account_name):
+        self._log.info("Deleting sdn account: {}".format(account_name))
+        del self._sdn_account[account_name]
+
+        self._parent.del_sdn_account(account_name)
+
+    def _update_sdn_account(self, account):
+        self._log.info("Updating sdn account: {}".format(account))
+        # No need to update locally saved sdn_account's updated fields, as they
+        # are not used anywhere. Call the parent's update callback.
+        self._parent.update_sdn_account(account)
+
+    @asyncio.coroutine
+    def register(self):
+        def apply_config(dts, acg, xact, action, _):
+            self._log.debug("Got sdn account apply config (xact: %s) (action: %s)", xact, action)
+            if action == rwdts.AppconfAction.INSTALL and xact.id is None:
+                self._log.debug("No xact handle.  Skipping apply config")
+                return RwTypes.RwStatus.SUCCESS
+
+            return RwTypes.RwStatus.SUCCESS
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+            """ Prepare callback from DTS for SDN Account config """
+
+            self._log.info("SDN Cloud account config received: %s", msg)
+
+            fref = ProtobufC.FieldReference.alloc()
+            fref.goto_whole_message(msg.to_pbcm())
+
+            if fref.is_field_deleted():
+                # Delete the sdn account record
+                self._del_sdn_account(msg.name)
+            else:
+                # If the account already exists, then this is an update.
+                if msg.name in self._sdn_account:
+                    self._log.debug("SDN account already exists. Invoking on_prepare update request")
+                    if msg.has_field("account_type"):
+                        errmsg = "Cannot update SDN account's account-type."
+                        self._log.error(errmsg)
+                        xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+                                                   SDNAccountDtsHandler.XPATH,
+                                                   errmsg)
+                        raise SdnAccountError(errmsg)
+
+                    # Update the sdn account record
+                    self._update_sdn_account(msg)
+                else:
+                    self._log.debug("SDN account does not already exist. Invoking on_prepare add request")
+                    if not msg.has_field('account_type'):
+                        errmsg = "New SDN account must contain account-type field."
+                        self._log.error(errmsg)
+                        xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+                                                   SDNAccountDtsHandler.XPATH,
+                                                   errmsg)
+                        raise SdnAccountError(errmsg)
+
+                    # Set the sdn account record
+                    self._set_sdn_account(msg)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+
+        self._log.debug("Registering for Sdn Account config using xpath: %s",
+                        SDNAccountDtsHandler.XPATH,
+                        )
+
+        acg_handler = rift.tasklets.AppConfGroup.Handler(
+                        on_apply=apply_config,
+                        )
+
+        with self._dts.appconf_group_create(acg_handler) as acg:
+            acg.register(
+                    xpath=SDNAccountDtsHandler.XPATH,
+                    flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
+                    on_prepare=on_prepare
+                    )
+
+
+
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/scale_group.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/scale_group.py
new file mode 100644 (file)
index 0000000..8bbf894
--- /dev/null
@@ -0,0 +1,280 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import time
+
+from enum import Enum
+
+from gi.repository import NsdYang, NsrYang
+
+
+class ScalingGroupIndexExists(Exception):
+    pass
+
+
+class ScaleGroupTrigger(Enum):
+    """ Trigger for scaling config """
+    PRE_SCALE_IN = 1
+    POST_SCALE_IN = 2
+    PRE_SCALE_OUT = 3
+    POST_SCALE_OUT = 4
+
+
+class ScaleGroupState(Enum):
+    """ Scaling group state  """
+    RUNNING = 1
+    SCALING_IN = 2
+    SCALING_OUT = 3
+
+
+class ScalingGroup(object):
+    """ This represents a configured NSR scaling group """
+    def __init__(self, log, group_msg):
+        """ Create a ScalingGroup instance
+
+        This class is responsible for representing a configured scaling group
+        which is present within an NSR.
+
+        :param log: A logger instance
+        :param group_msg: A NSD scaling group pb message
+        """
+        self._log = log
+        self._group_msg = group_msg
+
+        self._instances = {}
+
+    def __str__(self):
+        return "ScalingGroup(%s)" % self.name
+
+    @property
+    def name(self):
+        """ Name of the scaling group """
+        return self._group_msg.name
+
+    @property
+    def state(self):
+        """ State of the scaling group """
+        state = ScaleGroupState.RUNNING
+        for instance in self._instances.values():
+            if instance.operational_status in ["init", "vnf_init_phase"]:
+                self._log.debug("Scaling instance %s in scaling-out state: %s",
+                                instance, instance.operational_status)
+                state = ScaleGroupState.SCALING_OUT
+
+            elif instance.operational_status in ["terminate", "vnf_terminate_phase"]:
+                self._log.debug("Scaling instance %s in scaling-in state: %s",
+                                instance, instance.operational_status)
+                state = ScaleGroupState.SCALING_IN
+
+        return state
+
+    @property
+    def vnf_index_count_map(self):
+        """ The mapping of member_vnf_index_ref to count"""
+        return {mbr.member_vnf_index_ref: mbr.count for mbr in self._group_msg.vnfd_member}
+
+    @property
+    def group_msg(self):
+        """ Return the scale group PB message """
+        return self._group_msg
+
+    @property
+    def min_instance_count(self):
+        """ Minimum (and default) number of instance of the scaling group """
+        return self._group_msg.min_instance_count
+
+    @property
+    def max_instance_count(self):
+        """ Maximum number of instance of the scaling group """
+        return self._group_msg.max_instance_count
+
+    def create_record_msg(self):
+        """ Returns a NSR Scaling group record """
+        msg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ScalingGroupRecord(
+                scaling_group_name_ref=self.name,
+                )
+
+        for instance in self.instances:
+            msg.instance.append(instance.create_record_msg())
+
+        return msg
+
+    @property
+    def instances(self):
+        return self._instances.values()
+
+    def get_instance(self, instance_id):
+        """ Get a scaling group instance
+
+        :param instance_id: The instance's instance_id
+        """
+        return self._instances[instance_id]
+
+    def create_instance(self, instance_id, is_default=False):
+        """ Create a scaling group instance
+
+        :param instance_id: The new instance's instance_id
+        """
+        self._log.debug("Creating %s instance instance_id %s ", self, instance_id)
+
+        if instance_id in self._instances:
+            raise ScalingGroupIndexExists("%s instance_id %s already exists" % (self, instance_id))
+
+        instance = ScalingGroupInstance(
+                log=self._log,
+                group_name=self.name,
+                instance_id=instance_id,
+                is_default=is_default,
+                )
+
+        self._instances[instance_id] = instance
+
+        return instance
+
+    def delete_instance(self, instance_id):
+        self._log.debug("Deleting %s instance instance_id %s ", self, instance_id)
+        del self._instances[instance_id]
+
+    def trigger_map(self, trigger):
+        trig_map = {
+            NsdYang.ScalingTrigger.PRE_SCALE_IN   : 'pre_scale_in',
+            NsdYang.ScalingTrigger.POST_SCALE_IN  : 'post_scale_in',
+            NsdYang.ScalingTrigger.PRE_SCALE_OUT  : 'pre_scale_out',
+            NsdYang.ScalingTrigger.POST_SCALE_OUT : 'post_scale_out',
+        }
+
+        try:
+            return trig_map[trigger]
+        except Exception as e:
+            self._log.error("Unknown scaling group trigger passed: {}".format(trigger))
+            self._log.exception(e)
+
+    def trigger_config(self, trigger):
+        """ Get the config action for the trigger """
+        self._log.debug("Trigger config {}: {}".format(trigger, self._group_msg))
+        trig = self.trigger_map(trigger)
+        if trig is None:
+            return
+
+        for config in self._group_msg.scaling_config_action:
+            if trig == config.trigger:
+                return config
+
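+# How the NSM is expected to consult trigger_config (sketch; running the
+# config primitive itself happens elsewhere):
+#
+#   cfg = group.trigger_config(NsdYang.ScalingTrigger.PRE_SCALE_OUT)
+#   if cfg is not None:
+#       ...run cfg's config primitive before instantiating the new VNFs...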
+
+class ScalingGroupInstance(object):
+    """  This class represents a configured NSR Scaling Group instance"""
+
+    valid_status_list = (
+      "init",
+      "vnf_init_phase",
+      "running",
+      "terminate",
+      "vnf_terminate_phase",
+      "terminated",
+      "failed",
+      )
+
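+    # Expected lifecycle (inferred from the status names and the scaling
+    # group state property above): init -> vnf_init_phase -> running on
+    # scale-out, then terminate -> vnf_terminate_phase -> terminated on
+    # scale-in; "failed" is terminal.
+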
+    valid_config_status_list = (
+        "configuring",
+        "configured",
+        "failed",
+    )
+
+    def __init__(self, log, group_name, instance_id, is_default=False):
+        self._log = log
+        self._group_name = group_name
+        self._instance_id = instance_id
+        self._is_default = is_default
+
+        self._vnfrs = {}
+
+        self._create_time = int(time.time())
+        self._op_status = "init"
+        self._config_status = "configuring"
+        self._config_err_msg = None
+
+    def __str__(self):
+        return "ScalingGroupInstance(%s #%s)" % (self._group_name, self.instance_id)
+
+    @property
+    def operational_status(self):
+        return self._op_status
+
+    @operational_status.setter
+    def operational_status(self, op_status):
+        if op_status not in ScalingGroupInstance.valid_status_list:
+            raise ValueError("Invalid scaling group instance status: %s", op_status)
+
+        self._op_status = op_status
+
+    @property
+    def config_status(self):
+        return self._config_status
+
+    @config_status.setter
+    def config_status(self, status):
+        if status not in ScalingGroupInstance.valid_config_status_list:
+            raise ValueError("%s, invalid status: %s",
+                             self, status)
+
+        self._config_status = status
+
+    @property
+    def config_err_msg(self):
+        return self._config_err_msg
+
+    @config_err_msg.setter
+    def config_err_msg(self, msg):
+        if self.config_err_msg is not None:
+            self._log.info("%s, overwriting previous config error msg '%s' with '%s'",
+                           self, self.config_err_msg, msg)
+
+        self._config_err_msg = msg
+
+    @property
+    def instance_id(self):
+        return self._instance_id
+
+    @property
+    def is_default(self):
+        return self._is_default
+
+    @property
+    def vnfrs(self):
+        """ Return all VirtualNetworkFunctionRecord's that have been added"""
+        return self._vnfrs.values()
+
+    def create_record_msg(self):
+        msg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ScalingGroupRecord_Instance(
+                instance_id=self._instance_id,
+                create_time=self._create_time,
+                op_status=self._op_status,
+                config_status=self._config_status,
+                error_msg=self._config_err_msg,
+                is_default=self._is_default
+                )
+
+        for vnfr in self.vnfrs:
+            msg.vnfrs.append(vnfr.id)
+
+        return msg
+
+    def add_vnfr(self, vnfr):
+        """ Add a VirtualNetworkFunctionRecord"""
+        self._log.debug("Added %s to %s", vnfr, self)
+        self._vnfrs[vnfr.id] = vnfr
+
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/so_endpoint_cfg.xml b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/so_endpoint_cfg.xml
new file mode 100644 (file)
index 0000000..ef09f1e
--- /dev/null
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="1">
+  <data>
+    <cm-config xmlns="http://riftio.com/ns/riftware-1.0/rw-conman">
+      <initiate-nsr-cfg></initiate-nsr-cfg>
+    </cm-config>
+  </data>
+</rpc-reply>
diff --git a/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/xpath.py b/rwlaunchpad/plugins/rwnsm/rift/tasklets/rwnsmtasklet/xpath.py
new file mode 100755 (executable)
index 0000000..11b7127
--- /dev/null
@@ -0,0 +1,365 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import re
+
+
+class Attribute(collections.namedtuple("Attribute", "module name")):
+    def __repr__(self):
+        return "{}:{}".format(self.module, self.name)
+
+
+class ListElement(collections.namedtuple("List", "module name key value")):
+    def __repr__(self):
+        return "{}:{}[{}={}]".format(self.module, self.name, self.key, self.value)
+
+
+def tokenize(xpath):
+    """Return a list of tokens representing an xpath
+
+    The types of xpaths that this selector supports is extremely limited.
+    The xpath is required to be an absolute path delimited by a
+    forward-slash. Each of the parts (elements between delimiters) is
+    treated as one of two possible types:
+
+        - an attribute
+        - a list element
+
+    An attribute is a normal python attribute on an object. A list element
+    is an element within a list, which is identified by a key value (like a
+    yang list, although this is more properly a dict in python).
+
+    Each attribute is expected to have the form,
+
+        <namespace>:<variable-name>
+
+    A valid variable name (or namespace) follows the python regular expression,
+
+        [a-zA-Z0-9_-]+
+
+    A list entry has the form,
+
+        <namespace>:<variable-name>[<namespace>:<variable-name>=<value>]
+
+    The expression in the square brackets is the key of the required
+    element, and the value that that key must have.
+
+    Arguments:
+        xpath - a string containing an xpath expression
+
+    Raises:
+        A ValueError is raised if the xpath cannot be parsed.
+
+    Returns:
+        a list of tokens
+
+    """
+    # define the symbols that are valid for a variable name in yang
+    # (the '-' is placed last in the class so it is a literal, not a range)
+    name = "[a-zA-Z0-9_-]+"
+
+    # define a set of regular expressions for parsing the xpath
+    # (raw strings avoid invalid-escape warnings for \s, \[ and \])
+    pattern_attribute = re.compile(r"({t}):({t})$".format(t=name))
+    pattern_key_value = re.compile(r"^{t}:({t})\s*=\s*(.*)$".format(t=name))
+    pattern_quote = re.compile("^['\"](.*)['\"]$")
+    pattern_list = re.compile(r"^(.*)\[(.*)\]$")
+
+    def dash_to_underscore(text):
+        return text.replace('-', '_')
+
+    # Iterate through the parts of the xpath (NB: because the xpaths are
+    # required to be absolute paths, the first character is going to be the
+    # forward slash. As a result, when the string is split, the first
+    # element with be an empty string).
+    tokens = list()
+    for part in xpath.split("/")[1:]:
+
+        # Test the part to see if it is a attribute
+        result = pattern_attribute.match(part)
+        if result is not None:
+            module, name = result.groups()
+
+            # Convert the dashes to underscores
+            name = dash_to_underscore(name)
+            module = dash_to_underscore(module)
+
+            tokens.append(Attribute(module, name))
+
+            continue
+
+        # Test the part to see if it is a list
+        result = pattern_list.match(part)
+        if result is not None:
+            attribute, keyvalue = result.groups()
+
+            module, name = pattern_attribute.match(attribute).groups()
+            key, value = pattern_key_value.match(keyvalue).groups()
+
+            # Convert the dashes to underscore (but not in the key value)
+            key = dash_to_underscore(key)
+            name = dash_to_underscore(name)
+            module = dash_to_underscore(module)
+
+            result = pattern_quote.match(value)
+            if result is not None:
+                value = result.group(1)
+
+            tokens.append(ListElement(module, name, key, value))
+
+            continue
+
+        raise ValueError("cannot parse '{}'".format(part))
+
+    return tokens
+
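+# Illustrative tokenization (made-up xpath; repr of the namedtuples shown):
+#
+#   tokenize("/nsr:ns-instance-config/nsr:nsr[nsr:id='abc']")
+#   -> [nsr:ns_instance_config, nsr:nsr[id=abc]]
+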
+
+class XPathAttribute(object):
+    """
+    This class is used to represent a reference to an attribute. If you use
+    getattr on an attribute, it may give you the value of the attribute rather
+    than a reference to it. What is really wanted is a representation of the
+    attribute so that its value can be both retrieved and set. That is what
+    this class provides.
+    """
+
+    def __init__(self, obj, name):
+        """Create an instance of XPathAttribute
+
+        Arguments:
+            obj  - the object containing the attribute
+            name - the name of an attribute
+
+        Raises:
+            A ValueError is raised if the provided object does not have the
+            associated attribute.
+
+        """
+        if not hasattr(obj, name):
+            msg = "The provided object does not contain the associated attribute"
+            raise ValueError(msg)
+
+        self.obj = obj
+        self.name = name
+
+    def __repr__(self):
+        return self.value
+
+    @property
+    def value(self):
+        return getattr(self.obj, self.name)
+
+    @value.setter
+    def value(self, value):
+        """Set the value of the attribute
+
+        Arguments:
+            value - the new value that the attribute should take
+
+        Raises:
+            A TypeError is raised if the provided value cannot be cast to the
+            current type of the attribute.
+
+        """
+        attr_type = type(self.value)
+        attr_value = value
+
+        # The only way we can currently get the type of the attribute is if it
+        # has an existing value. So if the attribute has an existing value,
+        # cast the value to the type of the attribute value.
+        if attr_type is not type(None):
+            try:
+                attr_value = attr_type(attr_value)
+
+            except ValueError:
+                msg = "expected type '{}', but got '{}' instead"
+                raise TypeError(msg.format(attr_type.__name__, type(value).__name__))
+
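+        # e.g. if the attribute currently holds the int 5, assigning "7"
+        # stores the int 7, while assigning "abc" raises TypeError
+        # (illustrative values).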
+        setattr(self.obj, self.name, attr_value)
+
+
+class XPathElement(XPathAttribute):
+    """
+    This class is used to represent a reference to an element within a list.
+    Unlike scalar attributes, it is not entirely necessary to have this class
+    to represent the attribute because the element cannot be a simple scalar.
+    However, this class is used because it creates a uniform interface that can
+    be used by the setxattr and getxattr functions.
+    """
+
+    def __init__(self, container, key, value):
+        """Create an instance of XPathElement
+
+        Arguments:
+            container - the object that contains the element
+            key       - the name of the field that is used to identify the
+                        element
+            value     - the value of the key that identifies the element
+
+        """
+        self._container = container
+        self._value = value
+        self._key = key
+
+    @property
+    def value(self):
+        for element in self._container:
+            if getattr(element, self._key) == self._value:
+                return element
+
+        raise ValueError("specified element does not exist")
+
+    @value.setter
+    def value(self, value):
+        existing = None
+        for element in self._container:
+            if getattr(element, self._key) == self._value:
+                existing = element
+                break
+
+        if existing is not None:
+            self._container.remove(existing)
+
+        self._container.append(value)
+
+
+class XPathSelector(object):
+    def __init__(self, xpath):
+        """Creates an instance of XPathSelector
+
+        Arguments:
+            xpath - a string containing an xpath expression
+
+        """
+        self._tokens = tokenize(xpath)
+
+
+    def __call__(self, obj):
+        """Returns a reference to an attribute on the provided object
+
+        Using the defined xpath, an attribute is selected from the provided
+        object and returned.
+
+        Arguments:
+            obj - a GI object
+
+        Raises:
+            A ValueError is raised if the specified element in a list cannot be
+            found.
+
+        Returns:
+            an XPathAttribute that reference the specified attribute
+
+        """
+        current = obj
+        for token in self._tokens[:-1]:
+            # If the object is contained within a list, we will need to iterate
+            # through the tokens until we find a token that is a field of the
+            # object.
+            if token.name not in current.fields:
+                if current is obj:
+                    continue
+
+                raise ValueError('cannot find attribute {}'.format(token.name))
+
+            # If the token is a ListElement, try to find the matching element
+            if isinstance(token, ListElement):
+                for element in getattr(current, token.name):
+                    if getattr(element, token.key) == token.value:
+                        current = element
+                        break
+
+                else:
+                    raise ValueError('unable to find {}'.format(token.value))
+
+            else:
+                # Attribute the variable matching the name of the token
+                current = getattr(current, token.name)
+
+        # Process the final token
+        token = self._tokens[-1]
+
+        # If the token represents a list element, find the element in the list
+        # and return an XPathElement
+        if isinstance(token, ListElement):
+            container = getattr(current, token.name)
+            for element in container:
+                if getattr(element, token.key) == token.value:
+                    return XPathElement(container, token.key, token.value)
+
+            raise ValueError('unable to find {}'.format(token.value))
+
+        # Otherwise, return the object as an XPathAttribute
+        return XPathAttribute(current, token.name)
+
+    @property
+    def tokens(self):
+        """The tokens in the xpath expression"""
+        return self._tokens
+
+
+# A global cache to avoid repeated parsing of known xpath expressions
+__xpath_cache = dict()
+
+
+def reset_cache():
+    global __xpath_cache
+    __xpath_cache = dict()
+
+
+def getxattr(obj, xpath):
+    """Return an attribute on the provided object
+
+    The xpath is parsed and used to identify an attribute on the provided
+    object. The object is expected to be a GI object where each attribute that
+    is accessible via an xpath expression is contained in the 'fields'
+    attribute of the object (NB: this is not true of GI lists, which do not
+    have a 'fields' attribute).
+
+    A selector is created for each xpath and used to find the specified
+    attribute. The accepted xpath expressions are those supported by the
+    XPathSelector class. The parsed xpath expression is cached so that
+    subsequent parsing is unnecessary. However, selectors are stored in a
+    global dictionary and this means that this function is not thread-safe.
+
+    Arguments:
+        obj   - a GI object
+        xpath - a string containing an xpath expression
+
+    Returns:
+        an attribute on the provided object
+
+    """
+    if xpath not in __xpath_cache:
+        __xpath_cache[xpath] = XPathSelector(xpath)
+
+    return __xpath_cache[xpath](obj).value
+
+
+def setxattr(obj, xpath, value):
+    """Set the attribute referred to by the xpath
+
+    Arguments:
+        obj   - a GI object
+        xpath - a string containing an xpath expression
+        value - the new value of the attribute
+
+    """
+    if xpath not in __xpath_cache:
+        __xpath_cache[xpath] = XPathSelector(xpath)
+
+    __xpath_cache[xpath](obj).value = value
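+
+# Illustrative round trip (hypothetical GI object 'nsd' with a 'name' field):
+#
+#   getxattr(nsd, "/nsd:nsd/nsd:name")             # -> value of nsd.name
+#   setxattr(nsd, "/nsd:nsd/nsd:name", "my-nsd")   # cast to the field's type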
diff --git a/rwlaunchpad/plugins/rwnsm/rwnsmtasklet.py b/rwlaunchpad/plugins/rwnsm/rwnsmtasklet.py
new file mode 100755 (executable)
index 0000000..3b5c072
--- /dev/null
@@ -0,0 +1,28 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwnsmtasklet
+
+class Tasklet(rift.tasklets.rwnsmtasklet.NsmTasklet):
+    pass
+
+# vim: sw=4
diff --git a/rwlaunchpad/plugins/rwresmgr/CMakeLists.txt b/rwlaunchpad/plugins/rwresmgr/CMakeLists.txt
new file mode 100644 (file)
index 0000000..e757e43
--- /dev/null
@@ -0,0 +1,41 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 05/15/2015
+# 
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwresmgrtasklet)
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/${TASKLET_NAME}/__init__.py
+    rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
+    rift/tasklets/${TASKLET_NAME}/rwresmgr_config.py
+    rift/tasklets/${TASKLET_NAME}/rwresmgr_core.py
+    rift/tasklets/${TASKLET_NAME}/rwresmgr_events.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
diff --git a/rwlaunchpad/plugins/rwresmgr/Makefile b/rwlaunchpad/plugins/rwresmgr/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
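+
+##
+# Illustrative example (not executed): invoked from a subdirectory such as
+# rwlaunchpad/plugins/rwresmgr, the shell loop above walks ., .., ../.., ...
+# and returns the first Makefile.top found, e.g. <tree-root>/Makefile.top.
+##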
diff --git a/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/__init__.py b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/__init__.py
new file mode 100644 (file)
index 0000000..1ee19e3
--- /dev/null
@@ -0,0 +1 @@
+from .rwresmgrtasklet import ResMgrTasklet
diff --git a/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_config.py b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_config.py
new file mode 100644 (file)
index 0000000..5035b18
--- /dev/null
@@ -0,0 +1,115 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+
+from gi.repository import (
+    RwDts as rwdts,
+    RwYang,
+    RwResourceMgrYang,
+    RwLaunchpadYang,
+    RwcalYang,
+)
+
+from gi.repository.RwTypes import RwStatus
+import rift.tasklets
+import rift.mano.cloud
+
+
+class ResourceMgrConfig(object):
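+    """DTS-facing configuration handler for the resource manager.
+
+    Publishes resource-pool operational records and subscribes to
+    cloud-account configuration on behalf of its parent.
+    """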
+    XPATH_POOL_OPER_DATA = "D,/rw-resource-mgr:resource-pool-records"
+
+    def __init__(self, dts, log, rwlog_hdl, loop, parent):
+        self._dts = dts
+        self._log = log
+        self._rwlog_hdl = rwlog_hdl
+        self._loop = loop
+        self._parent = parent
+
+        self._cloud_sub = None
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self.register_resource_pool_operational_data()
+        self.register_cloud_account_config()
+
+    def register_cloud_account_config(self):
+        def on_add_cloud_account_apply(account):
+            self._log.debug("Received on_add_cloud_account: %s", account)
+            self._parent.add_cloud_account_config(account)
+
+        def on_delete_cloud_account_apply(account_name):
+            self._log.debug("Received on_delete_cloud_account_apply: %s", account_name)
+            self._parent.delete_cloud_account_config(account_name)
+
+        @asyncio.coroutine
+        def on_delete_cloud_account_prepare(account_name):
+            self._log.debug("Received on_delete_cloud_account_prepare: %s", account_name)
+            self._parent.delete_cloud_account_config(account_name, dry_run=True)
+
+        cloud_callbacks = rift.mano.cloud.CloudAccountConfigCallbacks(
+                on_add_apply=on_add_cloud_account_apply,
+                on_delete_apply=on_delete_cloud_account_apply,
+                on_delete_prepare=on_delete_cloud_account_prepare,
+                )
+
+        self._cloud_sub = rift.mano.cloud.CloudAccountConfigSubscriber(
+                self._dts, self._log, self._rwlog_hdl, cloud_callbacks
+                )
+        self._cloud_sub.register()
+
+    @asyncio.coroutine
+    def register_resource_pool_operational_data(self):
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            self._log.debug("ResourceMgr providing resource-pool information")
+            msg = RwResourceMgrYang.ResourcePoolRecords()
+
+            cloud_accounts = self._parent.get_cloud_account_names()
+            for cloud_account_name in cloud_accounts:
+                pools = self._parent.get_pool_list(cloud_account_name)
+                self._log.debug("Publishing information about cloud account %s %d resource pools",
+                                cloud_account_name, len(pools))
+
+                cloud_account_msg = msg.cloud_account.add()
+                cloud_account_msg.name = cloud_account_name
+                for pool in pools:
+                    pool_info = self._parent.get_pool_info(cloud_account_name, pool.name)
+                    cloud_account_msg.records.append(pool_info)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK,
+                                    ResourceMgrConfig.XPATH_POOL_OPER_DATA,
+                                    msg=msg,)
+
+        self._log.debug("Registering for Resource Mgr resource-pool-record using xpath: %s",
+                        ResourceMgrConfig.XPATH_POOL_OPER_DATA)
+
+        handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
+        yield from self._dts.register(xpath=ResourceMgrConfig.XPATH_POOL_OPER_DATA,
+                                      handler=handler,
+                                      flags=rwdts.Flag.PUBLISHER)
+
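+# A minimal wiring sketch (illustrative, not part of this file; assumes the
+# hosting tasklet provides dts, log, rwlog_hdl, loop and a parent object that
+# implements add/delete_cloud_account_config and the pool accessors):
+#
+#   config = ResourceMgrConfig(dts, log, rwlog_hdl, loop, parent)
+#   asyncio.ensure_future(config.register(), loop=loop)
+#
+# register() publishes resource-pool-records on DTS and then subscribes to
+# cloud-account configuration through CloudAccountConfigSubscriber.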
diff --git a/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_core.py b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_core.py
new file mode 100644 (file)
index 0000000..d2897f8
--- /dev/null
@@ -0,0 +1,1480 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import uuid
+import collections
+import asyncio
+import concurrent.futures
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+from gi.repository import (
+    RwDts as rwdts,
+    RwYang,
+    RwResourceMgrYang,
+    RwLaunchpadYang,
+    RwcalYang,
+)
+
+from gi.repository.RwTypes import RwStatus
+
+class ResMgrCALNotPresent(Exception):
+    pass
+
+class ResMgrCloudAccountNotFound(Exception):
+    pass
+
+class ResMgrCloudAccountExists(Exception):
+    pass
+
+class ResMgrCloudAccountInUse(Exception):
+    pass
+
+class ResMgrDuplicatePool(Exception):
+    pass
+
+class ResMgrPoolNotAvailable(Exception):
+    pass
+
+class ResMgrPoolOperationFailed(Exception):
+    pass
+
+class ResMgrDuplicateEventId(Exception):
+    pass
+
+class ResMgrUnknownEventId(Exception):
+    pass
+
+class ResMgrUnknownResourceId(Exception):
+    pass
+
+class ResMgrResourceIdBusy(Exception):
+    pass
+
+class ResMgrResourceIdNotAllocated(Exception):
+    pass
+
+class ResMgrNoResourcesAvailable(Exception):
+    pass
+
+class ResMgrResourcesInitFailed(Exception):
+    pass
+
+class ResMgrCALOperationFailure(Exception):
+    pass
+
+
+
+class ResourceMgrCALHandler(object):
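+    """Asynchronous facade over the blocking RW.CAL account API.
+
+    Every CAL call is dispatched onto the supplied executor via
+    loop.run_in_executor() so the tasklet's event loop is never blocked.
+    """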
+    def __init__(self, loop, executor, log, log_hdl, account):
+        self._log = log
+        self._loop = loop
+        self._executor = executor
+        self._account = account.cal_account_msg
+        self._rwcal = account.cal
+        if account.account_type == 'aws':
+            self._subnets = ["172.31.97.0/24", "172.31.98.0/24", "172.31.99.0/24", "172.31.100.0/24", "172.31.101.0/24"]
+        else:
+            self._subnets = ["11.0.0.0/24",
+                             "12.0.0.0/24",
+                             "13.0.0.0/24",
+                             "14.0.0.0/24",
+                             "15.0.0.0/24",
+                             "16.0.0.0/24",
+                             "17.0.0.0/24",
+                             "18.0.0.0/24",
+                             "19.0.0.0/24",
+                             "20.0.0.0/24",
+                             "21.0.0.0/24",
+                             "22.0.0.0/24",]
+        self._subnet_ptr = 0
+
+    def _select_link_subnet(self):
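+        """Return the next candidate subnet, wrapping back to the start of the list."""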
+        subnet = self._subnets[self._subnet_ptr]
+        self._subnet_ptr += 1
+        if self._subnet_ptr == len(self._subnets):
+            self._subnet_ptr = 0
+        return subnet
+
+    @asyncio.coroutine
+    def create_virtual_network(self, req_params):
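+        """Reuse a pre-created VIM network whose name matches the request, else create one dynamically."""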
+        #rc, rsp = self._rwcal.get_virtual_link_list(self._account)
+        self._log.debug("Calling get_virtual_link_list API")
+        rc, rsp = yield from self._loop.run_in_executor(self._executor,
+                                                        self._rwcal.get_virtual_link_list,
+                                                        self._account)
+
+        assert rc == RwStatus.SUCCESS
+
+        links = [vlink for vlink in rsp.virtual_link_info_list if vlink.name == req_params.name]
+        if links:
+            self._log.debug("Found existing virtual-network with matching name in cloud. Reusing the virtual-network with id: %s" %(links[0].virtual_link_id))
+            return ('precreated', links[0].virtual_link_id)
+        elif req_params.vim_network_name:
+            self._log.error("Virtual-network-allocate operation failed for cloud account: %s Vim Network with name %s does not pre-exist",
+                    self._account.name, req_params.vim_network_name)
+            raise ResMgrCALOperationFailure("Virtual-network allocate operation failed for cloud account: %s Vim Network with name %s does not pre-exist"
+                    %(self._account.name, req_params.vim_network_name))
+
+        params = RwcalYang.VirtualLinkReqParams()
+        params.from_dict(req_params.as_dict())
+        params.subnet = self._select_link_subnet()
+        #rc, rs = self._rwcal.create_virtual_link(self._account, params)
+        self._log.debug("Calling create_virtual_link API with params: %s" %(str(req_params)))
+        rc, rs = yield from self._loop.run_in_executor(self._executor,
+                                                       self._rwcal.create_virtual_link,
+                                                       self._account,
+                                                       params)
+        if rc.status != RwStatus.SUCCESS:
+            self._log.error("Virtual-network-allocate operation failed for cloud account: %s - error_msg: %s, Traceback: %s",
+                    self._account.name, rc.error_msg, rc.traceback)
+            raise ResMgrCALOperationFailure("Virtual-network allocate operation failed for cloud account: %s (%s)"
+                    %(self._account.name, rc.error_msg))
+
+        return ('dynamic',rs)
+
+    @asyncio.coroutine
+    def delete_virtual_network(self, network_id):
+        #rc = self._rwcal.delete_virtual_link(self._account, network_id)
+        self._log.debug("Calling delete_virtual_link API with id: %s" %(network_id))
+        rc = yield from self._loop.run_in_executor(self._executor,
+                                                   self._rwcal.delete_virtual_link,
+                                                   self._account,
+                                                   network_id)
+        if rc != RwStatus.SUCCESS:
+            self._log.error("Virtual-network-release operation failed for cloud account: %s. ResourceID: %s",
+                            self._account.name,
+                            network_id)
+            raise ResMgrCALOperationFailure("Virtual-network release operation failed for cloud account: %s. ResourceId: %s" %(self._account.name, network_id))
+
+    @asyncio.coroutine        
+    def get_virtual_network_info(self, network_id):
+        #rc, rs = self._rwcal.get_virtual_link(self._account, network_id)
+        self._log.debug("Calling get_virtual_link_info API with id: %s" %(network_id))
+        rc, rs = yield from self._loop.run_in_executor(self._executor,
+                                                       self._rwcal.get_virtual_link,
+                                                       self._account,
+                                                       network_id)
+        if rc != RwStatus.SUCCESS:
+            self._log.error("Virtual-network-info operation failed for cloud account: %s. ResourceID: %s",
+                            self._account.name,
+                            network_id)
+            raise ResMgrCALOperationFailure("Virtual-network-info operation failed for cloud account: %s. ResourceID: %s" %(self._account.name, network_id))
+        return rs
+
+    @asyncio.coroutine
+    def create_virtual_compute(self, req_params):
+        #rc, rsp = self._rwcal.get_vdu_list(self._account)
+        self._log.debug("Calling get_vdu_list API")
+        rc, rsp = yield from self._loop.run_in_executor(self._executor,
+                                                        self._rwcal.get_vdu_list,
+                                                        self._account)
+        assert rc == RwStatus.SUCCESS
+        vdus = [vm for vm in rsp.vdu_info_list if vm.name == req_params.name]
+        if vdus:
+            self._log.debug("Found existing virtual-compute with matching name in cloud. Reusing the virtual-compute element with id: %s" %(vdus[0].vdu_id))
+            return vdus[0].vdu_id
+
+        params = RwcalYang.VDUInitParams()
+        params.from_dict(req_params.as_dict())
+
+        image_checksum = req_params.image_checksum if req_params.has_field("image_checksum") else None
+        params.image_id = yield from self.get_image_id_from_image_info(req_params.image_name, image_checksum)
+
+        #rc, rs = self._rwcal.create_vdu(self._account, params)
+        self._log.debug("Calling create_vdu API with params %s" %(str(params)))
+        rc, rs = yield from self._loop.run_in_executor(self._executor,
+                                                       self._rwcal.create_vdu,
+                                                       self._account,
+                                                       params)
+
+        if rc.status != RwStatus.SUCCESS:
+            self._log.error("Virtual-compute-create operation failed for cloud account: %s - error_msg: %s, Traceback: %s",
+                    self._account.name, rc.error_msg, rc.traceback)
+            raise ResMgrCALOperationFailure("Virtual-compute-create operation failed for cloud account: %s (%s)"
+                    %(self._account.name, rc.error_msg))
+
+        return rs
+
+    @asyncio.coroutine
+    def modify_virtual_compute(self, req_params):
+        #rc = self._rwcal.modify_vdu(self._account, req_params)
+        self._log.debug("Calling modify_vdu API with params: %s" %(str(req_params)))
+        rc = yield from self._loop.run_in_executor(self._executor,
+                                                   self._rwcal.modify_vdu,
+                                                   self._account,
+                                                   req_params)
+        if rc != RwStatus.SUCCESS:
+            self._log.error("Virtual-compute-modify operation failed for cloud account: %s", self._account.name)
+            raise ResMgrCALOperationFailure("Virtual-compute-modify operation failed for cloud account: %s" %(self._account.name))
+
+    @asyncio.coroutine        
+    def delete_virtual_compute(self, compute_id):
+        #rc = self._rwcal.delete_vdu(self._account, compute_id)
+        self._log.debug("Calling delete_vdu API with id: %s" %(compute_id))
+        rc = yield from self._loop.run_in_executor(self._executor,
+                                                   self._rwcal.delete_vdu,
+                                                   self._account,
+                                                   compute_id)
+        if rc != RwStatus.SUCCESS:
+            self._log.error("Virtual-compute-release operation failed for cloud account: %s. ResourceID: %s",
+                            self._account.name,
+                            compute_id)
+            raise ResMgrCALOperationFailure("Virtual-compute-release operation failed for cloud account: %s. ResourceID: %s" %(self._account.name, compute_id))
+
+    @asyncio.coroutine        
+    def get_virtual_compute_info(self, compute_id):
+        #rc, rs = self._rwcal.get_vdu(self._account, compute_id)
+        self._log.debug("Calling get_vdu API with id: %s" %(compute_id))
+        rc, rs = yield from self._loop.run_in_executor(self._executor,
+                                                       self._rwcal.get_vdu,
+                                                       self._account,
+                                                       compute_id)
+        if rc != RwStatus.SUCCESS:
+            self._log.error("Virtual-compute-info operation failed for cloud account: %s. ResourceID: %s",
+                            self._account.name,
+                            compute_id)
+            raise ResMgrCALOperationFailure("Virtual-compute-info operation failed for cloud account: %s. ResourceID: %s" %(self._account.name, compute_id))
+        return rs
+
+    @asyncio.coroutine
+    def get_compute_flavor_info_list(self):
+        #rc, rs = self._rwcal.get_flavor_list(self._account)
+        self._log.debug("Calling get_flavor_list API")
+        rc, rs = yield from self._loop.run_in_executor(self._executor,
+                                                       self._rwcal.get_flavor_list,
+                                                       self._account)
+        if rc != RwStatus.SUCCESS:
+            self._log.error("Get-flavor-info-list operation failed for cloud account: %s",
+                            self._account.name)
+            raise ResMgrCALOperationFailure("Get-flavor-info-list operation failed for cloud account: %s" %(self._account.name))
+        return rs.flavorinfo_list
+
+    @asyncio.coroutine
+    def create_compute_flavor(self, request):
+        flavor = RwcalYang.FlavorInfoItem()
+        flavor.name = str(uuid.uuid4())
+        epa_types = ['vm_flavor', 'guest_epa', 'host_epa', 'host_aggregate']
+        epa_dict = {k: v for k, v in request.as_dict().items() if k in epa_types}
+        flavor.from_dict(epa_dict)
+
+        self._log.info("Creating flavor: %s", flavor)
+        #rc, rs = self._rwcal.create_flavor(self._account, flavor)
+        self._log.debug("Calling create_flavor API")
+        rc, rs = yield from self._loop.run_in_executor(self._executor,
+                                                       self._rwcal.create_flavor,
+                                                       self._account,
+                                                       flavor)
+        if rc != RwStatus.SUCCESS:
+            self._log.error("Create-flavor operation failed for cloud account: %s",
+                            self._account.name)
+            raise ResMgrCALOperationFailure("Create-flavor operation failed for cloud account: %s" %(self._account.name))
+        return rs
+
+    @asyncio.coroutine
+    def get_image_info_list(self):
+        #rc, rs = self._rwcal.get_image_list(self._account)
+        self._log.debug("Calling get_image_list API")
+        rc, rs = yield from self._loop.run_in_executor(self._executor,
+                                                       self._rwcal.get_image_list,
+                                                       self._account)
+        if rc != RwStatus.SUCCESS:
+            self._log.error("Get-image-info-list operation failed for cloud account: %s",
+                            self._account.name)
+            raise ResMgrCALOperationFailure("Get-image-info-list operation failed for cloud account: %s" %(self._account.name))
+        return rs.imageinfo_list
+
+    @asyncio.coroutine
+    def get_image_id_from_image_info(self, image_name, image_checksum=None):
+        self._log.debug("Looking up image id for image name %s and checksum %s on cloud account: %s",
+                image_name, image_checksum, self._account.name
+                )
+
+        image_list = yield from self.get_image_info_list()
+        matching_images = [i for i in image_list if i.name == image_name]
+
+        # If the image checksum was filled in then further filter the images by the checksum
+        if image_checksum is not None:
+            matching_images = [i for i in matching_images if i.checksum == image_checksum]
+        else:
+            self._log.warning("Image checksum not provided.  Lookup using image name (%s) only.",
+                              image_name)
+
+        if len(matching_images) == 0:
+            raise ResMgrCALOperationFailure("Could not find image name {} (using checksum: {}) for cloud account: {}".format(
+                image_name, image_checksum, self._account.name
+                ))
+
+        elif len(matching_images) > 1:
+            unique_checksums = {i.checksum for i in matching_images}
+            if len(unique_checksums) > 1:
+                msg = ("Too many images with different checksums matched "
+                       "image name of %s for cloud account: %s" % (image_name, self._account.name))
+                raise ResMgrCALOperationFailure(msg)
+
+        return matching_images[0].id
+
+    @asyncio.coroutine
+    def get_image_info(self, image_id):
+        #rc, rs = self._rwcal.get_image(self._account, image_id)
+        self._log.debug("Calling get_image API for id: %s" %(image_id))
+        rc, rs = yield from self._loop.run_in_executor(self._executor,
+                                                       self._rwcal.get_image,
+                                                       self._account,
+                                                       image_id)
+        if rc != RwStatus.SUCCESS:
+            self._log.error("Get-image-info-list operation failed for cloud account: %s",
+                            self._account.name)
+            raise ResMgrCALOperationFailure("Get-image-info operation failed for cloud account: %s" %(self._account.name))
+        return rs.imageinfo_list
+
+    def dynamic_flavor_supported(self):
+        return getattr(self._account, self._account.account_type).dynamic_flavor_support
+
+
+class Resource(object):
+    def __init__(self, resource_id, resource_type):
+        self._id = resource_id
+        self._type = resource_type
+
+    @property
+    def resource_id(self):
+        return self._id
+
+    @property
+    def resource_type(self):
+        return self._type
+
+    def cleanup(self):
+        pass
+
+
+class ComputeResource(Resource):
+    def __init__(self, resource_id, resource_type):
+        super(ComputeResource, self).__init__(resource_id, resource_type)
+
+
+class NetworkResource(Resource):
+    def __init__(self, resource_id, resource_type):
+        super(NetworkResource, self).__init__(resource_id, resource_type)
+
+
+class ResourcePoolInfo(object):
+    def __init__(self, name, pool_type, resource_type, max_size):
+        self.name = name
+        self.pool_type = pool_type
+        self.resource_type = resource_type
+        self.max_size = max_size
+
+    @classmethod
+    def from_dict(cls, pool_dict):
+        return cls(
+                pool_dict["name"],
+                pool_dict["pool_type"],
+                pool_dict["resource_type"],
+                pool_dict["max_size"],
+                )
+
+
+class ResourcePool(object):
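+    """Base class for a per-cloud-account resource pool.
+
+    Tracks every resource in _all_resources, with _free_resources and
+    _allocated_resources partitioning them by allocation state.
+    """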
+    def __init__(self, log, loop, pool_info, resource_class, cal):
+        self._log = log
+        self._loop = loop
+        self._name = pool_info.name
+        self._pool_type = pool_info.pool_type
+        self._resource_type = pool_info.resource_type
+        self._cal = cal
+        self._resource_class = resource_class
+
+        self._max_size = pool_info.max_size
+
+        self._status = 'unlocked'
+        ### A Dictionary of all the resources in this pool, keyed by CAL resource-id
+        self._all_resources = {}
+        ### A List of free resources in this pool
+        self._free_resources = []
+        ### A Dictionary of all the allocated resources in this pool, keyed by CAL resource-id
+        self._allocated_resources = {}
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    def cal(self):
+        """ This instance's ResourceMgrCALHandler """
+        return self._cal
+
+    @property
+    def pool_type(self):
+        return self._pool_type
+
+    @property
+    def resource_type(self):
+        return self._resource_type
+
+    @property
+    def max_size(self):
+        return self._max_size
+
+    @property
+    def status(self):
+        return self._status
+
+    def in_use(self):
+        return len(self._allocated_resources) != 0
+
+    def update_cal_handler(self, cal):
+        if self.in_use():
+            raise ResMgrPoolOperationFailed(
+                    "Cannot update CAL plugin for in use pool"
+                    )
+
+        self._cal = cal
+
+    def lock_pool(self):
+        self._log.info("Locking the pool :%s", self.name)
+        self._status = 'locked'
+
+    def unlock_pool(self):
+        self._log.info("Unlocking the pool :%s", self.name)
+        self._status = 'unlocked'
+
+    def add_resource(self, resource_info):
+        self._log.info("Adding static resource to Pool: %s, Resource-id: %s Resource-Type: %s",
+                       self.name,
+                       resource_info.resource_id,
+                       self.resource_type)
+
+        ### Add static resources to pool
+        resource = self._resource_class(resource_info.resource_id, 'static')
+        assert resource.resource_id == resource_info.resource_id
+        self._all_resources[resource.resource_id] = resource
+        self._free_resources.append(resource)
+
+    def delete_resource(self, resource_id):
+        if resource_id not in self._all_resources:
+            self._log.error("Resource Id: %s not present in pool: %s. Delete operation failed", resource_id, self.name)
+            raise ResMgrUnknownResourceId("Resource Id: %s requested for release is not found" %(resource_id))
+
+        if resource_id in self._allocated_resources:
+            self._log.error("Resource Id: %s in use. Delete operation failed", resource_id)
+            raise ResMgrResourceIdBusy("Resource Id: %s requested for release is in use" %(resource_id))
+
+        self._log.info("Deleting resource: %s from pool: %s, Resource-Type",
+                       resource_id,
+                       self.name,
+                       self.resource_type)
+
+        resource = self._all_resources.pop(resource_id)
+        self._free_resources.remove(resource)
+        resource.cleanup()
+        del resource
+
+    @asyncio.coroutine
+    def read_resource_info(self, resource_id):
+        if resource_id not in self._all_resources:
+            self._log.error("Resource Id: %s not present in pool: %s. Read operation failed", resource_id, self.name)
+            raise ResMgrUnknownResourceId("Resource Id: %s requested for read is not found" %(resource_id))
+
+        if resource_id not in self._allocated_resources:
+            self._log.error("Resource Id: %s not in use. Read operation failed", resource_id)
+            raise ResMgrResourceIdNotAllocated("Resource Id: %s not in use. Read operation failed" %(resource_id))
+
+        resource = self._allocated_resources[resource_id]
+        resource_info = yield from self.get_resource_info(resource)
+        return resource_info
+
+    def get_pool_info(self):
+        info = RwResourceMgrYang.ResourceRecordInfo()
+        self._log.info("Providing info for pool: %s", self.name)
+        info.name = self.name
+        if self.pool_type:
+            info.pool_type = self.pool_type
+        if self.resource_type:
+            info.resource_type = self.resource_type
+        if self.status:
+            info.pool_status = self.status
+
+        info.total_resources = len(self._all_resources)
+        info.free_resources = len(self._free_resources)
+        info.allocated_resources = len(self._allocated_resources)
+        return info
+
+    def cleanup(self):
+        for _, v in self._all_resources.items():
+            v.cleanup()
+
+    @asyncio.coroutine
+    def _allocate_static_resource(self, request, resource_type):
+        unit_type = {'compute': 'VDU', 'network':'VirtualLink'}
+        match_found = False
+        resource = None
+        self._log.info("Doing resource match from pool :%s", self._free_resources)
+        for resource in self._free_resources:
+            resource_info = yield from self.get_resource_info(resource)
+            self._log.info("Attempting to match %s-requirements for %s: %s with resource-id :%s",
+                           resource_type, unit_type[resource_type],request.name, resource.resource_id)
+            if self.match_epa_params(resource_info, request):
+                if self.match_image_params(resource_info, request):
+                    match_found = True
+                    self._log.info("%s-requirements matched for %s: %s with resource-id :%s",
+                                   resource_type, unit_type[resource_type],request.name, resource.resource_id)
+                    yield from self.initialize_resource_in_cal(resource, request)
+            break
+
+        if not match_found:
+            self._log.error("No match found for %s-requirements for %s: %s in pool: %s. %s instantiation failed",
+                            resource_type,
+                            unit_type[resource_type],
+                            request.name,
+                            self.name,
+                            unit_type[resource_type])
+            return None
+        else:
+            ### Move resource from free-list into allocated-list
+            self._log.info("Allocating the static resource with resource-id: %s for %s: %s",
+                           resource.resource_id,
+                           unit_type[resource_type],request.name)
+            self._free_resources.remove(resource)
+            self._allocated_resources[resource.resource_id] = resource
+
+        return resource
+
+    @asyncio.coroutine
+    def allocate_resource(self, request):
+        resource = yield from self.allocate_resource_in_cal(request)
+        resource_info = yield from self.get_resource_info(resource)
+        return resource.resource_id, resource_info
+
+    @asyncio.coroutine
+    def release_resource(self, resource_id):
+        self._log.debug("Releasing resource_id %s in pool %s", resource_id, self.name)
+        if resource_id not in self._allocated_resources:
+            self._log.error("Failed to release a resource with resource-id: %s in pool: %s. Resource not known",
+                            resource_id,
+                            self.name)
+            raise ResMgrUnknownResourceId("Failed to release resource with resource-id: %s. Unknown resource-id" %(resource_id))
+
+        ### Get resource object
+        resource = self._allocated_resources.pop(resource_id)
+        yield from self.uninitialize_resource_in_cal(resource)
+        yield from self.release_cal_resource(resource)
+
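+# Lifecycle sketch (illustrative only, hypothetical pool/request objects):
+#
+#   pool.add_resource(resource_info)                         # static -> free list
+#   rid, info = yield from pool.allocate_resource(request)   # free -> allocated
+#   yield from pool.release_resource(rid)                    # allocated -> free,
+#                                                            # or deleted via CAL if dynamic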
+
+class NetworkPool(ResourcePool):
+    def __init__(self, log, loop, pool_info, cal):
+        super(NetworkPool, self).__init__(log, loop, pool_info, NetworkResource, cal)
+
+    @asyncio.coroutine
+    def allocate_resource_in_cal(self, request):
+        resource = None
+        if self.pool_type == 'static':
+            self._log.info("Attempting network resource allocation from static pool: %s", self.name)
+            ### Attempt resource allocation from static pool
+            resource = yield from self._allocate_static_resource(request, 'network')
+        elif self.pool_type == 'dynamic':
+            ### Attempt resource allocation from dynamic pool
+            self._log.info("Attempting network resource allocation from dynamic pool: %s", self.name)
+            if len(self._free_resources) != 0:
+                self._log.info("Dynamic pool: %s has %d static resources, Attempting resource allocation from static resources",
+                               self.name, len(self._free_resources))
+                resource =  yield from self._allocate_static_resource(request, 'network')
+            if resource is None:
+                self._log.info("Could not resource from static resources. Going for dynamic resource allocation")
+                ## Not static resource available. Attempt dynamic resource from pool
+                resource = yield from self.allocate_dynamic_resource(request)
+        if resource is None:
+            raise ResMgrNoResourcesAvailable("No matching resource available for allocation from pool: %s" %(self.name))
+        return resource
+
+    @asyncio.coroutine
+    def allocate_dynamic_resource(self, request):
+        resource_type, resource_id = yield from self._cal.create_virtual_network(request)
+        if resource_id in self._all_resources:
+            self._log.error("Resource with id %s name %s of type %s is already used", resource_id, request.name, resource_type)
+            raise ResMgrNoResourcesAvailable("Resource with name %s of type network is already used" %(resource_id))
+        resource = self._resource_class(resource_id, resource_type)
+        self._all_resources[resource_id] = resource
+        self._allocated_resources[resource_id] = resource
+        self._log.info("Successfully allocated virtual-network resource from CAL with resource-id: %s", resource_id)
+        return resource
+
+    @asyncio.coroutine
+    def release_cal_resource(self, resource):
+        if resource.resource_type == 'dynamic':
+            self._log.debug("Deleting virtual network with network_id: %s", resource.resource_id)
+            yield from self._cal.delete_virtual_network(resource.resource_id)
+            self._all_resources.pop(resource.resource_id)
+            self._log.info("Successfully released virtual-network resource in CAL with resource-id: %s", resource.resource_id)
+        elif resource.resource_type == 'precreated':
+            self._all_resources.pop(resource.resource_id)
+            self._log.info("Successfully removed precreated virtual-network resource from allocated list: %s", resource.resource_id)
+        else:
+            self._log.info("Successfully released virtual-network resource with resource-id: %s into available-list", resource.resource_id)
+            self._free_resources.append(resource)
+
+    @asyncio.coroutine
+    def get_resource_info(self, resource):
+        info = yield from self._cal.get_virtual_network_info(resource.resource_id)
+        self._log.info("Successfully retrieved virtual-network information from CAL with resource-id: %s. Info: %s",
+                       resource.resource_id, str(info))
+        response = RwResourceMgrYang.VirtualLinkEventData_ResourceInfo()
+        response.from_dict(info.as_dict())
+        response.pool_name = self.name
+        response.resource_state = 'active'
+        return response
+
+    @asyncio.coroutine
+    def get_info_by_id(self, resource_id):
+        info = yield from self._cal.get_virtual_network_info(resource_id)
+        self._log.info("Successfully retrieved virtual-network information from CAL with resource-id: %s. Info: %s",
+                       resource_id, str(info))
+        return info
+
+    def match_image_params(self, resource_info, request_params):
+        return True
+
+    def match_epa_params(self, resource_info, request_params):
+        if not hasattr(request_params, 'provider_network'):
+            ### It's a match if nothing is requested
+            return True
+        else:
+            required = getattr(request_params, 'provider_network')
+
+        if not hasattr(resource_info, 'provider_network'):
+            ### It's not a match
+            return False
+        else:
+            available = getattr(resource_info, 'provider_network')
+
+        self._log.debug("Matching Network EPA params. Required: %s, Available: %s", required, available)
+
+        if required.has_field('name') and required.name!= available.name:
+            self._log.debug("Provider Network mismatch. Required: %s, Available: %s",
+                            required.name,
+                            available.name)
+            return False
+
+        self._log.debug("Matching EPA params physical network name")
+
+        if required.has_field('physical_network') and required.physical_network != available.physical_network:
+            self._log.debug("Physical Network mismatch. Required: %s, Available: %s",
+                            required.physical_network,
+                            available.physical_network)
+            return False
+
+        self._log.debug("Matching EPA params overlay type")
+        if required.has_field('overlay_type') and required.overlay_type != available.overlay_type:
+            self._log.debug("Overlay type mismatch. Required: %s, Available: %s",
+                            required.overlay_type,
+                            available.overlay_type)
+            return False
+
+        self._log.debug("Matching EPA params SegmentationID")
+        if required.has_field('segmentation_id') and required.segmentation_id != available.segmentation_id:
+            self._log.debug("Segmentation-Id mismatch. Required: %s, Available: %s",
+                            required.segmentation_id,
+                            available.segmentation_id)
+            return False
+        return True
+
+    @asyncio.coroutine
+    def initialize_resource_in_cal(self, resource, request):
+        pass
+
+    @asyncio.coroutine
+    def uninitialize_resource_in_cal(self, resource):
+        pass
+
+
+class ComputePool(ResourcePool):
+    def __init__(self, log, loop, pool_info, cal):
+        super(ComputePool, self).__init__(log, loop, pool_info, ComputeResource, cal)
+
+    @asyncio.coroutine
+    def allocate_resource_in_cal(self, request):
+        resource = None
+        if self.pool_type == 'static':
+            self._log.info("Attempting compute resource allocation from static pool: %s", self.name)
+            ### Attempt resource allocation from static pool
+            resource = yield from self._allocate_static_resource(request, 'compute')
+        elif self.pool_type == 'dynamic':
+            ### Attempt resource allocation from dynamic pool
+            self._log.info("Attempting compute resource allocation from dynamic pool: %s", self.name)
+            if len(self._free_resources) != 0:
+                self._log.info("Dynamic pool: %s has %d static resources, Attempting resource allocation from static resources",
+                               len(self._free_resources),
+                               self.name)
+                resource = yield from self._allocate_static_resource(request, 'compute')
+            if resource is None:
+                self._log.info("Attempting for dynamic resource allocation")
+                resource = yield from self.allocate_dynamic_resource(request)
+        if resource is None:
+            raise ResMgrNoResourcesAvailable("No matching resource available for allocation from pool: %s" %(self.name))
+
+        requested_params = RwcalYang.VDUInitParams()
+        requested_params.from_dict(request.as_dict())
+        resource.requested_params = requested_params
+        return resource
+
+    @asyncio.coroutine
+    def allocate_dynamic_resource(self, request):
+        #request.flavor_id = yield from self.select_resource_flavor(request)
+        resource_id = yield from self._cal.create_virtual_compute(request)
+        resource = self._resource_class(resource_id, 'dynamic')
+        self._all_resources[resource_id] = resource
+        self._allocated_resources[resource_id] = resource
+        self._log.info("Successfully allocated virtual-compute resource from CAL with resource-id: %s", resource_id)
+        return resource
+
+    @asyncio.coroutine
+    def release_cal_resource(self, resource):
+        if hasattr(resource, 'requested_params'):
+            delattr(resource, 'requested_params')
+        if resource.resource_type == 'dynamic':
+            yield from self._cal.delete_virtual_compute(resource.resource_id)
+            self._all_resources.pop(resource.resource_id)
+            self._log.info("Successfully released virtual-compute resource in CAL with resource-id: %s", resource.resource_id)
+        else:
+            self._log.info("Successfully released virtual-compute resource with resource-id: %s into available-list", resource.resource_id)
+            self._free_resources.append(resource)
+
+    @asyncio.coroutine
+    def get_resource_info(self, resource):
+        info = yield from self._cal.get_virtual_compute_info(resource.resource_id)
+        self._log.info("Successfully retrieved virtual-compute information from CAL with resource-id: %s. Info: %s",
+                       resource.resource_id, str(info))
+        response = RwResourceMgrYang.VDUEventData_ResourceInfo()
+        response.from_dict(info.as_dict())
+        response.pool_name = self.name
+        response.resource_state = self._get_resource_state(info, resource.requested_params)
+        return response
+
+    @asyncio.coroutine
+    def get_info_by_id(self, resource_id):
+        info = yield from self._cal.get_virtual_compute_info(resource_id)
+        self._log.info("Successfully retrieved virtual-compute information from CAL with resource-id: %s. Info: %s",
+                       resource_id, str(info))
+        return info 
+
+    def _get_resource_state(self, resource_info, requested_params):
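+        """Classify a virtual compute as 'failed', 'pending' or 'active' against its requested params."""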
+        if resource_info.state == 'failed':
+            self._log.error("<Compute-Resource: %s> Reached failed state.",
+                            resource_info.name)
+            return 'failed'
+
+        if resource_info.state != 'active':
+            self._log.info("<Compute-Resource: %s> Not reached active state.",
+                           resource_info.name)
+            return 'pending'
+
+        if not resource_info.has_field('management_ip') or resource_info.management_ip == '':
+            self._log.info("<Compute-Resource: %s> Management IP not assigned.",
+                           resource_info.name)
+            return 'pending'
+
+        if requested_params.has_field('allocate_public_address') and requested_params.allocate_public_address:
+            if not resource_info.has_field('public_ip'):
+                self._log.warning("<Compute-Resource: %s> Public IP not assigned - waiting for public IP, %s",
+                                  resource_info.name, requested_params)
+                return 'pending'
+
+        if (len(requested_params.connection_points) !=
+                len(resource_info.connection_points)):
+            self._log.warning("<Compute-Resource: %s> Waiting for requested number of ports to be assigned to virtual-compute, requested: %d, assigned: %d",
+                              resource_info.name,
+                              len(requested_params.connection_points),
+                              len(resource_info.connection_points))
+            return 'pending'
+
+        #not_active = [c for c in resource_info.connection_points
+        #              if c.state != 'active']
+
+        #if not_active:
+        #    self._log.warning("<Compute-Resource: %s> Management IP not assigned- waiting for connection_points , %s",
+        #                      resource_info.name, resource_info)
+        #    return 'pending'
+
+        ## Find the connection_points which are in active state but do not have an IP address
+        no_address = [c for c in resource_info.connection_points
+                      if (c.state == 'active') and (not c.has_field('ip_address'))]
+
+        if no_address:
+            self._log.warning("<Compute-Resource: %s> Management IP not assigned- waiting for connection_points , %s",
+                              resource_info.name, resource_info)
+            return 'pending'
+
+        return 'active'
+
+    @asyncio.coroutine
+    def select_resource_flavor(self, request):
+        flavors = yield from self._cal.get_compute_flavor_info_list()
+        self._log.debug("Received %d flavor information from RW.CAL", len(flavors))
+        flavor_id = None
+        match_found = False
+        for flv in flavors:
+            self._log.info("Attempting to match compute requirement for VDU: %s with flavor %s",
+                           request.name, flv)
+            if self.match_epa_params(flv, request):
+                self._log.info("Flavor match found for compute requirements for VDU: %s with flavor name: %s, flavor-id: %s",
+                               request.name, flv.name, flv.id)
+                match_found = True
+                flavor_id = flv.id
+                break
+
+        if not match_found:
+            ### Check if CAL account allows dynamic flavor creation
+            if self._cal.dynamic_flavor_supported():
+                self._log.info("Attempting to create a new flavor for required compute-requirement for VDU: %s", request.name)
+                flavor_id = yield from self._cal.create_compute_flavor(request)
+            else:
+                ### No match with existing flavors and CAL does not support dynamic flavor creation
+                self._log.error("Unable to create flavor for compute requirement for VDU: %s. VDU instantiation failed", request.name)
+                raise ResMgrNoResourcesAvailable("No resource available with matching EPA attributes")
+        else:
+            ### Found flavor
+            self._log.info("Found flavor with id: %s for compute requirement for VDU: %s",
+                           flavor_id, request.name)
+        return flavor_id
+
+    def _match_vm_flavor(self, required, available):
+        self._log.info("Matching VM Flavor attributes")
+        if available.vcpu_count != required.vcpu_count:
+            self._log.debug("VCPU requirement mismatch. Required: %d, Available: %d",
+                            required.vcpu_count,
+                            available.vcpu_count)
+            return False
+        if available.memory_mb != required.memory_mb:
+            self._log.debug("Memory requirement mismatch. Required: %d MB, Available: %d MB",
+                            required.memory_mb,
+                            available.memory_mb)
+            return False
+        if available.storage_gb != required.storage_gb:
+            self._log.debug("Storage requirement mismatch. Required: %d GB, Available: %d GB",
+                            required.storage_gb,
+                            available.storage_gb)
+            return False
+        self._log.debug("VM Flavor match found")
+        return True
+
+    def _match_guest_epa(self, required, available):
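+        """Exact-match guest EPA attributes; a field present on only one side is a mismatch."""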
+        self._log.info("Matching Guest EPA attributes")
+        if required.has_field('pcie_device'):
+            self._log.debug("Matching pcie_device")
+            if available.has_field('pcie_device') == False:
+                self._log.debug("Matching pcie_device failed. Not available in flavor")
+                return False
+            else:
+                for dev in required.pcie_device:
+                    if not [ d for d in available.pcie_device
+                             if ((d.device_id == dev.device_id) and (d.count == dev.count)) ]:
+                        self._log.debug("Matching pcie_device failed. Required: %s, Available: %s", required.pcie_device, available.pcie_device)
+                        return False
+        elif available.has_field('pcie_device'):
+            self._log.debug("Rejecting available flavor because pcie_device not required but available")
+            return False
+
+        if required.has_field('mempage_size'):
+            self._log.debug("Matching mempage_size")
+            if available.has_field('mempage_size') == False:
+                self._log.debug("Matching mempage_size failed. Not available in flavor")
+                return False
+            else:
+                if required.mempage_size != available.mempage_size:
+                    self._log.debug("Matching mempage_size failed. Required: %s, Available: %s", required.mempage_size, available.mempage_size)
+                    return False
+        elif available.has_field('mempage_size'):
+            self._log.debug("Rejecting available flavor because mempage_size not required but available")
+            return False
+        
+        if required.has_field('cpu_pinning_policy'):
+            self._log.debug("Matching cpu_pinning_policy")
+            if required.cpu_pinning_policy != 'ANY':
+                if available.has_field('cpu_pinning_policy') == False:
+                    self._log.debug("Matching cpu_pinning_policy failed. Not available in flavor")
+                    return False
+                else:
+                    if required.cpu_pinning_policy != available.cpu_pinning_policy:
+                        self._log.debug("Matching cpu_pinning_policy failed. Required: %s, Available: %s", required.cpu_pinning_policy, available.cpu_pinning_policy)
+                        return False
+        elif available.has_field('cpu_pinning_policy'):
+            self._log.debug("Rejecting available flavor because cpu_pinning_policy not required but available")
+            return False
+        
+        if required.has_field('cpu_thread_pinning_policy'):
+            self._log.debug("Matching cpu_thread_pinning_policy")
+            if available.has_field('cpu_thread_pinning_policy') == False:
+                self._log.debug("Matching cpu_thread_pinning_policy failed. Not available in flavor")
+                return False
+            else:
+                if required.cpu_thread_pinning_policy != available.cpu_thread_pinning_policy:
+                    self._log.debug("Matching cpu_thread_pinning_policy failed. Required: %s, Available: %s", required.cpu_thread_pinning_policy, available.cpu_thread_pinning_policy)
+                    return False
+        elif available.has_field('cpu_thread_pinning_policy'):
+            self._log.debug("Rejecting available flavor because cpu_thread_pinning_policy not required but available")
+            return False
+
+        if required.has_field('trusted_execution'):
+            self._log.debug("Matching trusted_execution")
+            if required.trusted_execution == True:
+                if available.has_field('trusted_execution') == False:
+                    self._log.debug("Matching trusted_execution failed. Not available in flavor")
+                    return False
+                else:
+                    if required.trusted_execution != available.trusted_execution:
+                        self._log.debug("Matching trusted_execution failed. Required: %s, Available: %s", required.trusted_execution, available.trusted_execution)
+                        return False
+        elif available.has_field('trusted_execution'):
+            self._log.debug("Rejecting available flavor because trusted_execution not required but available")
+            return False
+        
+        if required.has_field('numa_node_policy'):
+            self._log.debug("Matching numa_node_policy")
+            if available.has_field('numa_node_policy') == False:
+                self._log.debug("Matching numa_node_policy failed. Not available in flavor")
+                return False
+            else:
+                if required.numa_node_policy.has_field('node_cnt'):
+                    self._log.debug("Matching numa_node_policy node_cnt")
+                    if available.numa_node_policy.has_field('node_cnt') == False:
+                        self._log.debug("Matching numa_node_policy node_cnt failed. Not available in flavor")
+                        return False
+                    else:
+                        if required.numa_node_policy.node_cnt != available.numa_node_policy.node_cnt:
+                            self._log.debug("Matching numa_node_policy node_cnt failed. Required: %s, Available: %s",required.numa_node_policy.node_cnt, available.numa_node_policy.node_cnt)
+                            return False
+                elif available.numa_node_policy.has_field('node_cnt'):
+                    self._log.debug("Rejecting available flavor because numa node count not required but available")
+                    return False
+                
+                if required.numa_node_policy.has_field('mem_policy'):
+                    self._log.debug("Matching numa_node_policy mem_policy")
+                    if available.numa_node_policy.has_field('mem_policy') == False:
+                        self._log.debug("Matching numa_node_policy mem_policy failed. Not available in flavor")
+                        return False
+                    else:
+                        if required.numa_node_policy.mem_policy != available.numa_node_policy.mem_policy:
+                            self._log.debug("Matching numa_node_policy mem_policy failed. Required: %s, Available: %s", required.numa_node_policy.mem_policy, available.numa_node_policy.mem_policy)
+                            return False
+                elif available.numa_node_policy.has_field('mem_policy'):
+                    self._log.debug("Rejecting available flavor because num node mem_policy not required but available")
+                    return False
+
+                if required.numa_node_policy.has_field('node'):
+                    self._log.debug("Matching numa_node_policy nodes configuration")
+                    if available.numa_node_policy.has_field('node') == False:
+                        self._log.debug("Matching numa_node_policy nodes configuration failed. Not available in flavor")
+                        return False
+                    for required_node in required.numa_node_policy.node:
+                        self._log.debug("Matching numa_node_policy nodes configuration for node %s", required_node)
+                        numa_match = False
+                        for available_node in available.numa_node_policy.node:
+                            if required_node.id != available_node.id:
+                                self._log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node)
+                                continue
+                            if required_node.vcpu != available_node.vcpu:
+                                self._log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node)
+                                continue
+                            if required_node.memory_mb != available_node.memory_mb:
+                                self._log.debug("Matching numa_node_policy nodes configuration failed. Required: %s, Available: %s", required_node, available_node)
+                                continue
+                            numa_match = True
+                        if numa_match == False:
+                            return False
+                elif available.numa_node_policy.has_field('node'):
+                    self._log.debug("Rejecting available flavor because numa nodes not required but available")
+                    return False
+        elif available.has_field('numa_node_policy'):
+            self._log.debug("Rejecting available flavor because numa_node_policy not required but available")
+            return False
+        self._log.info("Successful match for Guest EPA attributes")
+        return True
+
+    def _match_vswitch_epa(self, required, available):
+        self._log.debug("VSwitch EPA match found")
+        return True
+
+    def _match_hypervisor_epa(self, required, available):
+        self._log.debug("Hypervisor EPA match found")
+        return True
+
+    def _match_host_epa(self, required, available):
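+        """Exact-match host EPA attributes, with PREFER policies normalized to REQUIRE for comparison."""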
+        self._log.info("Matching Host EPA attributes")
+        if required.has_field('cpu_model'):
+            self._log.debug("Matching CPU model")
+            if available.has_field('cpu_model') == False:
+                self._log.debug("Matching CPU model failed. Not available in flavor")
+                return False
+            else:
+                #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
+                if required.cpu_model.replace('PREFER', 'REQUIRE') != available.cpu_model:
+                    self._log.debug("Matching CPU model failed. Required: %s, Available: %s", required.cpu_model, available.cpu_model)
+                    return False
+        elif available.has_field('cpu_model'):
+            self._log.debug("Rejecting available flavor because cpu_model not required but available")
+            return False
+        
+        if required.has_field('cpu_arch'):
+            self._log.debug("Matching CPU architecture")
+            if not available.has_field('cpu_arch'):
+                self._log.debug("Matching CPU architecture failed. Not available in flavor")
+                return False
+            else:
+                #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
+                if required.cpu_arch.replace('PREFER', 'REQUIRE') != available.cpu_arch:
+                    self._log.debug("Matching CPU architecture failed. Required: %s, Available: %s", required.cpu_arch, available.cpu_arch)
+                    return False
+        elif available.has_field('cpu_arch'):
+            self._log.debug("Rejecting available flavor because cpu_arch not required but available")
+            return False
+        
+        if required.has_field('cpu_vendor'):
+            self._log.debug("Matching CPU vendor")
+            if not available.has_field('cpu_vendor'):
+                self._log.debug("Matching CPU vendor failed. Not available in flavor")
+                return False
+            else:
+                #### Convert all PREFER to REQUIRE since flavor will only have REQUIRE attributes
+                if required.cpu_vendor.replace('PREFER', 'REQUIRE') != available.cpu_vendor:
+                    self._log.debug("Matching CPU vendor failed. Required: %s, Available: %s", required.cpu_vendor, available.cpu_vendor)
+                    return False
+        elif available.has_field('cpu_vendor'):
+            self._log.debug("Rejecting available flavor because cpu_vendor not required but available")
+            return False
+
+        if required.has_field('cpu_socket_count'):
+            self._log.debug("Matching CPU socket count")
+            if not available.has_field('cpu_socket_count'):
+                self._log.debug("Matching CPU socket count failed. Not available in flavor")
+                return False
+            else:
+                if required.cpu_socket_count != available.cpu_socket_count:
+                    self._log.debug("Matching CPU socket count failed. Required: %s, Available: %s", required.cpu_socket_count, available.cpu_socket_count)
+                    return False
+        elif available.has_field('cpu_socket_count'):
+            self._log.debug("Rejecting available flavor because cpu_socket_count not required but available")
+            return False
+        
+        if required.has_field('cpu_core_count'):
+            self._log.debug("Matching CPU core count")
+            if not available.has_field('cpu_core_count'):
+                self._log.debug("Matching CPU core count failed. Not available in flavor")
+                return False
+            else:
+                if required.cpu_core_count != available.cpu_core_count:
+                    self._log.debug("Matching CPU core count failed. Required: %s, Available: %s", required.cpu_core_count, available.cpu_core_count)
+                    return False
+        elif available.has_field('cpu_core_count'):
+            self._log.debug("Rejecting available flavor because cpu_core_count not required but available")
+            return False
+        
+        if required.has_field('cpu_core_thread_count'):
+            self._log.debug("Matching CPU core thread count")
+            if not available.has_field('cpu_core_thread_count'):
+                self._log.debug("Matching CPU core thread count failed. Not available in flavor")
+                return False
+            else:
+                if required.cpu_core_thread_count != available.cpu_core_thread_count:
+                    self._log.debug("Matching CPU core thread count failed. Required: %s, Available: %s", required.cpu_core_thread_count, available.cpu_core_thread_count)
+                    return False
+        elif available.has_field('cpu_core_thread_count'):
+            self._log.debug("Rejecting available flavor because cpu_core_thread_count not required but available")
+            return False
+    
+        if required.has_field('cpu_feature'):
+            self._log.debug("Matching CPU feature list")
+            if not available.has_field('cpu_feature'):
+                self._log.debug("Matching CPU feature list failed. Not available in flavor")
+                return False
+            else:
+                for feature in required.cpu_feature:
+                    if feature not in available.cpu_feature:
+                        self._log.debug("Matching CPU feature list failed. Required feature: %s is not present. Available features: %s", feature, available.cpu_feature)
+                        return False
+        elif available.has_field('cpu_feature'):
+            self._log.debug("Rejecting available flavor because cpu_feature not required but available")
+            return False
+        self._log.info("Successful match for Host EPA attributes")
+        return True
+
+
+    def _match_placement_group_inputs(self, required, available):
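+        """Match host-aggregate (placement group) inputs between request and flavor.
+
+        Succeeds when neither side carries host-aggregate entries, or when every
+        required entry is present in the available list.
+        """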
+        self._log.info("Matching Host aggregate attributes")
+        
+        if not required and not available:
+            # Host aggregate not required and not available => success
+            self._log.info("Successful match for Host Aggregate attributes")
+            return True
+        if required and available:
+            # Host aggregate requested and available => Do a match and decide
+            required_dicts = [x.as_dict() for x in required]
+            available_dicts = [y.as_dict() for y in available]
+            for required_entry in required_dicts:
+                if required_entry not in available_dicts:
+                    self._log.debug("Rejecting available flavor because host Aggregate mismatch. Required: %s, Available: %s ", required, available)
+                    return False
+            self._log.info("Successful match for Host Aggregate attributes")
+            return True
+        else:
+            # Either of following conditions => Failure
+            #  - Host aggregate required but not available
+            #  - Host aggregate not required but available
+            self._log.debug("Rejecting available flavor because host Aggregate mismatch. Required: %s, Available: %s ", required, available)
+            return False
+
+
+    def match_image_params(self, resource_info, request_params):
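+        # Image parameters are not part of flavor matching; always report a match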
+        return True
+
+    def match_epa_params(self, resource_info, request_params):
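+        """Match every EPA category of the request against a discovered flavor.
+
+        All categories (vm-flavor, guest, vswitch, hypervisor, host EPA and
+        host aggregates) must match for the flavor to be selected.
+        """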
+        result = self._match_vm_flavor(getattr(request_params, 'vm_flavor'),
+                                       getattr(resource_info, 'vm_flavor'))
+        if not result:
+            self._log.debug("VM Flavor mismatched")
+            return False
+
+        result = self._match_guest_epa(getattr(request_params, 'guest_epa'),
+                                       getattr(resource_info, 'guest_epa'))
+        if not result:
+            self._log.debug("Guest EPA mismatched")
+            return False
+
+        result = self._match_vswitch_epa(getattr(request_params, 'vswitch_epa'),
+                                         getattr(resource_info, 'vswitch_epa'))
+        if not result:
+            self._log.debug("Vswitch EPA mismatched")
+            return False
+
+        result = self._match_hypervisor_epa(getattr(request_params, 'hypervisor_epa'),
+                                            getattr(resource_info, 'hypervisor_epa'))
+        if not result:
+            self._log.debug("Hypervisor EPA mismatched")
+            return False
+
+        result = self._match_host_epa(getattr(request_params, 'host_epa'),
+                                      getattr(resource_info, 'host_epa'))
+        if not result:
+            self._log.debug("Host EPA mismatched")
+            return False
+
+        result = self._match_placement_group_inputs(getattr(request_params, 'host_aggregate'),
+                                                    getattr(resource_info, 'host_aggregate'))
+
+        if not result:
+            self._log.debug("Host Aggregate mismatched")
+            return False
+        
+        return True
+
+    @asyncio.coroutine
+    def initialize_resource_in_cal(self, resource, request):
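+        """Attach the requested connection points to the allocated VDU via RW.CAL."""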
+        self._log.info("Initializing the compute-resource with id: %s in RW.CAL", resource.resource_id)
+        modify_params = RwcalYang.VDUModifyParams()
+        modify_params.vdu_id = resource.resource_id
+        modify_params.image_id = request.image_id
+
+        for c_point in request.connection_points:
+            self._log.debug("Adding connection point for VDU: %s to virtual-compute with id: %s. Connection point name: %s",
+                            request.name, resource.resource_id, c_point.name)
+            point = modify_params.connection_points_add.add()
+            point.name = c_point.name
+            point.virtual_link_id = c_point.virtual_link_id
+        yield from self._cal.modify_virtual_compute(modify_params)
+
+    @asyncio.coroutine
+    def uninitialize_resource_in_cal(self, resource):
+        self._log.info("Un-initializing the compute-resource with id: %s in RW.CAL", resource.resource_id)
+        modify_params = RwcalYang.VDUModifyParams()
+        modify_params.vdu_id = resource.resource_id
+        resource_info = yield from self.get_resource_info(resource)
+        for c_point in resource_info.connection_points:
+            self._log.debug("Removing connection point: %s from VDU: %s",
+                            c_point.name, resource_info.name)
+            point = modify_params.connection_points_remove.add()
+            point.connection_point_id = c_point.connection_point_id
+        yield from self._cal.modify_virtual_compute(modify_params)
+
+
+class ResourceMgrCore(object):
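+    """Core resource-manager state.
+
+    Tracks the CAL handler and resource pools per cloud account, plus the
+    event-id to (resource, account, pool) mapping used for allocations.
+    """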
+    def __init__(self, dts, log, log_hdl, loop, parent):
+        self._log = log
+        self._log_hdl = log_hdl
+        self._dts = dts
+        self._loop = loop
+        self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
+        self._parent = parent
+        self._cloud_cals = {}
+        # Dictionary of pool objects keyed by name
+        self._cloud_pool_table = {}
+        # Dictionary of tuples (resource_id, cloud_account_name, pool_name) keyed by event_id
+        self._resource_table = {}
+        self._pool_class = {'compute': ComputePool,
+                            'network': NetworkPool}
+
+    def _get_cloud_pool_table(self, cloud_account_name):
+        if cloud_account_name not in self._cloud_pool_table:
+            msg = "Cloud account %s not found" % cloud_account_name
+            self._log.error(msg)
+            raise ResMgrCloudAccountNotFound(msg)
+
+        return self._cloud_pool_table[cloud_account_name]
+
+    def _get_cloud_cal_plugin(self, cloud_account_name):
+        if cloud_account_name not in self._cloud_cals:
+            msg = "Cloud account %s not found" % cloud_account_name
+            self._log.error(msg)
+            raise ResMgrCloudAccountNotFound(msg)
+
+        return self._cloud_cals[cloud_account_name]
+
+    def _add_default_cloud_pools(self, cloud_account_name):
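+        """Create and unlock a dynamic compute pool and a dynamic network pool for the account."""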
+        self._log.debug("Adding default compute and network pools for cloud account %s",
+                        cloud_account_name)
+        default_pools = [
+                    {
+                        'name': '____default_compute_pool',
+                        'resource_type': 'compute',
+                        'pool_type': 'dynamic',
+                        'max_size': 128,
+                    },
+                    {
+                        'name': '____default_network_pool',
+                        'resource_type': 'network',
+                        'pool_type': 'dynamic',
+                        'max_size': 128,
+                    },
+                ]
+
+        for pool_dict in default_pools:
+            pool_info = ResourcePoolInfo.from_dict(pool_dict)
+            self._log.info("Applying configuration for cloud account %s pool: %s",
+                           cloud_account_name, pool_info.name)
+
+            self.add_resource_pool(cloud_account_name, pool_info)
+            self.unlock_resource_pool(cloud_account_name, pool_info.name)
+
+    def get_cloud_account_names(self):
+        """ Returns a list of configured cloud account names """
+        return self._cloud_cals.keys()
+
+    def add_cloud_account(self, account):
+        self._log.debug("Received CAL account. Account Name: %s, Account Type: %s",
+                        account.name, account.account_type)
+
+        ### Add cal handler to all the pools
+        if account.name in self._cloud_cals:
+            raise ResMgrCloudAccountExists("Cloud account already exists in res mgr: %s"
+                                           % account.name)
+
+        self._cloud_pool_table[account.name] = {}
+
+        cal = ResourceMgrCALHandler(self._loop, self._executor, self._log, self._log_hdl, account)
+        self._cloud_cals[account.name] = cal
+
+        self._add_default_cloud_pools(account.name)
+
+    def update_cloud_account(self, account):
+        raise NotImplementedError("Update cloud account not implemented")
+
+    def delete_cloud_account(self, account_name, dry_run=False):
+        cloud_pool_table = self._get_cloud_pool_table(account_name)
+        for pool in cloud_pool_table.values():
+            if pool.in_use():
+                raise ResMgrCloudAccountInUse("Cannot delete cloud which is currently in use")
+
+        # If dry_run is specified, do not actually delete the cloud account
+        if dry_run:
+            return
+
+        for pool in list(cloud_pool_table):
+            self.delete_resource_pool(account_name, pool)
+
+        del self._cloud_pool_table[account_name]
+        del self._cloud_cals[account_name]
+
+    def add_resource_pool(self, cloud_account_name, pool_info):
+        cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+        if pool_info.name in cloud_pool_table:
+            raise ResMgrDuplicatePool("Pool with name: %s already exists" % pool_info.name)
+
+        cloud_cal = self._get_cloud_cal_plugin(cloud_account_name)
+        pool = self._pool_class[pool_info.resource_type](self._log, self._loop, pool_info, cloud_cal)
+
+        cloud_pool_table[pool_info.name] = pool
+
+    def delete_resource_pool(self, cloud_account_name, pool_name):
+        cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+        if pool_name not in cloud_pool_table:
+            self._log.error("Pool: %s not found for deletion", pool_name)
+            return
+        pool = cloud_pool_table[pool_name]
+
+        if pool.in_use():
+            # Can't delete a pool in use
+            self._log.error("Pool: %s in use. Can not delete in-use pool", pool.name)
+            return
+
+        pool.cleanup()
+        del cloud_pool_table[pool_name]
+        self._log.info("Resource Pool: %s successfully deleted", pool_name)
+
+    def modify_resource_pool(self, cloud_account_name, pool):
+        pass
+
+    def lock_resource_pool(self, cloud_account_name, pool_name):
+        cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+        if pool_name not in cloud_pool_table:
+            self._log.info("Pool: %s is not available for lock operation", pool_name)
+            return
+
+        pool = cloud_pool_table[pool_name]
+        pool.lock_pool()
+
+    def unlock_resource_pool(self, cloud_account_name, pool_name):
+        cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+        if pool_name not in cloud_pool_table:
+            self._log.info("Pool: %s is not available for unlock operation", pool_name)
+            return
+
+        pool = cloud_pool_table[pool_name]
+        pool.unlock_pool()
+
+    def get_resource_pool_info(self, cloud_account_name, pool_name):
+        cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+        if pool_name in cloud_pool_table:
+            pool = cloud_pool_table[pool_name]
+            return pool.get_pool_info()
+        else:
+            return None
+
+    def get_resource_pool_list(self, cloud_account_name):
+        return list(self._get_cloud_pool_table(cloud_account_name).values())
+
+    def _select_resource_pools(self, cloud_account_name, resource_type):
+        pools = [pool for pool in self.get_resource_pool_list(cloud_account_name)
+                 if pool.resource_type == resource_type and pool.status == 'unlocked']
+        if not pools:
+            raise ResMgrPoolNotAvailable("No %s pool found for resource allocation" % resource_type)
+
+        return pools[0]
+
+    @asyncio.coroutine
+    def allocate_virtual_resource(self, event_id, cloud_account_name, request, resource_type):
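+        """Allocate a virtual resource of resource_type for the given cloud account.
+
+        event_id makes the request idempotent: a repeated event-id with the same
+        resource type returns the already-allocated resource.
+        """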
+        ### Check if event_id is unique or already in use
+        if event_id in self._resource_table:
+            r_id, cloud_account_name, pool_name = self._resource_table[event_id]
+            self._log.warning("Requested event-id :%s for resource-allocation already active with pool: %s",
+                              event_id, pool_name)
+            # If resource-type matches then return the same resource
+            cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+            pool = cloud_pool_table[pool_name]
+            if pool.resource_type == resource_type:
+                info = yield from pool.read_resource_info(r_id)
+                return info
+            else:
+                self._log.error("Event-id conflict. Duplicate event-id: %s", event_id)
+                raise ResMgrDuplicateEventId("Requested event-id :%s already active with pool: %s" %(event_id, pool_name))
+
+        ### All-OK, lets go ahead with resource allocation
+        pool = self._select_resource_pools(cloud_account_name, resource_type)
+        self._log.info("Selected pool %s for resource allocation", pool.name)
+
+        r_id, r_info = yield from pool.allocate_resource(request)
+
+        self._resource_table[event_id] = (r_id, cloud_account_name, pool.name)
+        return r_info
+
+    @asyncio.coroutine
+    def reallocate_virtual_resource(self, event_id, cloud_account_name, request, resource_type, resource):
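+        """Re-attach a previously allocated resource, e.g. after a tasklet restart.
+
+        If the resource can still be read from its pool it is re-registered under
+        event_id; otherwise a fresh allocation is made from the pool.
+        """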
+        ### Check if event_id is unique or already in use
+        if event_id in self._resource_table:
+            r_id, cloud_account_name, pool_name = self._resource_table[event_id]
+            self._log.warning("Requested event-id :%s for resource-allocation already active with pool: %s",
+                              event_id, pool_name)
+            # If resource-type matches then return the same resource
+            cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+            pool = cloud_pool_table[pool_name]
+            if pool.resource_type == resource_type:
+                info = yield from pool.read_resource_info(r_id)
+                return info
+            else:
+                self._log.error("Event-id conflict. Duplicate event-id: %s", event_id)
+                raise ResMgrDuplicateEventId("Requested event-id :%s already active with pool: %s" %(event_id, pool_name))
+
+        r_info = None
+        cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+        pool = cloud_pool_table[resource.pool_name]
+        if pool.resource_type == resource_type:
+            if resource_type == 'network':
+                r_id = resource.virtual_link_id
+                r_info = yield from pool.get_info_by_id(resource.virtual_link_id)
+            elif resource_type == 'compute':
+                r_id = resource.vdu_id
+                r_info = yield from pool.get_info_by_id(resource.vdu_id)
+
+        if r_info is None:
+            r_id, r_info = yield from pool.allocate_resource(request)
+            self._resource_table[event_id] = (r_id, cloud_account_name, resource.pool_name)
+            return r_info
+
+        self._resource_table[event_id] = (r_id, cloud_account_name, resource.pool_name)
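+        # Re-register the recovered resource in the pool's internal bookkeeping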
+        new_resource = pool._resource_class(r_id, 'dynamic')
+        if resource_type == 'compute':
+            requested_params = RwcalYang.VDUInitParams()
+            requested_params.from_dict(request.as_dict())
+            new_resource.requested_params = requested_params
+        pool._all_resources[r_id] = new_resource
+        pool._allocated_resources[r_id] = new_resource
+        return r_info
+
+    @asyncio.coroutine
+    def release_virtual_resource(self, event_id, resource_type):
+        ### Check if event_id exists
+        if event_id not in self._resource_table:
+            self._log.error("Received resource-release-request with unknown Event-id :%s", event_id)
+            raise ResMgrUnknownEventId("Received resource-release-request with unknown Event-id :%s" %(event_id))
+
+        ## All-OK, lets proceed with resource release
+        r_id, cloud_account_name, pool_name = self._resource_table.pop(event_id)
+        self._log.debug("Attempting to release virtual resource id %s from pool %s",
+                        r_id, pool_name)
+
+        cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+        pool = cloud_pool_table[pool_name]
+        yield from pool.release_resource(r_id)
+
+    @asyncio.coroutine
+    def read_virtual_resource(self, event_id, resource_type):
+        ### Check if event_id exists
+        if event_id not in self._resource_table:
+            self._log.error("Received resource-read-request with unknown Event-id :%s", event_id)
+            raise ResMgrUnknownEventId("Received resource-read-request with unknown Event-id :%s" %(event_id))
+
+        ## All-OK, lets proceed
+        r_id, cloud_account_name, pool_name = self._resource_table[event_id]
+        cloud_pool_table = self._get_cloud_pool_table(cloud_account_name)
+        pool = cloud_pool_table[pool_name]
+        info = yield from pool.read_resource_info(r_id)
+        return info
diff --git a/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_events.py b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgr_events.py
new file mode 100755 (executable)
index 0000000..5f87c66
--- /dev/null
@@ -0,0 +1,314 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import sys
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+from gi.repository import (
+    RwDts as rwdts,
+    RwYang,
+    RwResourceMgrYang,
+    RwLaunchpadYang,
+    RwcalYang,
+)
+
+from gi.repository.RwTypes import RwStatus
+import rift.tasklets
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+class ResourceMgrEvent(object):
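+    """Publishes VDU and virtual-link resource state over DTS in response to
+    resource-mgmt vdu-event and vlink-event requests."""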
+    VDU_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data"
+    VLINK_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data"
+
+    def __init__(self, dts, log, loop, parent):
+        self._log = log
+        self._dts = dts
+        self._loop = loop
+        self._parent = parent
+        self._vdu_reg = None
+        self._link_reg = None
+
+        self._vdu_reg_event = asyncio.Event(loop=self._loop)
+        self._link_reg_event = asyncio.Event(loop=self._loop)
+
+    @asyncio.coroutine
+    def wait_ready(self, timeout=5):
+        self._log.debug("Waiting for all request registrations to become ready.")
+        yield from asyncio.wait([self._link_reg_event.wait(), self._vdu_reg_event.wait()],
+                                timeout=timeout, loop=self._loop)
+
+    def create_record_dts(self, regh, xact, path, msg):
+        """
+        Create a record in DTS with path and message
+        """
+        self._log.debug("Creating Resource Record xact = %s, %s:%s",
+                        xact, path, msg)
+        regh.create_element(path, msg)
+
+    def delete_record_dts(self, regh, xact, path):
+        """
+        Delete a resource record in DTS at the given path
+        """
+        self._log.debug("Deleting Resource Record xact = %s, %s",
+                        xact, path)
+        regh.delete_element(path)
+
+    @asyncio.coroutine
+    def register(self):
+        @asyncio.coroutine
+        def onlink_event(dts, g_reg, xact, xact_event, scratch_data):
+            @asyncio.coroutine
+            def instantiate_realloc_vn(link):
+                """Re-populate the virtual link information after restart
+
+                Arguments:
+                    link - the virtual-link event record recovered from DTS
+
+                """
+                # wait for 3 seconds
+                yield from asyncio.sleep(3, loop=self._loop)
+
+                response_info = yield from self._parent.reallocate_virtual_network(link.event_id,
+                                                                                 link.cloud_account,
+                                                                                 link.request_info, link.resource_info,
+                                                                                 )
+            if xact_event == rwdts.MemberEvent.INSTALL:
+                link_cfg = self._link_reg.elements
+                for link in link_cfg:
+                    self._loop.create_task(instantiate_realloc_vn(link))
+            return rwdts.MemberRspCode.ACTION_OK
+
+        @asyncio.coroutine
+        def onvdu_event(dts, g_reg, xact, xact_event, scratch_data):
+            @asyncio.coroutine
+            def instantiate_realloc_vdu(vdu):
+                """Re-populate the VDU information after restart
+
+                Arguments:
+                    vdu - the VDU event record recovered from DTS
+
+                """
+                # wait for 3 seconds
+                yield from asyncio.sleep(3, loop=self._loop)
+
+                response_info = yield from self._parent.allocate_virtual_compute(vdu.event_id,
+                                                                                 vdu.cloud_account,
+                                                                                 vdu.request_info
+                                                                                 )
+            if xact_event == rwdts.MemberEvent.INSTALL:
+                vdu_cfg = self._vdu_reg.elements
+                for vdu in vdu_cfg:
+                    self._loop.create_task(instantiate_realloc_vdu(vdu))
+            return rwdts.MemberRspCode.ACTION_OK
+
+        def on_link_request_commit(xact_info):
+            """ The transaction has been committed """
+            self._log.debug("Received link request commit (xact_info: %s)", xact_info)
+            return rwdts.MemberRspCode.ACTION_OK
+
+        @asyncio.coroutine
+        def on_link_request_prepare(xact_info, action, ks_path, request_msg):
+            self._log.debug("Received virtual-link on_prepare callback (xact_info: %s, action: %s): %s",
+                            xact_info, action, request_msg)
+
+            response_info = None
+            response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
+
+            schema = RwResourceMgrYang.VirtualLinkEventData().schema()
+            pathentry = schema.keyspec_to_entry(ks_path)
+
+            if action == rwdts.QueryAction.CREATE:
+                try:
+                    response_info = yield from self._parent.allocate_virtual_network(pathentry.key00.event_id,
+                                                                                 request_msg.cloud_account,
+                                                                                 request_msg.request_info)
+                except Exception as e:
+                    self._log.error("Encountered exception: %s while creating virtual network", str(e))
+                    self._log.exception(e)
+                    response_info = RwResourceMgrYang.VirtualLinkEventData_ResourceInfo()
+                    response_info.resource_state = 'failed'
+                    response_info.resource_errors = str(e)
+                    yield from self._dts.query_update(response_xpath,
+                                                      rwdts.XactFlag.ADVISE,
+                                                      response_info)
+                else:
+                    request_msg.resource_info = response_info
+                    self.create_record_dts(self._link_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema()), request_msg)
+            elif action == rwdts.QueryAction.DELETE:
+                yield from self._parent.release_virtual_network(pathentry.key00.event_id)
+                self.delete_record_dts(self._link_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema()))
+            elif action == rwdts.QueryAction.READ:
+                response_info = yield from self._parent.read_virtual_network_info(pathentry.key00.event_id)
+            else:
+                raise ValueError("Only read/create/delete actions available. Received action: %s" %(action))
+
+            self._log.debug("Responding with VirtualLinkInfo at xpath %s: %s.",
+                            response_xpath, response_info)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK, response_xpath, response_info)
+
+
+        def on_vdu_request_commit(xact_info):
+            """ The transaction has been committed """
+            self._log.debug("Received vdu request commit (xact_info: %s)", xact_info)
+            return rwdts.MemberRspCode.ACTION_OK
+
+        @asyncio.coroutine
+        def monitor_vdu_state(response_xpath, pathentry):
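+            """Poll the VDU once a second (up to loop_cnt attempts) and publish the
+            final resource info at response_xpath once a terminal state is reached."""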
+            self._log.info("Initiating VDU state monitoring for xpath: %s ", response_xpath)
+            loop_cnt = 180
+            for i in range(loop_cnt):
+                self._log.debug("VDU state monitoring for xpath: %s. Sleeping for 1 second", response_xpath)
+                yield from asyncio.sleep(1, loop = self._loop)
+                try:
+                    response_info = yield from self._parent.read_virtual_compute_info(pathentry.key00.event_id)
+                except Exception as e:
+                    self._log.info("VDU state monitoring: Received exception %s in VDU state monitoring for %s. Aborting monitoring",
+                                   str(e),response_xpath)
+                    response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
+                    response_info.resource_state = 'failed'
+                    response_info.resource_errors = str(e)
+                    yield from self._dts.query_update(response_xpath,
+                                                      rwdts.XactFlag.ADVISE,
+                                                      response_info)
+                else:
+                    if response_info.resource_state in ('active', 'failed'):
+                        self._log.info("VDU state monitoring: VDU reached terminal state. Publishing VDU info: %s at path: %s",
+                                       response_info, response_xpath)
+                        yield from self._dts.query_update(response_xpath,
+                                                          rwdts.XactFlag.ADVISE,
+                                                          response_info)
+                        return
+            else:
+                ### End of loop. This is only possible if VDU did not reach active state
+                err_msg = "VDU state monitoring: VDU at xpath: {} did not reach active state in {} seconds. Aborting monitoring".format(response_xpath, loop_cnt)
+                self._log.info(err_msg)
+                response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
+                response_info.resource_state = 'failed'
+                response_info.resource_errors = err_msg
+                yield from self._dts.query_update(response_xpath,
+                                                  rwdts.XactFlag.ADVISE,
+                                                  response_info)
+            return
+
+        @asyncio.coroutine
+        def allocate_vdu_task(ks_path, event_id, cloud_account, request_msg):
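+            """Allocate the VDU and publish its state, starting state monitoring
+            when the resource has not yet reached a terminal (active/failed) state."""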
+            response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
+            schema = RwResourceMgrYang.VDUEventData().schema()
+            pathentry = schema.keyspec_to_entry(ks_path)
+            try:
+                response_info = yield from self._parent.allocate_virtual_compute(event_id,
+                                                                                 cloud_account,
+                                                                                 request_msg,)
+            except Exception as e:
+                self._log.error("Encountered exception : %s while creating virtual compute", str(e))
+                response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
+                response_info.resource_state = 'failed'
+                response_info.resource_errors = str(e)
+                yield from self._dts.query_update(response_xpath,
+                                                  rwdts.XactFlag.ADVISE,
+                                                  response_info)
+            else:
+                if response_info.resource_state in ('failed', 'active'):
+                    self._log.info("Virtual compute create task completed. Publishing VDU info: %s at path: %s",
+                                   response_info, response_xpath)
+                    yield from self._dts.query_update(response_xpath,
+                                                      rwdts.XactFlag.ADVISE,
+                                                      response_info)
+                else:
+                    asyncio.ensure_future(monitor_vdu_state(response_xpath, pathentry),
+                                          loop = self._loop)
+
+
+        @asyncio.coroutine
+        def on_vdu_request_prepare(xact_info, action, ks_path, request_msg):
+            self._log.debug("Received vdu on_prepare callback (xact_info: %s, action: %s): %s",
+                            xact_info, action, request_msg)
+            response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
+            schema = RwResourceMgrYang.VDUEventData().schema()
+            pathentry = schema.keyspec_to_entry(ks_path)
+
+            if action == rwdts.QueryAction.CREATE:
+                response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
+                response_info.resource_state = 'pending'
+                request_msg.resource_info = response_info
+                self.create_record_dts(self._vdu_reg,
+                                       None,
+                                       ks_path.to_xpath(RwResourceMgrYang.get_schema()),
+                                       request_msg)
+                asyncio.ensure_future(allocate_vdu_task(ks_path,
+                                                        pathentry.key00.event_id,
+                                                        request_msg.cloud_account,
+                                                        request_msg.request_info),
+                                      loop = self._loop)
+            elif action == rwdts.QueryAction.DELETE:
+                response_info = None
+                yield from self._parent.release_virtual_compute(pathentry.key00.event_id)
+                self.delete_record_dts(self._vdu_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema()))
+            elif action == rwdts.QueryAction.READ:
+                response_info = yield from self._parent.read_virtual_compute_info(pathentry.key00.event_id)
+            else:
+                raise ValueError("Only read/create/delete actions available. Received action: %s" %(action))
+
+            self._log.debug("Responding with VDUInfo at xpath %s: %s",
+                            response_xpath, response_info)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK, response_xpath, response_info)
+
+
+        @asyncio.coroutine
+        def on_request_ready(registration, status):
+            self._log.debug("Got request ready event (registration: %s) (status: %s)",
+                            registration, status)
+
+            if registration == self._link_reg:
+                self._link_reg_event.set()
+            elif registration == self._vdu_reg:
+                self._vdu_reg_event.set()
+            else:
+                self._log.error("Unknown registration ready event: %s", registration)
+
+        link_handlers = rift.tasklets.Group.Handler(on_event=onlink_event,)
+        with self._dts.group_create(handler=link_handlers) as link_group:
+            self._log.debug("Registering for Link Resource Request using xpath: %s",
+                            ResourceMgrEvent.VLINK_REQUEST_XPATH)
+
+            self._link_reg = link_group.register(xpath=ResourceMgrEvent.VLINK_REQUEST_XPATH,
+                                            handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
+                                                                                          on_commit=on_link_request_commit,
+                                                                                          on_prepare=on_link_request_prepare),
+                                            flags=rwdts.Flag.PUBLISHER | rwdts.Flag.DATASTORE,)
+
+        vdu_handlers = rift.tasklets.Group.Handler(on_event=onvdu_event, )
+        with self._dts.group_create(handler=vdu_handlers) as vdu_group:
+
+            self._log.debug("Registering for VDU Resource Request using xpath: %s",
+                            ResourceMgrEvent.VDU_REQUEST_XPATH)
+
+            self._vdu_reg = vdu_group.register(xpath=ResourceMgrEvent.VDU_REQUEST_XPATH,
+                                           handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
+                                                                                         on_commit=on_vdu_request_commit,
+                                                                                         on_prepare=on_vdu_request_prepare),
+                                           flags=rwdts.Flag.PUBLISHER | rwdts.Flag.DATASTORE,)
+
diff --git a/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py b/rwlaunchpad/plugins/rwresmgr/rift/tasklets/rwresmgrtasklet/rwresmgrtasklet.py
new file mode 100755 (executable)
index 0000000..cdcadc7
--- /dev/null
@@ -0,0 +1,232 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import logging
+import sys
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+from gi.repository import (
+    RwDts as rwdts,
+    RwYang,
+    RwResourceMgrYang,
+    RwLaunchpadYang,
+    RwcalYang,
+)
+
+import rift.tasklets
+
+from . import rwresmgr_core as Core
+from . import rwresmgr_config as Config
+from . import rwresmgr_events as Event
+
+
+class ResourceManager(object):
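+    """Facade that wires the resource-manager config, event and core handlers
+    together and forwards DTS callbacks to ResourceMgrCore."""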
+    def __init__(self, log, log_hdl, loop, dts):
+        self._log            = log
+        self._log_hdl        = log_hdl
+        self._loop           = loop
+        self._dts            = dts
+        self.config_handler  = Config.ResourceMgrConfig(self._dts, self._log, self._log_hdl, self._loop, self)
+        self.event_handler   = Event.ResourceMgrEvent(self._dts, self._log, self._loop, self)
+        self.core            = Core.ResourceMgrCore(self._dts, self._log, self._log_hdl, self._loop, self)
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self.config_handler.register()
+        yield from self.event_handler.register()
+
+    def add_cloud_account_config(self, account):
+        self._log.debug("Received Cloud-Account add config event for account: %s", account.name)
+        self.core.add_cloud_account(account)
+
+    def update_cloud_account_config(self, account):
+        self._log.debug("Received Cloud-Account update config event for account: %s", account.name)
+        self.core.update_cloud_account(account)
+
+    def delete_cloud_account_config(self, account_name, dry_run=False):
+        self._log.debug("Received Cloud-Account delete event for account (dry_run: %s): %s",
+                        dry_run, account_name)
+        self.core.delete_cloud_account(account_name, dry_run)
+
+    def get_cloud_account_names(self):
+        cloud_account_names = self.core.get_cloud_account_names()
+        return cloud_account_names
+
+    def pool_add(self, cloud_account_name, pool):
+        self._log.debug("Received Pool add event for cloud account %s pool: %s",
+                        cloud_account_name, pool.name)
+        self.core.add_resource_pool(cloud_account_name, pool)
+
+    def pool_modify(self, cloud_account_name, pool):
+        self._log.debug("Received Pool modify event for cloud account %s pool: %s",
+                        cloud_account_name, pool.name)
+        self.core.modify_resource_pool(cloud_account_name, pool)
+
+    def pool_delete(self, cloud_account_name, pool_name):
+        self._log.debug("Received Pool delete event for cloud account %s pool: %s",
+                        cloud_account_name, pool_name)
+        self.core.delete_resource_pool(cloud_account_name, pool_name)
+
+    def get_pool_list(self, cloud_account_name):
+        return self.core.get_resource_pool_list(cloud_account_name)
+
+    def get_pool_info(self, cloud_account_name, pool_name):
+        self._log.debug("Received get-pool-info event for cloud account %s pool: %s",
+                        cloud_account_name, pool_name)
+        return self.core.get_resource_pool_info(cloud_account_name, pool_name)
+
+    def lock_pool(self, cloud_account_name, pool_name):
+        self._log.debug("Received pool lock event for cloud account %s pool: %s",
+                        cloud_account_name, pool_name)
+        self.core.lock_resource_pool(cloud_account_name, pool_name)
+
+    def unlock_pool(self, cloud_account_name, pool_name):
+        self._log.debug("Received pool unlock event for cloud account %s pool: %s",
+                        cloud_account_name, pool_name)
+        self.core.unlock_resource_pool(cloud_account_name, pool_name)
+
+    @asyncio.coroutine
+    def allocate_virtual_network(self, event_id, cloud_account_name, request):
+        self._log.info("Received network resource allocation request with event-id: %s", event_id)
+        resource = yield from self.core.allocate_virtual_resource(event_id, cloud_account_name, request, 'network')
+        return resource
+
+    @asyncio.coroutine
+    def reallocate_virtual_network(self, event_id, cloud_account_name, request, resource):
+        self._log.info("Received network resource re-allocation request with event-id: %s", event_id)
+        resource = yield from self.core.reallocate_virtual_resource(event_id, cloud_account_name, request, 'network', resource)
+        return resource
+
+    @asyncio.coroutine
+    def release_virtual_network(self, event_id):
+        self._log.info("Received network resource release request with event-id: %s", event_id)
+        yield from self.core.release_virtual_resource(event_id, 'network')
+
+    @asyncio.coroutine
+    def read_virtual_network_info(self, event_id):
+        self._log.info("Received network resource read request with event-id: %s", event_id)
+        info = yield from self.core.read_virtual_resource(event_id, 'network')
+        return info
+
+    @asyncio.coroutine
+    def allocate_virtual_compute(self, event_id, cloud_account_name, request):
+        self._log.info("Received compute resource allocation request "
+                       "(cloud account: %s) with event-id: %s",
+                       cloud_account_name, event_id)
+        resource = yield from self.core.allocate_virtual_resource(
+                event_id, cloud_account_name, request, 'compute',
+                )
+        return resource
+
+    @asyncio.coroutine
+    def reallocate_virtual_compute(self, event_id, cloud_account_name, request, resource):
+        self._log.info("Received compute resource re-allocation request "
+                       "(cloud account: %s) with event-id: %s",
+                       cloud_account_name, event_id)
+        resource = yield from self.core.reallocate_virtual_resource(
+                event_id, cloud_account_name, request, 'compute', resource, 
+                )
+        return resource
+
+    @asyncio.coroutine
+    def release_virtual_compute(self, event_id):
+        self._log.info("Received compute resource release request with event-id: %s", event_id)
+        yield from self.core.release_virtual_resource(event_id, 'compute')
+
+    @asyncio.coroutine
+    def read_virtual_compute_info(self, event_id):
+        self._log.info("Received compute resource read request with event-id: %s", event_id)
+        info = yield from self.core.read_virtual_resource(event_id, 'compute')
+        return info
+
+
+class ResMgrTasklet(rift.tasklets.Tasklet):
+    def __init__(self, *args, **kwargs):
+        super(ResMgrTasklet, self).__init__(*args, **kwargs)
+        self.rwlog.set_category("rw-resource-mgr-log")
+        self._dts = None
+        self._resource_manager = None
+
+    def start(self):
+        super(ResMgrTasklet, self).start()
+        self.log.info("Starting ResMgrTasklet")
+
+        self.log.debug("Registering with dts")
+
+        self._dts = rift.tasklets.DTS(self.tasklet_info,
+                                      RwResourceMgrYang.get_schema(),
+                                      self.loop,
+                                      self.on_dts_state_change)
+
+        self.log.debug("Created DTS Api GI Object: %s", self._dts)
+
+    def stop(self):
+        try:
+            self._dts.deinit()
+        except Exception:
+            print("Caught Exception in RESMGR stop:", sys.exc_info()[0])
+            raise
+
+    def on_instance_started(self):
+        self.log.debug("Got instance started callback")
+
+    @asyncio.coroutine
+    def init(self):
+        self._log.info("Initializing the Resource Manager tasklet")
+        self._resource_manager = ResourceManager(self.log,
+                                                 self.log_hdl,
+                                                 self.loop,
+                                                 self._dts)
+        yield from self._resource_manager.register()
+
+    @asyncio.coroutine
+    def run(self):
+        pass
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Take action according to current dts state to transition
+        application into the corresponding application state
+
+        Arguments
+            state - current dts state
+        """
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self._dts.handle.set_state(next_state)
diff --git a/rwlaunchpad/plugins/rwresmgr/rwresmgrtasklet.py b/rwlaunchpad/plugins/rwresmgr/rwresmgrtasklet.py
new file mode 100755 (executable)
index 0000000..55d2329
--- /dev/null
@@ -0,0 +1,27 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwresmgrtasklet
+class Tasklet(rift.tasklets.rwresmgrtasklet.ResMgrTasklet):
+    pass
+
+# vim: sw=4
diff --git a/rwlaunchpad/plugins/rwresmgr/test/rmmgr_test.py b/rwlaunchpad/plugins/rwresmgr/test/rmmgr_test.py
new file mode 100755 (executable)
index 0000000..87d11a2
--- /dev/null
@@ -0,0 +1,781 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import asyncio
+import logging
+import os
+import sys
+import types
+import unittest
+import uuid
+import random
+
+import xmlrunner
+
+import gi
+gi.require_version('CF', '1.0')
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwMain', '1.0')
+gi.require_version('RwManifestYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwCal', '1.0')
+
+
+import gi.repository.CF as cf
+import gi.repository.RwDts as rwdts
+import gi.repository.RwMain as rwmain
+import gi.repository.RwManifestYang as rwmanifest
+import gi.repository.RwResourceMgrYang as rmgryang
+from gi.repository import RwcalYang
+from gi.repository import RwCloudYang
+from gi.repository.RwTypes import RwStatus
+
+import rw_peas
+import rift.tasklets
+import rift.test.dts
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+openstack_info = {
+    'username'      : 'pluto',
+    'password'      : 'mypasswd',
+    'auth_url'      : 'http://10.66.4.14:5000/v3/',
+    'project_name'  : 'demo',
+    'mgmt_network'  : 'private',
+    'image_id'      : '5cece2b1-1a49-42c5-8029-833c56574652',
+    'vms'           : ['res-test-1', 'res-test-2'],
+    'networks'      : ['testnet1', 'testnet2']}
+
+
+def create_mock_resource_template():
+    ### Resource to be requested for 'mock'
+    resource_requests = {'compute': {}, 'network': {}}
+
+    ###### mycompute-0
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id  = str(uuid.uuid3(uuid.NAMESPACE_DNS, 'image-0'))
+    msg.vm_flavor.vcpu_count = 4
+    msg.vm_flavor.memory_mb = 8192
+    msg.vm_flavor.storage_gb = 40
+    resource_requests['compute']['mycompute-0'] = msg
+
+    ###### mycompute-1
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id  = str(uuid.uuid3(uuid.NAMESPACE_DNS, 'image-1'))
+    msg.vm_flavor.vcpu_count = 2
+    msg.vm_flavor.memory_mb = 8192
+    msg.vm_flavor.storage_gb = 20
+    resource_requests['compute']['mycompute-1'] = msg
+
+    ####### mynet-0
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    resource_requests['network']['mynet-0'] = msg
+
+    ####### mynet-1
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    resource_requests['network']['mynet-1'] = msg
+
+    return resource_requests
+
+
+def create_cloudsim_resource_template():
+    ### Resource to be requested for 'cloudsim'
+    resource_requests = {'compute': {}, 'network': {}}
+
+    ###### mycompute-0
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id  = "1"
+    msg.vm_flavor.vcpu_count = 4
+    msg.vm_flavor.memory_mb = 8192
+    msg.vm_flavor.storage_gb = 40
+    resource_requests['compute']['mycompute-0'] = msg
+
+    ###### mycompute-1
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id  = "1"
+    msg.vm_flavor.vcpu_count = 2
+    msg.vm_flavor.memory_mb = 8192
+    msg.vm_flavor.storage_gb = 20
+    resource_requests['compute']['mycompute-1'] = msg
+
+    ####### mynet-0
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    resource_requests['network']['mynet-0'] = msg
+
+    ####### mynet-1
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    resource_requests['network']['mynet-1'] = msg
+
+    return resource_requests
+
+
+def create_openstack_static_template():
+    ### Resource to be requested for 'openstack_static'
+    resource_requests = {'compute': {}, 'network': {}}
+
+    ###### mycompute-0
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id  = openstack_info['image_id']
+    msg.vm_flavor.vcpu_count = 4
+    msg.vm_flavor.memory_mb = 8192
+    msg.vm_flavor.storage_gb = 80
+    resource_requests['compute']['mycompute-0'] = msg
+
+    ###### mycompute-1
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id  = openstack_info['image_id']
+    msg.vm_flavor.vcpu_count = 2
+    msg.vm_flavor.memory_mb = 4096
+    msg.vm_flavor.storage_gb = 40
+    resource_requests['compute']['mycompute-1'] = msg
+
+    ####### mynet-0
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    msg.provider_network.physical_network = 'PHYSNET1'
+    msg.provider_network.overlay_type = 'VLAN'
+    msg.provider_network.segmentation_id = 17
+    resource_requests['network']['mynet-0'] = msg
+
+    ####### mynet-1
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    msg.provider_network.physical_network = 'PHYSNET1'
+    msg.provider_network.overlay_type = 'VLAN'
+    msg.provider_network.segmentation_id = 18
+    resource_requests['network']['mynet-1'] = msg
+
+    return resource_requests
+
+
+def create_openstack_dynamic_template():
+    ### Resource to be requested for 'openstack_dynamic'
+    resource_requests = {'compute': {}, 'network': {}}
+
+    ###### mycompute-0
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id  = openstack_info['image_id']
+    msg.vm_flavor.vcpu_count = 2
+    msg.vm_flavor.memory_mb = 4096
+    msg.vm_flavor.storage_gb = 40
+    msg.guest_epa.mempage_size = 'LARGE'
+    msg.guest_epa.cpu_pinning_policy = 'DEDICATED'
+    msg.allocate_public_address = True
+
+    resource_requests['compute']['mycompute-0'] = msg
+
+    ###### mycompute-1
+    msg = rmgryang.VDUEventData_RequestInfo()
+    msg.image_id  = openstack_info['image_id']
+    msg.vm_flavor.vcpu_count = 4
+    msg.vm_flavor.memory_mb = 8192
+    msg.vm_flavor.storage_gb = 40
+    msg.guest_epa.mempage_size = 'LARGE'
+    msg.guest_epa.cpu_pinning_policy = 'DEDICATED'
+    msg.allocate_public_address = True
+
+    resource_requests['compute']['mycompute-1'] = msg
+
+    ####### mynet-0
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    #msg.provider_network.overlay_type = 'VXLAN'
+    #msg.provider_network.segmentation_id = 71
+
+    resource_requests['network']['mynet-0'] = msg
+
+    ####### mynet-1
+    msg = rmgryang.VirtualLinkEventData_RequestInfo()
+    #msg.provider_network.overlay_type = 'VXLAN'
+    #msg.provider_network.segmentation_id = 73
+    resource_requests['network']['mynet-1'] = msg
+
+    return resource_requests
+
+
+
+
+resource_requests = {
+    'mock': create_mock_resource_template(),
+    'openstack_static': create_openstack_static_template(),
+    'openstack_dynamic': create_openstack_dynamic_template(),
+    'cloudsim': create_cloudsim_resource_template(),
+}
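+
+# Example lookup into the templates above (illustrative):
+#   resource_requests['mock']['compute']['mycompute-0'].vm_flavor.vcpu_count  # -> 4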
+
+
+def get_cal_account(account_type):
+    """
+    Creates an object for class RwcalYang.CloudAccount()
+    """
+    account = RwcalYang.CloudAccount()
+    if account_type == 'mock':
+        account.name          = 'mock_account'
+        account.account_type  = "mock"
+        account.mock.username = "mock_user"
+    elif account_type in ('openstack_static', 'openstack_dynamic'):
+        account.name = 'openstack_cal'
+        account.account_type = 'openstack'
+        account.openstack.key          = openstack_info['username']
+        account.openstack.secret       = openstack_info['password']
+        account.openstack.auth_url     = openstack_info['auth_url']
+        account.openstack.tenant       = openstack_info['project_name']
+        account.openstack.mgmt_network = openstack_info['mgmt_network']
+
+    elif account_type == 'cloudsim':
+        account.name          = 'cloudsim'
+        account.account_type  = "cloudsim_proxy"
+
+    return account
+
+def create_cal_plugin(account, log_hdl):
+    plugin_name = getattr(account, account.account_type).plugin_name
+    plugin = rw_peas.PeasPlugin(plugin_name, 'RwCal-1.0')
+    engine, info, extension = plugin()
+    rwcal = plugin.get_interface("Cloud")
+    rc = rwcal.init(log_hdl)
+    assert rc == RwStatus.SUCCESS
+    return rwcal
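+
+# Illustrative usage of the two helpers above (sketch only; 'log_hdl' would come
+# from the surrounding tasklet/test infrastructure, as in the test cases below):
+#
+#     account = get_cal_account('mock')
+#     rwcal = create_cal_plugin(account, log_hdl)
+#     rc, rsp = rwcal.get_vdu_list(account)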
+
+
+class RMMgrTestCase(rift.test.dts.AbstractDTSTest):
+    rwcal = None
+    rwcal_acct_info = None
+
+    @classmethod
+    def configure_suite(cls, rwmain):
+        rm_dir = os.environ.get('RM_DIR')
+        cnt_mgr_dir = os.environ.get('CNTR_MGR_DIR')
+        cal_proxy_dir = os.environ.get('CAL_PROXY_DIR')
+
+        cls.rwmain.add_tasklet(cal_proxy_dir, 'rwcalproxytasklet')
+        cls.rwmain.add_tasklet(rm_dir, 'rwresmgrtasklet')
+        cls.rwmain.add_tasklet(cnt_mgr_dir, 'rwcntmgrtasklet')
+
+    @classmethod
+    def configure_schema(cls):
+        return rmgryang.get_schema()
+
+    @asyncio.coroutine
+    def wait_tasklets(self):
+        yield from asyncio.sleep(1, loop=self.loop)
+
+    @classmethod
+    def configure_timeout(cls):
+        return 360
+
+    def get_cloud_account_msg(self, acct_type):
+        cloud_account = RwCloudYang.CloudAccount()
+        acct = get_cal_account(acct_type)
+        cloud_account.from_dict(acct.as_dict())
+        cloud_account.name = acct.name
+        return cloud_account
+
+    def get_compute_pool_msg(self, name, pool_type, cloud_type):
+        pool_config = rmgryang.ResourcePools()
+        pool = pool_config.pools.add()
+        pool.name = name
+        pool.resource_type = "compute"
+        if pool_type == "static":
+            pool.pool_type = 'static'
+            acct = get_cal_account(cloud_type)
+            rwcal = create_cal_plugin(acct, self.tinfo.get_rwlog_ctx())
+            rc, rsp = rwcal.get_vdu_list(acct)
+            assert rc == RwStatus.SUCCESS
+
+            if cloud_type == 'openstack_static':
+                for vdu in rsp.vdu_info_list:
+                    if vdu.name in openstack_info['vms']:
+                        self.log.info("Adding the static compute resource: %s to compute pool", vdu.name)
+                        r = pool.resources.add()
+                        r.resource_id = vdu.vdu_id
+            else:
+                # 'mock', 'cloudsim', 'openstack_dynamic', etc.
+                for vdu in rsp.vdu_info_list:
+                    self.log.info("Adding the static compute resource: %s to compute pool", vdu.name)
+                    r = pool.resources.add()
+                    r.resource_id = vdu.vdu_id
+        else:
+            pool.pool_type = 'dynamic'
+            pool.max_size = 10
+        return pool_config
+
+    def get_network_pool_msg(self, name, pool_type, cloud_type):
+        pool_config = rmgryang.ResourcePools()
+        pool = pool_config.pools.add()
+        pool.name = name
+        pool.resource_type = "network"
+        if pool_type == "static":
+            pool.pool_type = 'static'
+            acct = get_cal_account(cloud_type)
+            rwcal = create_cal_plugin(acct, self.tinfo.get_rwlog_ctx())
+            rc, rsp = rwcal.get_virtual_link_list(acct)
+            assert rc == RwStatus.SUCCESS
+            if cloud_type == 'openstack_static':
+                for vlink in rsp.virtual_link_info_list:
+                    if vlink.name in openstack_info['networks']:
+                        self.log.info("Adding the static network resource: %s to network pool", vlink.name)
+                        r = pool.resources.add()
+                        r.resource_id = vlink.virtual_link_id
+            else:
+                # 'mock', 'cloudsim', 'openstack_dynamic', etc.
+                for vlink in rsp.virtual_link_info_list:
+                    self.log.info("Adding the static network resource: %s to network pool", vlink.name)
+                    r = pool.resources.add()
+                    r.resource_id = vlink.virtual_link_id
+        else:
+            pool.pool_type = 'dynamic'
+            pool.max_size = 4
+        return pool_config
+
+
+    def get_network_reserve_msg(self, name, cloud_type, xpath):
+        event_id = str(uuid.uuid4())
+        msg = rmgryang.VirtualLinkEventData()
+        msg.event_id = event_id
+        msg.request_info.name = name
+        attributes = ['physical_network', 'name', 'overlay_type', 'segmentation_id']
+
+        if resource_requests[cloud_type]['network'][name].has_field('provider_network'):
+            for attr in attributes:
+                if resource_requests[cloud_type]['network'][name].provider_network.has_field(attr):
+                    setattr(msg.request_info.provider_network, attr,
+                            getattr(resource_requests[cloud_type]['network'][name].provider_network, attr))
+
+        return msg, xpath.format(event_id)
+
+    def get_compute_reserve_msg(self, name, cloud_type, xpath, vlinks):
+        event_id = str(uuid.uuid4())
+        msg = rmgryang.VDUEventData()
+        msg.event_id = event_id
+        msg.request_info.name = name
+        msg.request_info.image_id = resource_requests[cloud_type]['compute'][name].image_id
+        attributes = ['image_id', 'vcpu_count', 'memory_mb', 'storage_gb']
+
+        if resource_requests[cloud_type]['compute'][name].has_field('vm_flavor'):
+            for attr in attributes:
+                if resource_requests[cloud_type]['compute'][name].vm_flavor.has_field(attr):
+                    setattr(msg.request_info.vm_flavor,
+                            attr,
+                            getattr(resource_requests[cloud_type]['compute'][name].vm_flavor, attr))
+
+        attributes = ['mempage_size', 'cpu_pinning_policy']
+
+        if resource_requests[cloud_type]['compute'][name].has_field('guest_epa'):
+            for attr in attributes:
+                if resource_requests[cloud_type]['compute'][name].guest_epa.has_field(attr):
+                    setattr(msg.request_info.guest_epa,
+                            attr,
+                            getattr(resource_requests[cloud_type]['compute'][name].guest_epa, attr))
+
+        if resource_requests[cloud_type]['compute'][name].has_field('allocate_public_address'):
+            msg.request_info.allocate_public_address = resource_requests[cloud_type]['compute'][name].allocate_public_address
+
+        cnt = 0
+        for link in vlinks:
+            c1 = msg.request_info.connection_points.add()
+            c1.name = name+"-port-"+str(cnt)
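+            # produces per-VDU port names, e.g. "mycompute-0-port-0", "mycompute-0-port-1"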
+            cnt += 1
+            c1.virtual_link_id = link
+
+        self.log.info("Sending message :%s", msg)
+        return msg, xpath.format(event_id)
+
+    @asyncio.coroutine
+    def configure_cloud_account(self, dts, acct_type):
+        account_xpath = "C,/rw-cloud:cloud/account"
+        msg = self.get_cloud_account_msg(acct_type)
+        self.log.info("Configuring cloud-account: %s",msg)
+        yield from dts.query_create(account_xpath,
+                                    rwdts.XactFlag.ADVISE,
+                                    msg)
+
+    @asyncio.coroutine
+    def configure_compute_resource_pools(self, dts, resource_type, cloud_type):
+        pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
+        msg = self.get_compute_pool_msg("virtual-compute", resource_type, cloud_type)
+        self.log.info("Configuring compute-resource-pool: %s",msg)
+        yield from dts.query_create(pool_xpath,
+                                    rwdts.XactFlag.ADVISE,
+                                    msg)
+
+
+    @asyncio.coroutine
+    def configure_network_resource_pools(self, dts, resource_type, cloud_type):
+        pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
+        msg = self.get_network_pool_msg("virtual-network", resource_type, cloud_type)
+        self.log.info("Configuring network-resource-pool: %s",msg)
+        yield from dts.query_create(pool_xpath,
+                                    rwdts.XactFlag.ADVISE,
+                                    msg)
+
+    @asyncio.coroutine
+    def verify_resource_pools_config(self, dts):
+        pool_records_xpath = "D,/rw-resource-mgr:resource-pool-records"
+        self.log.debug("Verifying test_create_resource_pools results")
+        res_iter = yield from dts.query_read(pool_records_xpath)
+        for result in res_iter:
+            response = yield from result
+            records = response.result.records
+            #self.assertEqual(len(records), 2)
+            #names = [i.name for i in records]
+            #self.assertTrue('virtual-compute' in names)
+            #self.assertTrue('virtual-network' in names)
+            for record in records:
+                self.log.debug("Received Pool Record, Name: %s, Resource Type: %s, Pool Status: %s, Pool Size: %d, Allocated Resources: %d, Free Resources: %d",
+                               record.name,
+                               record.resource_type,
+                               record.pool_status,
+                               record.total_resources,
+                               record.allocated_resources,
+                               record.free_resources)
+
+    @asyncio.coroutine
+    def read_resource(self, dts, xpath):
+        self.log.debug("Reading data for XPATH:%s", xpath)
+        result = yield from dts.query_read(xpath, rwdts.XactFlag.MERGE)
+        msg = None
+        for r in result:
+            msg = yield from r
+        self.log.debug("Received data: %s", msg.result)
+        return msg.result
+
+    @asyncio.coroutine
+    def reserve_network_resources(self, name, dts, cloud_type):
+        network_xpath = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data[event-id='{}']"
+        msg,xpath = self.get_network_reserve_msg(name, cloud_type, network_xpath)
+        self.log.debug("Sending create event to network-event xpath %s with msg: %s" % (xpath, msg))
+        yield from dts.query_create(xpath, 0, msg)
+        return xpath
+
+
+    @asyncio.coroutine
+    def reserve_compute_resources(self, name, dts, cloud_type, vlinks = []):
+        compute_xpath = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id='{}']"
+        msg,xpath = self.get_compute_reserve_msg(name, cloud_type, compute_xpath, vlinks)
+        self.log.debug("Sending create event to compute-event xpath %s with msg: %s" % (xpath, msg))
+        yield from dts.query_create(xpath, 0, msg)
+        return xpath
+
+    @asyncio.coroutine
+    def release_network_resources(self, dts, xpath):
+        self.log.debug("Initiating network resource release for  : %s ", xpath)
+        yield from dts.query_delete(xpath, 0)
+
+    @asyncio.coroutine
+    def release_compute_resources(self, dts, xpath):
+        self.log.debug("Initiating compute resource release for  : %s ", xpath)
+        yield from dts.query_delete(xpath, 0)
+
+    @unittest.skip("Skipping test_static_pool_resource_allocation")
+    def test_static_pool_resource_allocation(self):
+        self.log.debug("STARTING - test_static_pool_resource_allocation")
+        tinfo = self.new_tinfo('static_mock')
+        dts = rift.tasklets.DTS(tinfo, self.schema, self.loop)
+
+        @asyncio.coroutine
+        def run_test():
+            networks = []
+            computes = []
+            cloud_type = 'mock'
+            yield from self.wait_tasklets()
+            yield from self.configure_cloud_account(dts, cloud_type)
+
+            yield from self.configure_network_resource_pools(dts, "static", cloud_type)
+            yield from self.configure_compute_resource_pools(dts, "static", cloud_type)
+            yield from self.verify_resource_pools_config(dts)
+
+            r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type)
+            r_info = yield from self.read_resource(dts,r_xpath)
+            networks.append((r_xpath, r_info.resource_info))
+
+            for i in range(2):
+                r_xpath = yield from self.reserve_compute_resources("mycompute-"+str(i),
+                                                                    dts,
+                                                                    cloud_type,
+                                                                    [networks[0][1].virtual_link_id])
+                r_info = yield from self.read_resource(dts,r_xpath)
+                computes.append((r_xpath, r_info))
+
+            yield from self.verify_resource_pools_config(dts)
+
+            for r in computes:
+                yield from self.release_compute_resources(dts, r[0])
+
+            yield from self.release_network_resources(dts,networks[0][0])
+            yield from self.verify_resource_pools_config(dts)
+
+        future = asyncio.ensure_future(run_test(), loop=self.loop)
+        self.run_until(future.done)
+        if future.exception() is not None:
+            self.log.error("Caught exception during test")
+            raise future.exception()
+
+        self.log.debug("DONE - test_static_pool_resource_allocation")
+
+    @unittest.skip("Skipping test_dynamic_pool_resource_allocation")
+    def test_dynamic_pool_resource_allocation(self):
+        self.log.debug("STARTING - test_dynamic_pool_resource_allocation")
+        tinfo = self.new_tinfo('dynamic_mock')
+        dts = rift.tasklets.DTS(tinfo, self.schema, self.loop)
+
+        @asyncio.coroutine
+        def run_test():
+            networks = []
+            computes = []
+            cloud_type = 'mock'
+            yield from self.wait_tasklets()
+            yield from self.configure_cloud_account(dts, cloud_type)
+            yield from self.configure_network_resource_pools(dts, "dynamic", cloud_type)
+            yield from self.configure_compute_resource_pools(dts, "dynamic", cloud_type)
+            yield from self.verify_resource_pools_config(dts)
+
+            r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type)
+            r_info = yield from self.read_resource(dts,r_xpath)
+            networks.append((r_xpath, r_info.resource_info))
+
+            for i in range(2):
+                r_xpath = yield from self.reserve_compute_resources("mycompute-"+str(i),
+                                                                    dts,
+                                                                    cloud_type,
+                                                                    [networks[0][1].virtual_link_id])
+                r_info = yield from self.read_resource(dts,r_xpath)
+                computes.append((r_xpath, r_info))
+
+            yield from self.verify_resource_pools_config(dts)
+
+            for r in computes:
+                self.log.debug("Releasing compute resource with id: %s", r[1].resource_info.vdu_id)
+                yield from self.release_compute_resources(dts, r[0])
+
+            yield from self.release_network_resources(dts,networks[0][0])
+            yield from self.verify_resource_pools_config(dts)
+
+        future = asyncio.ensure_future(run_test(), loop=self.loop)
+        self.run_until(future.done)
+        if future.exception() is not None:
+            self.log.error("Caught exception during test")
+            raise future.exception()
+
+        self.log.debug("DONE - test_dynamic_pool_resource_allocation")
+
+    @unittest.skip("Skipping test_dynamic_pool_resource_allocation")
+    def test_dynamic_cloudsim_pool_resource_allocation(self):
+        self.log.debug("STARTING - test_dynamic_pool_resource_allocation")
+        tinfo = self.new_tinfo('dynamic_mock')
+        dts = rift.tasklets.DTS(tinfo, self.schema, self.loop)
+
+        @asyncio.coroutine
+        def run_test():
+            networks = []
+            computes = []
+            cloud_type = 'cloudsim'
+
+            yield from asyncio.sleep(120, loop=self.loop)
+            yield from self.configure_cloud_account(dts, cloud_type)
+            yield from self.configure_network_resource_pools(dts, "dynamic", cloud_type)
+            yield from self.configure_compute_resource_pools(dts, "dynamic", cloud_type)
+            yield from self.verify_resource_pools_config(dts)
+
+            r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type)
+            r_info = yield from self.read_resource(dts,r_xpath)
+            networks.append((r_xpath, r_info.resource_info))
+
+            for i in range(2):
+                r_xpath = yield from self.reserve_compute_resources("mycompute-"+str(i),
+                                                                    dts,
+                                                                    cloud_type,
+                                                                    [networks[0][1].virtual_link_id])
+                r_info = yield from self.read_resource(dts,r_xpath)
+                computes.append((r_xpath, r_info))
+
+            yield from self.verify_resource_pools_config(dts)
+
+            for r in computes:
+                self.log.debug("Releasing compute resource with id: %s", r[1].resource_info.vdu_id)
+                yield from self.release_compute_resources(dts, r[0])
+
+            yield from self.release_network_resources(dts,networks[0][0])
+            yield from self.verify_resource_pools_config(dts)
+
+        future = asyncio.ensure_future(run_test(), loop=self.loop)
+        self.run_until(future.done)
+        if future.exception() is not None:
+            self.log.error("Caught exception during test")
+            raise future.exception()
+
+        self.log.debug("DONE - test_dynamic_pool_resource_allocation")
+
+    @unittest.skip("Skipping test_static_pool_openstack_resource_allocation")
+    def test_static_pool_openstack_resource_allocation(self):
+        self.log.debug("STARTING - test_static_pool_openstack_resource_allocation")
+        tinfo = self.new_tinfo('static_openstack')
+        dts = rift.tasklets.DTS(tinfo, self.schema, self.loop)
+
+        @asyncio.coroutine
+        def run_test():
+            networks = []
+            computes = []
+            cloud_type = 'openstack_static'
+            yield from self.wait_tasklets()
+            yield from self.configure_cloud_account(dts, cloud_type)
+            yield from self.configure_network_resource_pools(dts, "static", cloud_type)
+            yield from self.configure_compute_resource_pools(dts, "static", cloud_type)
+            yield from self.verify_resource_pools_config(dts)
+
+            self.log.debug("Creating virtual-network-resources in openstack")
+            r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type)
+            r_info = yield from self.read_resource(dts,r_xpath)
+            networks.append((r_xpath, r_info.resource_info))
+            self.log.debug("virtual-network-resources successfully created in openstack")
+
+            self.log.debug("Creating virtual-network-compute in openstack")
+            for i in range(2):
+                r_xpath = yield from self.reserve_compute_resources("mycompute-" + str(i),
+                                                                    dts,
+                                                                    cloud_type,
+                                                                    [networks[0][1].virtual_link_id])
+                r_info = yield from self.read_resource(dts,r_xpath)
+                computes.append((r_xpath, r_info))
+
+            yield from self.verify_resource_pools_config(dts)
+            for r in computes:
+                self.log.debug("Releasing compute resource with id: %s", r[1].resource_info.vdu_id)
+                yield from self.release_compute_resources(dts, r[0])
+
+            yield from self.release_network_resources(dts,networks[0][0])
+            yield from self.verify_resource_pools_config(dts)
+            self.log.debug("Openstack static resource allocation completed")
+
+        future = asyncio.ensure_future(run_test(), loop=self.loop)
+        self.run_until(future.done)
+        if future.exception() is not None:
+            self.log.error("Caught exception during test")
+            raise future.exception()
+
+        self.log.debug("DONE - test_static_pool_openstack_resource_allocation")
+
+    #@unittest.skip("Skipping test_dynamic_pool_openstack_resource_allocation")
+    def test_dynamic_pool_openstack_resource_allocation(self):
+        self.log.debug("STARTING - test_dynamic_pool_openstack_resource_allocation")
+        tinfo = self.new_tinfo('dynamic_openstack')
+        dts = rift.tasklets.DTS(tinfo, self.schema, self.loop)
+
+        @asyncio.coroutine
+        def run_test():
+            networks = []
+            computes = []
+            cloud_type = 'openstack_dynamic'
+            yield from self.wait_tasklets()
+            yield from self.configure_cloud_account(dts, cloud_type)
+            yield from self.configure_network_resource_pools(dts, "dynamic", cloud_type)
+            yield from self.configure_compute_resource_pools(dts, "dynamic", cloud_type)
+            yield from self.verify_resource_pools_config(dts)
+
+            self.log.debug("Creating virtual-network-resources in openstack")
+            r_xpath = yield from self.reserve_network_resources('mynet-0', dts, cloud_type)
+            r_info = yield from self.read_resource(dts,r_xpath)
+            networks.append((r_xpath, r_info.resource_info))
+            self.log.debug("virtual-network-resources successfully created in openstack")
+
+            self.log.debug("Creating virtual-network-compute in openstack")
+            for i in range(2):
+                r_xpath = yield from self.reserve_compute_resources("mycompute-" + str(i),
+                                                                    dts,
+                                                                    cloud_type,
+                                                                    [networks[0][1].virtual_link_id])
+                r_info = yield from self.read_resource(dts,r_xpath)
+                computes.append((r_xpath, r_info))
+
+            yield from self.verify_resource_pools_config(dts)
+            for r in computes:
+                self.log.debug("Releasing compute resource with id: %s", r[1].resource_info.vdu_id)
+                #yield from self.release_compute_resources(dts, r[0])
+
+            self.log.debug("Releasing network resource with id: %s", r[1].resource_info.vdu_id)
+            #yield from self.release_network_resources(dts,networks[0][0])
+            #yield from self.verify_resource_pools_config(dts)
+            self.log.debug("Openstack dynamic resource allocation completed")
+
+        future = asyncio.ensure_future(run_test(), loop=self.loop)
+        self.run_until(future.done)
+        if future.exception() is not None:
+            self.log.error("Caught exception during test")
+            raise future.exception()
+
+        self.log.debug("DONE - test_dynamic_pool_openstack_resource_allocation")
+
+
+def main():
+    plugin_dir = os.path.join(os.environ["RIFT_INSTALL"], "usr/lib/rift/plugins")
+
+    if 'MESSAGE_BROKER_DIR' not in os.environ:
+        os.environ['MESSAGE_BROKER_DIR'] = os.path.join(plugin_dir, 'rwmsgbroker-c')
+
+    if 'ROUTER_DIR' not in os.environ:
+        os.environ['ROUTER_DIR'] = os.path.join(plugin_dir, 'rwdtsrouter-c')
+
+    if 'RM_DIR' not in os.environ:
+        os.environ['RM_DIR'] = os.path.join(plugin_dir, 'rwresmgrtasklet')
+
+    if 'CAL_PROXY_DIR' not in os.environ:
+        os.environ['CAL_PROXY_DIR'] = os.path.join(plugin_dir, 'rwcalproxytasklet')
+
+    if 'CNTR_MGR_DIR' not in os.environ:
+        os.environ['CNTR_MGR_DIR'] = os.path.join(plugin_dir, 'rwcntmgrtasklet')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    unittest.main(testRunner=runner)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/rwlaunchpad/plugins/rwvnfm/CMakeLists.txt b/rwlaunchpad/plugins/rwvnfm/CMakeLists.txt
new file mode 100644 (file)
index 0000000..7bc05a7
--- /dev/null
@@ -0,0 +1,39 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 05/15/2015
+# 
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwvnfmtasklet)
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+rift_python_install_tree(
+  FILES
+    rift/tasklets/${TASKLET_NAME}/__init__.py
+    rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
+
diff --git a/rwlaunchpad/plugins/rwvnfm/Makefile b/rwlaunchpad/plugins/rwvnfm/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/__init__.py b/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/__init__.py
new file mode 100644 (file)
index 0000000..9728738
--- /dev/null
@@ -0,0 +1 @@
+from .rwvnfmtasklet import VnfmTasklet
\ No newline at end of file
diff --git a/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py b/rwlaunchpad/plugins/rwvnfm/rift/tasklets/rwvnfmtasklet/rwvnfmtasklet.py
new file mode 100755 (executable)
index 0000000..17e6fbf
--- /dev/null
@@ -0,0 +1,2751 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import collections
+import enum
+import logging
+import uuid
+import time
+import os.path
+import re
+import shutil
+import sys
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('RwVnfmYang', '1.0')
+gi.require_version('RwVlrYang', '1.0')
+gi.require_version('RwManifestYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+
+from gi.repository import (
+    RwDts as rwdts,
+    RwVnfrYang,
+    RwVnfmYang,
+    RwVlrYang,
+    VnfrYang,
+    RwManifestYang,
+    RwBaseYang,
+    RwResourceMgrYang,
+    ProtobufC,
+)
+
+import rift.tasklets
+import rift.package.store
+import rift.package.cloud_init
+
+
+class VMResourceError(Exception):
+    """ VM resource Error"""
+    pass
+
+
+class VnfRecordError(Exception):
+    """ VNF record instatiation failed"""
+    pass
+
+
+class VduRecordError(Exception):
+    """ VDU record instatiation failed"""
+    pass
+
+
+class NotImplemented(Exception):
+    """Not implemented """
+    pass
+
+
+class VnfrRecordExistsError(Exception):
+    """VNFR record already exist with the same VNFR id"""
+    pass
+
+
+class InternalVirtualLinkRecordError(Exception):
+    """Internal virtual link record error"""
+    pass
+
+
+class VDUImageNotFound(Exception):
+    """VDU Image not found error"""
+    pass
+
+
+class VirtualDeploymentUnitRecordError(Exception):
+    """VDU Instantiation failed"""
+    pass
+
+
+class VMNotReadyError(Exception):
+    """ VM Not yet received from resource manager """
+    pass
+
+
+class VDURecordNotFound(Exception):
+    """ Could not find a VDU record """
+    pass
+
+
+class VirtualNetworkFunctionRecordDescNotFound(Exception):
+    """ Cannot find Virtual Network Function Record Descriptor """
+    pass
+
+
+class VirtualNetworkFunctionDescriptorError(Exception):
+    """ Virtual Network Function Record Descriptor Error """
+    pass
+
+
+class VirtualNetworkFunctionDescriptorNotFound(Exception):
+    """ Virtual Network Function Record Descriptor Not Found """
+    pass
+
+
+class VirtualNetworkFunctionRecordNotFound(Exception):
+    """ Virtual Network Function Record Not Found """
+    pass
+
+
+class VirtualNetworkFunctionDescriptorRefCountExists(Exception):
+    """ Virtual Network Funtion Descriptor reference count exists """
+    pass
+
+
+class VnfrInstantiationFailed(Exception):
+    """ Virtual Network Funtion Instantiation failed"""
+    pass
+
+
+class VNFMPlacementGroupError(Exception):
+    pass
+
+class VirtualNetworkFunctionRecordState(enum.Enum):
+    """ VNFR state """
+    INIT = 1
+    VL_INIT_PHASE = 2
+    VM_INIT_PHASE = 3
+    READY = 4
+    TERMINATE = 5
+    VL_TERMINATE_PHASE = 6
+    VDU_TERMINATE_PHASE = 7
+    TERMINATED = 8
+    FAILED = 10
+
+
+class VDURecordState(enum.Enum):
+    """VDU record state """
+    INIT = 1
+    INSTANTIATING = 2
+    RESOURCE_ALLOC_PENDING = 3
+    READY = 4
+    TERMINATING = 5
+    TERMINATED = 6
+    FAILED = 10
+
+
+class VcsComponent(object):
+    """ VCS Component within the VNF descriptor """
+    def __init__(self, dts, log, loop, cluster_name, vcs_handler, component, mangled_name):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._component = component
+        self._cluster_name = cluster_name
+        self._vcs_handler = vcs_handler
+        self._mangled_name = mangled_name
+
+    @staticmethod
+    def mangle_name(component_name, vnf_name, vnfd_id):
+        """ mangled  component name """
+        return vnf_name + ":" + component_name + ":" + vnfd_id
+
+    @property
+    def name(self):
+        """ name of this component"""
+        return self._mangled_name
+
+    @property
+    def path(self):
+        """ The path for this object """
+        return("D,/rw-manifest:manifest" +
+               "/rw-manifest:operational-inventory" +
+               "/rw-manifest:component" +
+               "[rw-manifest:component-name = '{}']").format(self.name)
+
+    @property
+    def instance_xpath(self):
+        """ The path for this object """
+        return("D,/rw-base:vcs" +
+               "/instances" +
+               "/instance" +
+               "[instance-name = '{}']".format(self._cluster_name))
+
+    @property
+    def start_comp_xpath(self):
+        """ start component xpath """
+        return (self.instance_xpath +
+                "/child-n[instance-name = 'START-REQ']")
+
+    def get_start_comp_msg(self, ip_address):
+        """ start this component """
+        start_msg = RwBaseYang.VcsInstance_Instance_ChildN()
+        start_msg.instance_name = 'START-REQ'
+        start_msg.component_name = self.name
+        start_msg.admin_command = "START"
+        start_msg.ip_address = ip_address
+
+        return start_msg
+
+    @property
+    def msg(self):
+        """ Returns the message for this vcs component"""
+
+        vcs_comp_dict = self._component.as_dict()
+
+        def mangle_comp_names(comp_dict):
+            """ mangle component name  with VNF name, id"""
+            for key, val in comp_dict.items():
+                if isinstance(val, dict):
+                    comp_dict[key] = mangle_comp_names(val)
+                elif isinstance(val, list):
+                    i = 0
+                    for ent in val:
+                        if isinstance(ent, dict):
+                            val[i] = mangle_comp_names(ent)
+                        else:
+                            val[i] = ent
+                        i += 1
+                elif key == "component_name":
+                    comp_dict[key] = VcsComponent.mangle_name(val,
+                                                              self._vnfd_name,
+                                                              self._vnfd_id)
+            return comp_dict
+
+        mangled_dict = mangle_comp_names(vcs_comp_dict)
+        msg = RwManifestYang.OpInventory_Component.from_dict(mangled_dict)
+        return msg
+
+    @asyncio.coroutine
+    def publish(self, xact):
+        """ Publishes the VCS component """
+        self._log.debug("Publishing the VcsComponent %s, path = %s comp = %s",
+                        self.name, self.path, self.msg)
+        yield from self._vcs_handler.publish(xact, self.path, self.msg)
+
+    @asyncio.coroutine
+    def start(self, xact, parent, ip_addr=None):
+        """ Starts this VCS component """
+        # ATTN RV - replace with block add
+        start_msg = self.get_start_comp_msg(ip_addr)
+        self._log.debug("starting component %s %s",
+                        self.start_comp_xpath, start_msg)
+        yield from self._dts.query_create(self.start_comp_xpath,
+                                          0,
+                                          start_msg)
+        self._log.debug("started component %s, %s",
+                        self.start_comp_xpath, start_msg)
+
+
+class VirtualDeploymentUnitRecord(object):
+    """  Virtual Deployment Unit Record """
+    def __init__(self,
+                 dts,
+                 log,
+                 loop,
+                 vdud,
+                 vnfr,
+                 mgmt_intf,
+                 cloud_account_name,
+                 vnfd_package_store,
+                 vdur_id=None,
+                 placement_groups=[]):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._vdud = vdud
+        self._vnfr = vnfr
+        self._mgmt_intf = mgmt_intf
+        self._cloud_account_name = cloud_account_name
+        self._vnfd_package_store = vnfd_package_store
+
+        self._vdur_id = vdur_id or str(uuid.uuid4())
+        self._int_intf = []
+        self._ext_intf = []
+        self._state = VDURecordState.INIT
+        self._state_failed_reason = None
+        self._request_id = str(uuid.uuid4())
+        self._name = vnfr.name + "__" + vdud.id
+        self._placement_groups = placement_groups
+        self._rm_regh = None
+        self._vm_resp = None
+        self._vdud_cloud_init = None
+        self._vdur_console_handler = VnfrConsoleOperdataDtsHandler(dts, log, loop, self._vnfr._vnfm, self._vnfr.vnfr_id, self._vdur_id, self.vdu_id)
+
+    @asyncio.coroutine
+    def vdu_opdata_register(self):
+        yield from self._vdur_console_handler.register()
+
+    def cp_ip_addr(self, cp_name):
+        """ Find ip address by connection point name """
+        if self._vm_resp is not None:
+            for conn_point in self._vm_resp.connection_points:
+                if conn_point.name == cp_name:
+                    return conn_point.ip_address
+        return "0.0.0.0"
+
+    def cp_id(self, cp_name):
+        """ Find connection point id  by connection point name """
+        if self._vm_resp is not None:
+            for conn_point in self._vm_resp.connection_points:
+                if conn_point.name == cp_name:
+                    return conn_point.connection_point_id
+        return ''
+
+    @property
+    def vdu_id(self):
+        return self._vdud.id
+
+    @property
+    def vm_resp(self):
+        return self._vm_resp
+
+    @property
+    def name(self):
+        """ Return this VDUR's name """
+        return self._name
+
+    @property
+    def cloud_account_name(self):
+        """ Cloud account this VDU should be created in """
+        return self._cloud_account_name
+
+    @property
+    def image_name(self):
+        """ name that should be used to lookup the image on the CMP """
+        return os.path.basename(self._vdud.image)
+
+    @property
+    def image_checksum(self):
+        """ name that should be used to lookup the image on the CMP """
+        return self._vdud.image_checksum if self._vdud.has_field("image_checksum") else None
+
+    @property
+    def management_ip(self):
+        if not self.active:
+            return None
+        return self._vm_resp.public_ip if self._vm_resp.has_field('public_ip') else self._vm_resp.management_ip
+
+    @property
+    def vm_management_ip(self):
+        if not self.active:
+            return None
+        return self._vm_resp.management_ip
+
+    @property
+    def operational_status(self):
+        """ Operational status of this VDU"""
+        op_stats_dict = {"INIT": "init",
+                         "INSTANTIATING": "vm_init_phase",
+                         "RESOURCE_ALLOC_PENDING": "vm_alloc_pending",
+                         "READY": "running",
+                         "FAILED": "failed",
+                         "TERMINATING": "terminated",
+                         "TERMINATED": "terminated",
+                         }
+        return op_stats_dict[self._state.name]
+
+    @property
+    def msg(self):
+        """ VDU message """
+        vdu_fields = ["vm_flavor",
+                      "guest_epa",
+                      "vswitch_epa",
+                      "hypervisor_epa",
+                      "host_epa",
+                      "name"]
+        vdu_copy_dict = {k: v for k, v in
+                         self._vdud.as_dict().items() if k in vdu_fields}
+        vdur_dict = {"id": self._vdur_id,
+                     "vdu_id_ref": self._vdud.id,
+                     "operational_status": self.operational_status,
+                     "operational_status_details": self._state_failed_reason,
+                     }
+        if self.vm_resp is not None:
+            vdur_dict.update({"vim_id": self.vm_resp.vdu_id,
+                              "flavor_id": self.vm_resp.flavor_id,
+                              "image_id": self.vm_resp.image_id,
+                              })
+
+        if self.management_ip is not None:
+            vdur_dict["management_ip"] = self.management_ip
+
+        if self.vm_management_ip is not None:
+            vdur_dict["vm_management_ip"] = self.vm_management_ip
+
+        vdur_dict.update(vdu_copy_dict)
+
+        icp_list = []
+        ii_list = []
+
+        for intf, cp_id, vlr in self._int_intf:
+            cp = self.find_internal_cp_by_cp_id(cp_id)
+
+            icp_list.append({"name": cp.name,
+                             "id": cp.id,
+                             "type_yang": "VPORT",
+                             "ip_address": self.cp_ip_addr(cp.id)})
+
+            ii_list.append({"name": intf.name,
+                            "vdur_internal_connection_point_ref": cp.id,
+                            "virtual_interface": {}})
+
+        vdur_dict["internal_connection_point"] = icp_list
+        self._log.debug("internal_connection_point:%s", vdur_dict["internal_connection_point"])
+        vdur_dict["internal_interface"] = ii_list
+
+        ei_list = []
+        for intf, cp, vlr in self._ext_intf:
+            ei_list.append({"name": cp,
+                            "vnfd_connection_point_ref": cp,
+                            "virtual_interface": {}})
+            self._vnfr.update_cp(cp, self.cp_ip_addr(cp), self.cp_id(cp))
+
+        vdur_dict["external_interface"] = ei_list
+
+        placement_groups = []
+        for group in self._placement_groups:
+            placement_groups.append(group.as_dict())
+
+        vdur_dict['placement_groups_info'] = placement_groups
+        return RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur.from_dict(vdur_dict)
+
+    @property
+    def resmgr_path(self):
+        """ path for resource-mgr"""
+        return ("D,/rw-resource-mgr:resource-mgmt" +
+                "/vdu-event" +
+                "/vdu-event-data[event-id='{}']".format(self._request_id))
+
+    @property
+    def vm_flavor_msg(self):
+        """ VM flavor message """
+        flavor = self._vdud.vm_flavor.__class__()
+        flavor.copy_from(self._vdud.vm_flavor)
+
+        return flavor
+
+    @property
+    def vdud_cloud_init(self):
+        """ Return the cloud-init contents for the VDU """
+        if self._vdud_cloud_init is None:
+            self._vdud_cloud_init = self.cloud_init()
+
+        return self._vdud_cloud_init
+
+    def cloud_init(self):
+        """ Populate cloud_init with cloud-config script from
+            either the inline contents or from the file provided
+        """
+        if self._vdud.cloud_init is not None:
+            self._log.debug("cloud_init script provided inline %s", self._vdud.cloud_init)
+            return self._vdud.cloud_init
+        elif self._vdud.cloud_init_file is not None:
+            # Get cloud-init script contents from the file provided in the cloud_init_file param
+            self._log.debug("cloud_init script provided in file %s", self._vdud.cloud_init_file)
+            filename = self._vdud.cloud_init_file
+            self._vnfd_package_store.refresh()
+            stored_package = self._vnfd_package_store.get_package(self._vnfr.vnfd_id)
+            cloud_init_extractor = rift.package.cloud_init.PackageCloudInitExtractor(self._log)
+            try:
+                return cloud_init_extractor.read_script(stored_package, filename)
+            except rift.package.cloud_init.CloudInitExtractionError as e:
+                raise VirtualDeploymentUnitRecordError(e)
+        else:
+            self._log.debug("VDU Instantiation: cloud-init script not provided")
+
+    def process_openstack_placement_group_construct(self, vm_create_msg_dict):
+        host_aggregates = []
+        availability_zones = []
+        server_groups = []
+        for group in self._placement_groups:
+            if group.has_field('host_aggregate'):
+                for aggregate in group.host_aggregate:
+                    host_aggregates.append(aggregate.as_dict())
+            if group.has_field('availability_zone'):
+                availability_zones.append(group.availability_zone.as_dict())
+            if group.has_field('server_group'):
+                server_groups.append(group.server_group.as_dict())
+
+        if availability_zones:
+            if len(availability_zones) > 1:
+                self._log.error("Can not launch VDU: %s in multiple availability zones. Requested Zones: %s", self.name, availability_zones)
+                raise VNFMPlacementGroupError("Can not launch VDU: {} in multiple availability zones. Requsted Zones".format(self.name, availability_zones))
+            else:
+                vm_create_msg_dict['availability_zone'] = availability_zones[0]
+
+        if server_groups:
+            if len(server_groups) > 1:
+                self._log.error("Can not launch VDU: %s in multiple Server Group. Requested Groups: %s", self.name, server_groups)
+                raise VNFMPlacementGroupError("Can not launch VDU: {} in multiple Server Groups. Requsted Groups".format(self.name, server_groups))
+            else:
+                vm_create_msg_dict['server_group'] = server_groups[0]
+
+        if host_aggregates:
+            vm_create_msg_dict['host_aggregate'] = host_aggregates
+
+        return
+
+    def process_placement_groups(self, vm_create_msg_dict):
+        """Process the placement_groups and fill resource-mgr request"""
+        if not self._placement_groups:
+            return
+
+        cloud_set = {group.cloud_type for group in self._placement_groups}
+        assert len(cloud_set) == 1
+        cloud_type = cloud_set.pop()
+
+        if cloud_type == 'openstack':
+            self.process_openstack_placement_group_construct(vm_create_msg_dict)
+
+        else:
+            self._log.info("Ignoring placement group with cloud construct for cloud-type: %s", cloud_type)
+        return
+
+    def resmgr_msg(self, config=None):
+        vdu_fields = ["vm_flavor",
+                      "guest_epa",
+                      "vswitch_epa",
+                      "hypervisor_epa",
+                      "host_epa"]
+
+        self._log.debug("Creating params based on VDUD: %s", self._vdud)
+        vdu_copy_dict = {k: v for k, v in self._vdud.as_dict().items() if k in vdu_fields}
+
+        vm_create_msg_dict = {
+                "name": self.name,
+                "image_name": self.image_name,
+                }
+
+        if self.image_checksum is not None:
+            vm_create_msg_dict["image_checksum"] = self.image_checksum
+
+        vm_create_msg_dict["allocate_public_address"] = self._mgmt_intf
+        if self._vdud.has_field('mgmt_vpci'):
+            vm_create_msg_dict["mgmt_vpci"] = self._vdud.mgmt_vpci
+
+        self._log.debug("VDUD: %s", self._vdud)
+        if config is not None:
+            vm_create_msg_dict['vdu_init'] = {'userdata': config}
+
+        cp_list = []
+        for intf, cp, vlr in self._ext_intf:
+            cp_info = {"name": cp,
+                       "virtual_link_id": vlr.network_id,
+                       "type_yang": intf.virtual_interface.type_yang}
+
+            if (intf.virtual_interface.has_field('vpci') and
+                    intf.virtual_interface.vpci is not None):
+                cp_info["vpci"] =  intf.virtual_interface.vpci
+
+            if (vlr.has_field('ip_profile_params')) and (vlr.ip_profile_params.has_field('security_group')):
+                cp_info['security_group'] = vlr.ip_profile_params.security_group
+                
+            cp_list.append(cp_info)
+
+        for intf, cp, vlr in self._int_intf:
+            if (intf.virtual_interface.has_field('vpci') and
+                    intf.virtual_interface.vpci is not None):
+                cp_list.append({"name": cp,
+                                "virtual_link_id": vlr.network_id,
+                                "type_yang": intf.virtual_interface.type_yang,
+                                "vpci": intf.virtual_interface.vpci})
+            else:
+                cp_list.append({"name": cp,
+                                "virtual_link_id": vlr.network_id,
+                                "type_yang": intf.virtual_interface.type_yang})
+
+        vm_create_msg_dict["connection_points"] = cp_list
+        vm_create_msg_dict.update(vdu_copy_dict)
+
+        self.process_placement_groups(vm_create_msg_dict)
+
+        msg = RwResourceMgrYang.VDUEventData()
+        msg.event_id = self._request_id
+        msg.cloud_account = self.cloud_account_name
+        msg.request_info.from_dict(vm_create_msg_dict)
+        return msg
+
+    @asyncio.coroutine
+    def terminate(self, xact):
+        """ Delete resource in VIM """
+        if self._state != VDURecordState.READY and self._state != VDURecordState.FAILED:
+            self._log.warning("VDU terminate in not ready state - Ignoring request")
+            return
+
+        self._state = VDURecordState.TERMINATING
+        if self._vm_resp is not None:
+            try:
+                with self._dts.transaction() as new_xact:
+                    yield from self.delete_resource(new_xact)
+            except Exception:
+                self._log.exception("Caught exception while deleting VDU %s", self.vdu_id)
+
+        if self._rm_regh is not None:
+            self._log.debug("Deregistering resource manager registration handle")
+            self._rm_regh.deregister()
+            self._rm_regh = None
+
+        if self._vdur_console_handler is not None:
+            self._log.error("Deregistering vnfr vdur registration handle")
+            self._vdur_console_handler._regh.deregister()
+            self._vdur_console_handler._regh = None
+
+        self._state = VDURecordState.TERMINATED
+
+    def find_internal_cp_by_cp_id(self, cp_id):
+        """ Find the CP corresponding to the connection point id"""
+        cp = None
+
+        self._log.debug("find_internal_cp_by_cp_id(%s) called",
+                        cp_id)
+
+        for int_cp in self._vdud.internal_connection_point:
+            self._log.debug("Checking for int cp %s in internal connection points",
+                            int_cp.id)
+            if int_cp.id == cp_id:
+                cp = int_cp
+                break
+
+        if cp is None:
+            self._log.debug("Failed to find cp %s in internal connection points",
+                            cp_id)
+            msg = "Failed to find cp %s in internal connection points" % cp_id
+            raise VduRecordError(msg)
+
+        # return the VLR associated with the connection point
+        return cp
+
+    @asyncio.coroutine
+    def create_resource(self, xact, vnfr, config=None):
+        """ Request resource from ResourceMgr """
+        def find_cp_by_name(cp_name):
+            """ Find a connection point by name """
+            cp = None
+            self._log.debug("find_cp_by_name(%s) called", cp_name)
+            for ext_cp in vnfr._cprs:
+                self._log.debug("Checking ext cp (%s) called", ext_cp.name)
+                if ext_cp.name == cp_name:
+                    cp = ext_cp
+                    break
+            if cp is None:
+                self._log.debug("Failed to find cp %s in external connection points",
+                                cp_name)
+            return cp
+
+        def find_internal_vlr_by_cp_name(cp_name):
+            """ Find the VLR corresponding to the connection point name"""
+            cp = None
+
+            self._log.debug("find_internal_vlr_by_cp_name(%s) called",
+                            cp_name)
+
+            for int_cp in self._vdud.internal_connection_point:
+                self._log.debug("Checking for int cp %s in internal connection points",
+                                int_cp.id)
+                if int_cp.id == cp_name:
+                    cp = int_cp
+                    break
+
+            if cp is None:
+                self._log.debug("Failed to find cp %s in internal connection points",
+                                cp_name)
+                msg = "Failed to find cp %s in internal connection points" % cp_name
+                raise VduRecordError(msg)
+
+            # return the VLR associated with the connection point
+            return vnfr.find_vlr_by_cp(cp_name)
+
+        block = xact.block_create()
+
+        self._log.debug("Executing vm request id: %s, action: create",
+                        self._request_id)
+
+        # Resolve the networks associated external interfaces
+        for ext_intf in self._vdud.external_interface:
+            self._log.debug("Resolving external interface name [%s], cp[%s]",
+                            ext_intf.name, ext_intf.vnfd_connection_point_ref)
+            cp = find_cp_by_name(ext_intf.vnfd_connection_point_ref)
+            if cp is None:
+                self._log.debug("Failed to find connection point - %s",
+                                ext_intf.vnfd_connection_point_ref)
+                continue
+            self._log.debug("Connection point name [%s], type[%s]",
+                            cp.name, cp.type_yang)
+
+            vlr = vnfr.ext_vlr_by_id(cp.vlr_ref)
+
+            etuple = (ext_intf, cp.name, vlr)
+            self._ext_intf.append(etuple)
+
+            self._log.debug("Created external interface tuple  : %s", etuple)
+
+        # Resolve the networks associated internal interfaces
+        for intf in self._vdud.internal_interface:
+            cp_id = intf.vdu_internal_connection_point_ref
+            self._log.debug("Resolving internal interface name [%s], cp[%s]",
+                            intf.name, cp_id)
+
+            try:
+                vlr = find_internal_vlr_by_cp_name(cp_id)
+            except Exception as e:
+                self._log.debug("Failed to find cp %s in internal VLR list", cp_id)
+                msg = "Failed to find cp %s in internal VLR list, e = %s" % (cp_id, e)
+                raise VduRecordError(msg)
+
+            ituple = (intf, cp_id, vlr)
+            self._int_intf.append(ituple)
+
+            self._log.debug("Created internal interface tuple  : %s", ituple)
+
+        resmgr_path = self.resmgr_path
+        resmgr_msg = self.resmgr_msg(config)
+
+        self._log.debug("Creating new VM request at: %s, params: %s", resmgr_path, resmgr_msg)
+        block.add_query_create(resmgr_path, resmgr_msg)
+
+        res_iter = yield from block.execute(now=True)
+
+        resp = None
+
+        for i in res_iter:
+            r = yield from i
+            resp = r.result
+
+        if resp is None or not (resp.has_field('resource_info') and resp.resource_info.has_field('resource_state')):
+            raise VMResourceError("Did not get a vm resource response (resp: %s)", resp)
+        self._log.debug("Got vm request response: %s", resp.resource_info)
+        return resp.resource_info
+
+    @asyncio.coroutine
+    def delete_resource(self, xact):
+        block = xact.block_create()
+
+        self._log.debug("Executing vm request id: %s, action: delete",
+                        self._request_id)
+
+        block.add_query_delete(self.resmgr_path)
+
+        yield from block.execute(flags=0, now=True)
+
+    @asyncio.coroutine
+    def read_resource(self, xact):
+        block = xact.block_create()
+
+        self._log.debug("Executing vm request id: %s, action: delete",
+                        self._request_id)
+
+        block.add_query_read(self.resmgr_path)
+
+        res_iter = yield from block.execute(flags=0, now=True)
+        resp = None
+        for i in res_iter:
+            r = yield from i
+            resp = r.result
+
+        if resp is None or not (resp.has_field('resource_info') and resp.resource_info.has_field('resource_state')):
+            raise VMResourceError("Did not get a vm resource response (resp: %s)", resp)
+        self._log.debug("Got vm request response: %s", resp.resource_info)
+        #self._vm_resp = resp.resource_info
+        return resp.resource_info
+
+
+    @asyncio.coroutine
+    def start_component(self):
+        """ This VDUR is active """
+        self._log.debug("Starting component %s for  vdud %s vdur %s",
+                        self._vdud.vcs_component_ref,
+                        self._vdud,
+                        self._vdur_id)
+        yield from self._vnfr.start_component(self._vdud.vcs_component_ref,
+                                              self.vm_resp.management_ip)
+
+    @property
+    def active(self):
+        """ Is this VDU active """
+        return self._state is VDURecordState.READY
+
+    @asyncio.coroutine
+    def instantiation_failed(self, failed_reason=None):
+        """ VDU instantiation failed """
+        self._log.debug("VDU %s instantiation failed ", self._vdur_id)
+        self._state = VDURecordState.FAILED
+        self._state_failed_reason = failed_reason
+        yield from self._vnfr.instantiation_failed(failed_reason)
+
+    @asyncio.coroutine
+    def vdu_is_active(self):
+        """ This VDU is active"""
+        if self.active:
+            self._log.warning("VDU %s was already marked as active", self._vdur_id)
+            return
+
+        self._log.debug("VDUR id %s in VNFR %s is active", self._vdur_id, self._vnfr.vnfr_id)
+
+        if self._vdud.vcs_component_ref is not None:
+            yield from self.start_component()
+
+        self._state = VDURecordState.READY
+
+        if self._vnfr.all_vdus_active():
+            self._log.debug("Inside vdu_is_active. VNFR is READY. Info: %s", self._vnfr)
+            yield from self._vnfr.is_ready()
+
+    @asyncio.coroutine
+    def instantiate(self, xact, vnfr, config=None):
+        """ Instantiate this VDU """
+        self._state = VDURecordState.INSTANTIATING
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, query_action, ks_path, msg):
+            """ This VDUR is active """
+            self._log.debug("Received VDUR instantiate on_prepare (%s:%s:%s)",
+                            query_action,
+                            ks_path,
+                            msg)
+
+            if (query_action == rwdts.QueryAction.UPDATE or
+                    query_action == rwdts.QueryAction.CREATE):
+                self._vm_resp = msg
+
+                if msg.resource_state == "active":
+                    # Move this VDU to ready state
+                    yield from self.vdu_is_active()
+                elif msg.resource_state == "failed":
+                    yield from self.instantiation_failed(msg.resource_errors)
+            elif query_action == rwdts.QueryAction.DELETE:
+                self._log.debug("DELETE action in on_prepare for VDUR instantiation, ignoring")
+            else:
+                raise NotImplementedError(
+                    "%s action on VirtualDeploymentUnitRecord not supported" %
+                    query_action)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        try:
+            reg_event = asyncio.Event(loop=self._loop)
+
+            @asyncio.coroutine
+            def on_ready(regh, status):
+                reg_event.set()
+
+            handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare, on_ready=on_ready)
+            self._rm_regh = yield from self._dts.register(self.resmgr_path + '/resource-info',
+                                                          flags=rwdts.Flag.SUBSCRIBER,
+                                                          handler=handler)
+            yield from reg_event.wait()
+
+            vm_resp = yield from self.create_resource(xact, vnfr, config)
+            self._vm_resp = vm_resp
+
+            self._state = VDURecordState.RESOURCE_ALLOC_PENDING
+            self._log.debug("Requested VM from resource manager response %s",
+                            vm_resp)
+            if vm_resp.resource_state == "active":
+                self._log.debug("Resourcemgr responded wih an active vm resp %s",
+                                vm_resp)
+                yield from self.vdu_is_active()
+                self._state = VDURecordState.READY
+            elif (vm_resp.resource_state == "pending" or
+                  vm_resp.resource_state == "inactive"):
+                self._log.debug("Resourcemgr responded wih a pending vm resp %s",
+                                vm_resp)
+                # handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
+                # self._rm_regh = yield from self._dts.register(self.resmgr_path + '/resource-info',
+                #                                              flags=rwdts.Flag.SUBSCRIBER,
+                #                                              handler=handler)
+            else:
+                self._log.debug("Resourcemgr responded wih an error vm resp %s",
+                                vm_resp)
+                raise VirtualDeploymentUnitRecordError(
+                    "Failed VDUR instantiation %s " % vm_resp)
+
+        except Exception as e:
+            import traceback
+            traceback.print_exc()
+            self._log.exception(e)
+            self._log.error("Instantiation of VDU record failed: %s", str(e))
+            self._state = VDURecordState.FAILED
+            yield from self.instantiation_failed(str(e))
+
+
+class VlRecordState(enum.Enum):
+    """ VL Record State """
+    INIT = 101
+    INSTANTIATION_PENDING = 102
+    ACTIVE = 103
+    TERMINATE_PENDING = 104
+    TERMINATED = 105
+    FAILED = 106
+
+
+class InternalVirtualLinkRecord(object):
+    """ Internal Virtual Link record """
+    def __init__(self, dts, log, loop, ivld_msg, vnfr_name, cloud_account_name):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._ivld_msg = ivld_msg
+        self._vnfr_name = vnfr_name
+        self._cloud_account_name = cloud_account_name
+
+        self._vlr_req = self.create_vlr()
+        self._vlr = None
+        self._state = VlRecordState.INIT
+
+    @property
+    def vlr_id(self):
+        """ Find VLR by id """
+        return self._vlr_req.id
+
+    @property
+    def name(self):
+        """ Name of this VL """
+        return self._vnfr_name + "." + self._ivld_msg.name
+
+    @property
+    def network_id(self):
+        """ Find VLR by id """
+        return self._vlr.network_id if self._vlr else None
+
+    def vlr_path(self):
+        """ VLR path for this VLR instance"""
+        return "D,/vlr:vlr-catalog/vlr:vlr[vlr:id = '{}']".format(self.vlr_id)
+
+    def create_vlr(self):
+        """ Create the VLR record which will be instantiated """
+
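+        # Sketch of the resulting record (field values assumed): a fresh
+        # uuid, the qualified "<vnfr>.<ivld>" name, the cloud account, plus
+        # whichever of the copied VLD fields are present on the IVLD, e.g.
+        #   {"id": "<uuid>", "name": "vnf1.fabric",
+        #    "cloud_account": "openstack-1", "type_yang": "ELAN"}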
+        vld_fields = ["short_name",
+                      "vendor",
+                      "description",
+                      "version",
+                      "type_yang",
+                      "provider_network"]
+
+        vld_copy_dict = {k: v for k, v in self._ivld_msg.as_dict().items() if k in vld_fields}
+
+        vlr_dict = {"id": str(uuid.uuid4()),
+                    "name": self.name,
+                    "cloud_account": self._cloud_account_name,
+                    }
+        vlr_dict.update(vld_copy_dict)
+
+        vlr = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.from_dict(vlr_dict)
+        return vlr
+
+    @asyncio.coroutine
+    def instantiate(self, xact, restart_mode=False):
+        """ Instantiate VL """
+
+        @asyncio.coroutine
+        def instantiate_vlr():
+            """ Instantiate VLR"""
+            self._log.debug("Create VL with xpath %s and vlr %s",
+                            self.vlr_path(), self._vlr_req)
+
+            with self._dts.transaction(flags=0) as xact:
+                block = xact.block_create()
+                block.add_query_create(xpath=self.vlr_path(), msg=self._vlr_req)
+                self._log.debug("Executing VL create path:%s msg:%s",
+                                self.vlr_path(), self._vlr_req)
+
+                res_iter = None
+                try:
+                    res_iter = yield from block.execute()
+                except Exception:
+                    self._state = VlRecordState.FAILED
+                    self._log.exception("Caught exception while instantial VL")
+                    raise
+
+                for ent in res_iter:
+                    res = yield from ent
+                    self._vlr = res.result
+
+            if self._vlr.operational_status == 'failed':
+                self._log.debug("VL creation failed for vlr id %s", self._vlr.id)
+                self._state = VlRecordState.FAILED
+                raise VnfrInstantiationFailed("instantiation due to VL failure %s" % (self._vlr.id))
+
+            self._log.info("Created VL with xpath %s and vlr %s",
+                           self.vlr_path(), self._vlr)
+
+        @asyncio.coroutine
+        def get_vlr():
+            """ Get the network id """
+            res_iter = yield from self._dts.query_read(self.vlr_path(), rwdts.XactFlag.MERGE)
+            vlr = None
+            for ent in res_iter:
+                res = yield from ent
+                vlr = res.result
+
+            if vlr is None:
+                err = "Failed to get VLR for path  %s" % self.vlr_path()
+                self._log.warn(err)
+                raise InternalVirtualLinkRecordError(err)
+            return vlr
+
+        self._state = VlRecordState.INSTANTIATION_PENDING
+
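+        # On restart, look up the previously published VLR first and only
+        # create a new one if the lookup comes back empty.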
+        if restart_mode:
+            vl = yield from get_vlr()
+            if vl is None:
+                yield from instantiate_vlr()
+        else:
+            yield from instantiate_vlr()
+
+        self._state = VlRecordState.ACTIVE
+
+    def vlr_in_vns(self):
+        """ Is there a VLR record in VNS? """
+        return self._state in (VlRecordState.ACTIVE,
+                               VlRecordState.INSTANTIATION_PENDING,
+                               VlRecordState.FAILED)
+
+    @asyncio.coroutine
+    def terminate(self, xact):
+        """Terminate this VL """
+        if not self.vlr_in_vns():
+            self._log.debug("Ignoring terminate request for id %s in state %s",
+                            self.vlr_id, self._state)
+            return
+
+        self._log.debug("Terminating VL with path %s", self.vlr_path())
+        self._state = VlRecordState.TERMINATE_PENDING
+        block = xact.block_create()
+        block.add_query_delete(self.vlr_path())
+        yield from block.execute(flags=0, now=True)
+        self._state = VlRecordState.TERMINATED
+        self._log.debug("Terminated VL with path %s", self.vlr_path())
+
+
+class VirtualNetworkFunctionRecord(object):
+    """ Virtual Network Function Record """
+    def __init__(self, dts, log, loop, cluster_name, vnfm, vcs_handler, vnfr_msg):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._cluster_name = cluster_name
+        self._vnfr_msg = vnfr_msg
+        self._vnfr_id = vnfr_msg.id
+        self._vnfd_id = vnfr_msg.vnfd_ref
+        self._vnfm = vnfm
+        self._vcs_handler = vcs_handler
+        self._vnfr = vnfr_msg
+
+        self._vnfd = None
+        self._state = VirtualNetworkFunctionRecordState.INIT
+        self._state_failed_reason = None
+        self._ext_vlrs = {}  # The list of external virtual links
+        self._vlrs = []  # The list of internal virtual links
+        self._vdus = []  # The list of vdu
+        self._vlr_by_cp = {}
+        self._cprs = []
+        self._inventory = {}
+        self._create_time = int(time.time())
+        self._vnf_mon = None
+        self._config_status = vnfr_msg.config_status
+        self._vnfd_package_store = rift.package.store.VnfdPackageFilesystemStore(self._log)
+
+    def _get_vdur_from_vdu_id(self, vdu_id):
+        self._log.debug("Finding vdur for vdu_id %s", vdu_id)
+        self._log.debug("Searching through vdus: %s", self._vdus)
+        for vdu in self._vdus:
+            self._log.debug("vdu_id: %s", vdu.vdu_id)
+            if vdu.vdu_id == vdu_id:
+                return vdu
+
+        raise VDURecordNotFound("Could not find vdu record from id: %s", vdu_id)
+
+    @property
+    def operational_status(self):
+        """ Operational status of this VNFR """
+        op_status_map = {"INIT": "init",
+                         "VL_INIT_PHASE": "vl_init_phase",
+                         "VM_INIT_PHASE": "vm_init_phase",
+                         "READY": "running",
+                         "TERMINATE": "terminate",
+                         "VL_TERMINATE_PHASE": "vl_terminate_phase",
+                         "VDU_TERMINATE_PHASE": "vm_terminate_phase",
+                         "TERMINATED": "terminated",
+                         "FAILED": "failed", }
+        return op_status_map[self._state.name]
+
+    @property
+    def vnfd_xpath(self):
+        """ VNFD xpath associated with this VNFR """
+        return("C,/vnfd:vnfd-catalog/"
+               "vnfd:vnfd[vnfd:id = '{}']".format(self._vnfd_id))
+
+    @property
+    def vnfd(self):
+        """ VNFD for this VNFR """
+        return self._vnfd
+
+    @property
+    def vnf_name(self):
+        """ VNFD name associated with this VNFR """
+        return self.vnfd.name
+
+    @property
+    def name(self):
+        """ Name of this VNF in the record """
+        return self._vnfr.name
+
+    @property
+    def cloud_account_name(self):
+        """ Name of the cloud account this VNFR is instantiated in """
+        return self._vnfr.cloud_account
+
+    @property
+    def vnfd_id(self):
+        """ VNFD Id associated with this VNFR """
+        return self.vnfd.id
+
+    @property
+    def vnfr_id(self):
+        """ VNFR Id associated with this VNFR """
+        return self._vnfr_id
+
+    @property
+    def member_vnf_index(self):
+        """ Member VNF index associated with this VNFR """
+        return self._vnfr.member_vnf_index_ref
+
+    @property
+    def config_status(self):
+        """ Config agent status for this VNFR """
+        return self._config_status
+
+    def component_by_name(self, component_name):
+        """ Find a component by name in the inventory list"""
+        mangled_name = VcsComponent.mangle_name(component_name,
+                                                self.vnf_name,
+                                                self.vnfd_id)
+        return self._inventory[mangled_name]
+
+    @asyncio.coroutine
+    def get_nsr_config(self):
+        ### Need access to NS instance configuration for runtime resolution.
+        ### This shall be replaced when deployment flavors are implemented
+        xpath = "C,/nsr:ns-instance-config"
+        results = yield from self._dts.query_read(xpath, rwdts.XactFlag.MERGE)
+
+        for result in results:
+            entry = yield from result
+            ns_instance_config = entry.result
+            for nsr in ns_instance_config.nsr:
+                if nsr.id == self._vnfr_msg.nsr_id_ref:
+                    return nsr
+        return None
+
+    @asyncio.coroutine
+    def start_component(self, component_name, ip_addr):
+        """ Start a component in the VNFR by name """
+        comp = self.component_by_name(component_name)
+        yield from comp.start(None, None, ip_addr)
+
+    def cp_ip_addr(self, cp_name):
+        """ Get ip address for connection point """
+        self._log.debug("cp_ip_addr()")
+        for cp in self._cprs:
+            if cp.name == cp_name and cp.ip_address is not None:
+                return cp.ip_address
+        return "0.0.0.0"
+
+    def mgmt_intf_info(self):
+        """ Get Management interface info for this VNFR """
+        mgmt_intf_desc = self.vnfd.msg.mgmt_interface
+        ip_addr = None
+        if mgmt_intf_desc.has_field("cp"):
+            ip_addr = self.cp_ip_addr(mgmt_intf_desc.cp)
+        elif mgmt_intf_desc.has_field("vdu_id"):
+            try:
+                vdur = self._get_vdur_from_vdu_id(mgmt_intf_desc.vdu_id)
+                ip_addr = vdur.management_ip
+            except VDURecordNotFound:
+                self._log.debug("Did not find mgmt interface for vnfr id %s", self._vnfr_id)
+                ip_addr = None
+        else:
+            ip_addr = mgmt_intf_desc.ip_address
+        port = mgmt_intf_desc.port
+
+        return ip_addr, port
+
+    @property
+    def msg(self):
+        """ Message associated with this VNFR """
+        vnfd_fields = ["short_name", "vendor", "description", "version"]
+        vnfd_copy_dict = {k: v for k, v in self.vnfd.msg.as_dict().items() if k in vnfd_fields}
+
+        mgmt_intf = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MgmtInterface()
+        ip_address, port = self.mgmt_intf_info()
+
+        if ip_address is not None:
+            mgmt_intf.ip_address = ip_address
+        if port is not None:
+            mgmt_intf.port = port
+
+        vnfr_dict = {"id": self._vnfr_id,
+                     "nsr_id_ref": self._vnfr_msg.nsr_id_ref,
+                     "name": self.name,
+                     "member_vnf_index_ref": self.member_vnf_index,
+                     "vnfd_ref": self.vnfd_id,
+                     "operational_status": self.operational_status,
+                     "operational_status_details": self._state_failed_reason,
+                     "cloud_account": self.cloud_account_name,
+                     "config_status": self._config_status
+                     }
+
+        vnfr_dict.update(vnfd_copy_dict)
+
+        vnfr_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
+        vnfr_msg.mgmt_interface = mgmt_intf
+
+        # Add all the VLRs to the VNFR
+        for vlr in self._vlrs:
+            ivlr = vnfr_msg.internal_vlr.add()
+            ivlr.vlr_ref = vlr.vlr_id
+
+        # Add all the VDURs to the VNFR
+        if self._vdus is not None:
+            for vdu in self._vdus:
+                vdur = vnfr_msg.vdur.add()
+                vdur.from_dict(vdu.msg.as_dict())
+
+        if self.vnfd.msg.mgmt_interface.has_field('dashboard_params'):
+            vnfr_msg.dashboard_url = self.dashboard_url
+
+        for cpr in self._cprs:
+            new_cp = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr.as_dict())
+            vnfr_msg.connection_point.append(new_cp)
+
+        if self._vnf_mon is not None:
+            for monp in self._vnf_mon.msg:
+                vnfr_msg.monitoring_param.append(
+                    VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict(monp.as_dict()))
+
+        if self._vnfr.vnf_configuration is not None:
+            vnfr_msg.vnf_configuration.from_dict(self._vnfr.vnf_configuration.as_dict())
+            if (ip_address is not None and
+                    vnfr_msg.vnf_configuration.config_access.mgmt_ip_address is None):
+                vnfr_msg.vnf_configuration.config_access.mgmt_ip_address = ip_address
+
+        for group in self._vnfr_msg.placement_groups_info:
+            group_info = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_PlacementGroupsInfo()
+            group_info.from_dict(group.as_dict())
+            vnfr_msg.placement_groups_info.append(group_info)
+
+        return vnfr_msg
+
+    @property
+    def dashboard_url(self):
+        ip, cfg_port = self.mgmt_intf_info()
+        protocol = 'http'
+        http_port = 80
+        if self.vnfd.msg.mgmt_interface.dashboard_params.has_field('https'):
+            if self.vnfd.msg.mgmt_interface.dashboard_params.https is True:
+                protocol = 'https'
+                http_port = 443
+        if self.vnfd.msg.mgmt_interface.dashboard_params.has_field('port'):
+            http_port = self.vnfd.msg.mgmt_interface.dashboard_params.port
+
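+        # Hypothetical example: https=True, no explicit port and a path of
+        # "/api/ui" yields "https://<mgmt-ip>:443/api/ui".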
+        url = "{protocol}://{ip_address}:{port}/{path}".format(
+                protocol=protocol,
+                ip_address=ip,
+                port=http_port,
+                path=self.vnfd.msg.mgmt_interface.dashboard_params.path.lstrip("/"),
+                )
+
+        return url
+
+    @property
+    def xpath(self):
+        """ path for this  VNFR """
+        return("D,/vnfr:vnfr-catalog"
+               "/vnfr:vnfr[vnfr:id='{}']".format(self.vnfr_id))
+
+    @asyncio.coroutine
+    def publish(self, xact):
+        """ publish this VNFR """
+        vnfr = self.msg
+        self._log.debug("Publishing VNFR path = [%s], record = [%s]",
+                        self.xpath, self.msg)
+        vnfr.create_time = self._create_time
+        yield from self._vnfm.publish_vnfr(xact, self.xpath, self.msg)
+        self._log.debug("Published VNFR path = [%s], record = [%s]",
+                        self.xpath, self.msg)
+
+    @asyncio.coroutine
+    def create_vls(self):
+        """ Publish The VLs associated with this VNF """
+        self._log.debug("Publishing Internal Virtual Links for vnfd id: %s",
+                        self.vnfd_id)
+        for ivld_msg in self.vnfd.msg.internal_vld:
+            self._log.debug("Creating internal vld:"
+                            " %s, int_cp_ref = %s",
+                            ivld_msg, ivld_msg.internal_connection_point_ref
+                            )
+            vlr = InternalVirtualLinkRecord(dts=self._dts,
+                                            log=self._log,
+                                            loop=self._loop,
+                                            ivld_msg=ivld_msg,
+                                            vnfr_name=self.name,
+                                            cloud_account_name=self.cloud_account_name
+                                            )
+            self._vlrs.append(vlr)
+
+            for int_cp in ivld_msg.internal_connection_point_ref:
+                if int_cp in self._vlr_by_cp:
+                    msg = ("Connection point %s already "
+                           " bound %s" % (int_cp, self._vlr_by_cp[int_cp]))
+                    raise InternalVirtualLinkRecordError(msg)
+                self._log.debug("Setting vlr %s to internal cp = %s",
+                                vlr, int_cp)
+                self._vlr_by_cp[int_cp] = vlr
+
+    @asyncio.coroutine
+    def instantiate_vls(self, xact, restart_mode=False):
+        """ Instantiate the VLs associated with this VNF """
+        self._log.debug("Instantiating Internal Virtual Links for vnfd id: %s",
+                        self.vnfd_id)
+
+        for vlr in self._vlrs:
+            self._log.debug("Instantiating VLR %s", vlr)
+            yield from vlr.instantiate(xact, restart_mode)
+
+    def find_vlr_by_cp(self, cp_name):
+        """ Find the VLR associated with the cp name """
+        return self._vlr_by_cp[cp_name]
+
+    def resolve_placement_group_cloud_construct(self, input_group, nsr_config):
+        """
+        Returns the cloud specific construct for placement group
+        Arguments:
+            input_group: VNFD PlacementGroup
+            nsr_config: Configuration for VNFDGroup MAP in the NSR config
+        """
+        copy_dict = ['name', 'requirement', 'strategy']
+        for group_info in nsr_config.vnfd_placement_group_maps:
+            if group_info.placement_group_ref == input_group.name and \
+               group_info.vnfd_id_ref == self.vnfd_id:
+                group = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_PlacementGroupsInfo()
+                group_dict = {k: v for k, v in
+                              group_info.as_dict().items()
+                              if k not in ('placement_group_ref', 'vnfd_id_ref')}
+                for param in copy_dict:
+                    group_dict.update({param: getattr(input_group, param)})
+                group.from_dict(group_dict)
+                return group
+        return None
+
+    @asyncio.coroutine
+    def get_vdu_placement_groups(self, vdu):
+        placement_groups = []
+        ### Step-1: Get VNF level placement groups
+        for group in self._vnfr_msg.placement_groups_info:
+            #group_info = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_PlacementGroupsInfo()
+            #group_info.from_dict(group.as_dict())
+            placement_groups.append(group)
+
+        ### Step-2: Get NSR config. This is required for resolving placement_groups cloud constructs
+        nsr_config = yield from self.get_nsr_config()
+
+        ### Step-3: Get VDU level placement groups
+        for group in self.vnfd.msg.placement_groups:
+            for member_vdu in group.member_vdus:
+                if member_vdu.member_vdu_ref == vdu.id:
+                    group_info = self.resolve_placement_group_cloud_construct(group,
+                                                                              nsr_config)
+                    if group_info is None:
+                        self._log.info("Could not resolve cloud-construct for placement group: %s", group.name)
+                        ### raise VNFMPlacementGroupError("Could not resolve cloud-construct for placement group: {}".format(group.name))
+                    else:
+                        self._log.info("Successfully resolved cloud construct for placement group: %s for VDU: %s in VNF: %s (Member Index: %s)",
+                                       str(group_info),
+                                       vdu.name,
+                                       self.vnf_name,
+                                       self.member_vnf_index)
+                        placement_groups.append(group_info)
+
+        return placement_groups
+
+    @asyncio.coroutine
+    def create_vdus(self, vnfr, restart_mode=False):
+        """ Create the VDUs associated with this VNF """
+
+        def get_vdur_id(vdud):
+            """Get the corresponding VDUR's id for the VDUD. This is useful in
+            case of a restart.
+
+            In restart mode we check for exiting VDUR's ID and use them, if
+            available. This way we don't end up creating duplicate VDURs
+            """
+            vdur_id = None
+
+            if restart_mode and vdud is not None:
+                try:
+                    vdur_ids = [vdur.id for vdur in vnfr._vnfr.vdur if vdur.vdu_id_ref == vdud.id]
+                    vdur_id = vdur_ids[0]
+                except IndexError:
+                    self._log.error("Unable to find a VDUR for VDUD {}".format(vdud))
+
+            return vdur_id
+
+        self._log.info("Creating VDUs for vnfd id: %s", self.vnfd_id)
+        for vdu in self.vnfd.msg.vdu:
+            self._log.debug("Creating vdu: %s", vdu)
+            vdur_id = get_vdur_id(vdu)
+
+            placement_groups = yield from self.get_vdu_placement_groups(vdu)
+            self._log.info("Launching VDU: %s from VNFD :%s (Member Index: %s) with Placement Groups: %s",
+                           vdu.name,
+                           self.vnf_name,
+                           self.member_vnf_index,
+                           [ group.name for group in placement_groups])
+
+            vdur = VirtualDeploymentUnitRecord(
+                dts=self._dts,
+                log=self._log,
+                loop=self._loop,
+                vdud=vdu,
+                vnfr=vnfr,
+                mgmt_intf=self.has_mgmt_interface(vdu),
+                cloud_account_name=self.cloud_account_name,
+                vnfd_package_store=self._vnfd_package_store,
+                vdur_id=vdur_id,
+                placement_groups=placement_groups,
+                )
+            yield from vdur.vdu_opdata_register()
+
+            self._vdus.append(vdur)
+
+    @asyncio.coroutine
+    def instantiate_vdus(self, xact, vnfr):
+        """ Instantiate the VDUs associated with this VNF """
+        self._log.debug("Instantiating VDU's for vnfd id %s: %s", self.vnfd_id, self._vdus)
+
+        lookup = {vdu.vdu_id: vdu for vdu in self._vdus}
+
+        # Identify any dependencies among the VDUs
+        dependencies = collections.defaultdict(list)
+        vdu_id_pattern = re.compile(r"\{\{ vdu\[([^]]+)\]\S* \}\}")
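+        # Illustrative match (hypothetical cloud-init fragment):
+        #   vdu_id_pattern.findall("addr: {{ vdu[mgmt-vdu].mgmt.ip }}")
+        #   -> ["mgmt-vdu"]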
+
+        for vdu in self._vdus:
+            if vdu.vdud_cloud_init is not None:
+                for vdu_id in vdu_id_pattern.findall(vdu.vdud_cloud_init):
+                    if vdu_id != vdu.vdu_id:
+                        # This means that vdu.vdu_id depends upon vdu_id,
+                        # i.e. vdu_id must be instantiated before
+                        # vdu.vdu_id.
+                        dependencies[vdu.vdu_id].append(lookup[vdu_id])
+
+        # Define the terminal states of VDU instantiation
+        terminal = (
+                VDURecordState.READY,
+                VDURecordState.TERMINATED,
+                VDURecordState.FAILED,
+                )
+
+        datastore = VdurDatastore()
+        processed = set()
+
+        @asyncio.coroutine
+        def instantiate_monitor(vdu):
+            """Monitor the state of the VDU during instantiation
+
+            Arguments:
+                vdu - a VirtualDeploymentUnitRecord
+
+            """
+            # wait for the VDUR to enter a terminal state
+            while vdu._state not in terminal:
+                yield from asyncio.sleep(1, loop=self._loop)
+
+            # update the datastore
+            datastore.update(vdu)
+
+            # add the VDU to the set of processed VDUs
+            processed.add(vdu.vdu_id)
+
+        @asyncio.coroutine
+        def instantiate(vdu):
+            """Instantiate the specified VDU
+
+            Arguments:
+                vdu - a VirtualDeploymentUnitRecord
+
+            Raises:
+                if the VDU, or any of the VDUs this VDU depends upon, are
+                terminated or fail to instantiate properly, a
+                VirtualDeploymentUnitRecordError is raised.
+
+            """
+            for dependency in dependencies[vdu.vdu_id]:
+                self._log.debug("{}: waiting for {}".format(vdu.vdu_id, dependency.vdu_id))
+
+                while dependency.vdu_id not in processed:
+                    yield from asyncio.sleep(1, loop=self._loop)
+
+                if not dependency.active:
+                    raise VirtualDeploymentUnitRecordError()
+
+            self._log.debug('instantiating {}'.format(vdu.vdu_id))
+
+            # Populate the datastore with the current values of the VDU
+            datastore.add(vdu)
+
+            # Substitute any variables contained in the cloud config script
+            config = str(vdu.vdud_cloud_init)
+
+            parts = re.split("\{\{ ([^\}]+) \}\}", config)
+            if len(parts) > 1:
+
+                # Extract the variable names
+                variables = list()
+                for variable in parts[1::2]:
+                    variables.append(variable.lstrip('{{').rstrip('}}').strip())
+
+                # Iterate over the variables and substitute values from the
+                # datastore.
+                for variable in variables:
+
+                    # Handle a reference to a VDU by ID
+                    if variable.startswith('vdu['):
+                        value = datastore.get(variable)
+                        if value is None:
+                            msg = "Unable to find a substitute for {} in {} cloud-init script"
+                            raise ValueError(msg.format(variable, vdu.vdu_id))
+
+                        config = config.replace("{{ %s }}" % variable, value)
+                        continue
+
+                    # Handle a reference to the current VDU
+                    if variable.startswith('vdu'):
+                        value = datastore.get('vdu[{}]'.format(vdu.vdu_id) + variable[3:])
+                        config = config.replace("{{ %s }}" % variable, value)
+                        continue
+
+                    # Handle unrecognized variables
+                    msg = 'unrecognized cloud-config variable: {}'
+                    raise ValueError(msg.format(variable))
+
+            # Instantiate the VDU
+            with self._dts.transaction() as xact:
+                self._log.debug("Instantiating vdu: %s", vdu)
+                yield from vdu.instantiate(xact, vnfr, config=config)
+                if self._state == VirtualNetworkFunctionRecordState.FAILED:
+                    self._log.error("Instatiation of VNF %s failed while instantiating vdu %s",
+                                    self.vnfr_id, vdu)
+
+        # First create a set of tasks to monitor the state of the VDUs and
+        # report when they have entered a terminal state
+        for vdu in self._vdus:
+            self._loop.create_task(instantiate_monitor(vdu))
+
+        for vdu in self._vdus:
+            self._loop.create_task(instantiate(vdu))
+
+    def has_mgmt_interface(self, vdu):
+        # TODO: Support additional mgmt_interface type options
+        if self.vnfd.msg.mgmt_interface.vdu_id == vdu.id:
+            return True
+        return False
+
+    def vlr_xpath(self, vlr_id):
+        """ vlr xpath """
+        return(
+            "D,/vlr:vlr-catalog/"
+            "vlr:vlr[vlr:id = '{}']".format(vlr_id))
+
+    def ext_vlr_by_id(self, vlr_id):
+        """ find ext vlr by id """
+        return self._ext_vlrs[vlr_id]
+
+    @asyncio.coroutine
+    def publish_inventory(self, xact):
+        """ Publish the inventory associated with this VNF """
+        self._log.debug("Publishing inventory for VNFR id: %s", self._vnfr_id)
+
+        for component in self.vnfd.msg.component:
+            self._log.debug("Creating inventory component %s", component)
+            mangled_name = VcsComponent.mangle_name(component.component_name,
+                                                    self.vnf_name,
+                                                    self.vnfd_id
+                                                    )
+            comp = VcsComponent(dts=self._dts,
+                                log=self._log,
+                                loop=self._loop,
+                                cluster_name=self._cluster_name,
+                                vcs_handler=self._vcs_handler,
+                                component=component,
+                                mangled_name=mangled_name,
+                                )
+            if comp.name in self._inventory:
+                self._log.debug("Duplicate entries in inventory  %s for vnfr %s",
+                                component, self._vnfd_id)
+                return
+            self._log.debug("Adding component %s for vnrf %s",
+                            comp.name, self._vnfr_id)
+            self._inventory[comp.name] = comp
+            yield from comp.publish(xact)
+
+    def all_vdus_active(self):
+        """ Are all VDUS in this VNFR active? """
+        for vdu in self._vdus:
+            if not vdu.active:
+                return False
+
+        self._log.debug("Inside all_vdus_active. Returning True")
+        return True
+
+    @asyncio.coroutine
+    def instantiation_failed(self, failed_reason=None):
+        """ VNFR instantiation failed """
+        self._log.debug("VNFR %s instantiation failed ", self.vnfr_id)
+        self.set_state(VirtualNetworkFunctionRecordState.FAILED)
+        self._state_failed_reason = failed_reason
+
+        # Update the VNFR with the changed status
+        yield from self.publish(None)
+
+    @asyncio.coroutine
+    def is_ready(self):
+        """ This VNF is ready"""
+        self._log.debug("VNFR id %s is ready", self.vnfr_id)
+
+        if self._state != VirtualNetworkFunctionRecordState.FAILED:
+            self.set_state(VirtualNetworkFunctionRecordState.READY)
+
+        else:
+            self._log.debug("VNFR id %s ignoring state change", self.vnfr_id)
+
+        # Update the VNFR with the changed status
+        yield from self.publish(None)
+
+    def update_cp(self, cp_name, ip_address, cp_id):
+        """Updated the connection point with ip address"""
+        for cp in self._cprs:
+            if cp.name == cp_name:
+                self._log.debug("Setting ip address and id for cp %s, cpr %s with ip %s id %s",
+                                cp_name, cp, ip_address, cp_id)
+                cp.ip_address = ip_address
+                cp.connection_point_id = cp_id
+                return
+
+        err = "No connection point %s found in VNFR id %s" % (cp.name, self._vnfr_id)
+        self._log.debug(err)
+        raise VirtualDeploymentUnitRecordError(err)
+
+    def set_state(self, state):
+        """ Set state for this VNFR"""
+        self._state = state
+
+    @asyncio.coroutine
+    def instantiate(self, xact, restart_mode=False):
+        """ instantiate this VNF """
+        self.set_state(VirtualNetworkFunctionRecordState.VL_INIT_PHASE)
+
+        @asyncio.coroutine
+        def fetch_vlrs():
+            """ Fetch VLRs """
+            # Iterate over all the connection points in VNFR and fetch the
+            # associated VLRs
+
+            def cpr_from_cp(cp):
+                """ Creates a record level connection point from the desciptor cp"""
+                cp_fields = ["name", "image", "vm-flavor"]
+                cp_copy_dict = {k: v for k, v in cp.as_dict().items() if k in cp_fields}
+                cpr_dict = {}
+                cpr_dict.update(cp_copy_dict)
+                return VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr_dict)
+
+            self._log.debug("Fetching VLRs for VNFR id = %s, cps = %s",
+                            self._vnfr_id, self._vnfr.connection_point)
+
+            for cp in self._vnfr.connection_point:
+                cpr = cpr_from_cp(cp)
+                self._cprs.append(cpr)
+                self._log.debug("Adding Connection point record  %s ", cp)
+
+                vlr_path = self.vlr_xpath(cp.vlr_ref)
+                self._log.debug("Fetching VLR with path = %s", vlr_path)
+                res_iter = yield from self._dts.query_read(vlr_path,
+                                                           rwdts.XactFlag.MERGE)
+                for i in res_iter:
+                    r = yield from i
+                    d = r.result
+                    self._ext_vlrs[cp.vlr_ref] = d
+                    cpr.vlr_ref = cp.vlr_ref
+                    self._log.debug("Fetched VLR [%s] with path = [%s]", d, vlr_path)
+
+        # Fetch the VNFD associated with the VNFR
+        self._log.debug("VNFR-ID %s: Fetching vnfds", self._vnfr_id)
+        self._vnfd = yield from self._vnfm.get_vnfd_ref(self._vnfd_id)
+        self._log.debug("VNFR-ID %s: Fetched vnfd:%s", self._vnfr_id, self._vnfd)
+
+        assert self.vnfd is not None
+
+        # Fetch External VLRs
+        self._log.debug("VNFR-ID %s: Fetching vlrs", self._vnfr_id)
+        yield from fetch_vlrs()
+
+        # Publish inventory
+        self._log.debug("VNFR-ID %s: Publishing Inventory", self._vnfr_id)
+        yield from self.publish_inventory(xact)
+
+        # Create the VL records
+        self._log.debug("VNFR-ID %s: Creating VLs", self._vnfr_id)
+        yield from self.create_vls()
+
+        # publish the VNFR
+        self._log.debug("VNFR-ID %s: Publish VNFR", self._vnfr_id)
+        yield from self.publish(xact)
+
+        # instantiate VLs
+        self._log.debug("VNFR-ID %s: Instantiate VLs", self._vnfr_id)
+        try:
+            yield from self.instantiate_vls(xact, restart_mode)
+        except Exception as e:
+            self._log.exception("VL instantiation failed (%s)", str(e))
+            yield from self.instantiation_failed(str(e))
+            return
+
+        self.set_state(VirtualNetworkFunctionRecordState.VM_INIT_PHASE)
+
+        # instantiate VDUs
+        self._log.debug("VNFR-ID %s: Create VDUs", self._vnfr_id)
+        yield from self.create_vdus(self, restart_mode)
+
+        # publish the VNFR
+        self._log.debug("VNFR-ID %s: Publish VNFR", self._vnfr_id)
+        yield from self.publish(xact)
+
+        # instantiate VDUs
+        # ToDo: Check if this should be prevented during restart
+        self._log.debug("VNFR-ID %s: Instantiate VDUs", self._vnfr_id)
+        _ = self._loop.create_task(self.instantiate_vdus(xact, self))
+
+        # publish the VNFR
+        self._log.debug("VNFR-ID %s: Publish VNFR", self._vnfr_id)
+        yield from self.publish(xact)
+
+        self._log.debug("VNFR-ID %s: Instantiation Done", self._vnfr_id)
+
+    @asyncio.coroutine
+    def terminate(self, xact):
+        """ Terminate this virtual network function """
+
+        self._log.debug("Terminatng VNF id %s", self.vnfr_id)
+
+        self.set_state(VirtualNetworkFunctionRecordState.TERMINATE)
+
+        # stop monitoring
+        if self._vnf_mon is not None:
+            self._vnf_mon.stop()
+            self._vnf_mon.deregister()
+            self._vnf_mon = None
+
+        @asyncio.coroutine
+        def terminate_vls():
+            """ Terminate VLs in this VNF """
+            for vl in self._vlrs:
+                yield from vl.terminate(xact)
+
+        @asyncio.coroutine
+        def terminate_vdus():
+            """ Terminate VDUS in this VNF """
+            for vdu in self._vdus:
+                yield from vdu.terminate(xact)
+
+        self._log.debug("Terminatng VLs in VNF id %s", self.vnfr_id)
+        self.set_state(VirtualNetworkFunctionRecordState.VL_TERMINATE_PHASE)
+        yield from terminate_vls()
+
+        self._log.debug("Terminatng VDUs in VNF id %s", self.vnfr_id)
+        self.set_state(VirtualNetworkFunctionRecordState.VDU_TERMINATE_PHASE)
+        yield from terminate_vdus()
+
+        self._log.debug("Terminated  VNF id %s", self.vnfr_id)
+        self.set_state(VirtualNetworkFunctionRecordState.TERMINATED)
+
+
+class VnfdDtsHandler(object):
+    """ DTS handler for VNFD config changes """
+    XPATH = "C,/vnfd:vnfd-catalog/vnfd:vnfd"
+
+    def __init__(self, dts, log, loop, vnfm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._vnfm = vnfm
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ DTS registration handle """
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for VNFD configuration"""
+
+        def on_apply(dts, acg, xact, action, scratch):
+            """Apply the  configuration"""
+            self._log.debug("Got VNFM VNFD apply (xact: %s) (action: %s)(scr: %s)",
+                            xact, action, scratch)
+
+            is_recovery = xact.xact is None and action == rwdts.AppconfAction.INSTALL
+            # Create/Update a VNFD record
+            for cfg in self._regh.get_xact_elements(xact):
+                # Only interested in those VNFD cfgs whose ID was received in prepare callback
+                if cfg.id in scratch.get('vnfds', []) or is_recovery:
+                    self._vnfm.update_vnfd(cfg)
+
+            scratch.pop('vnfds', None)
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+            """ on prepare callback """
+            self._log.debug("Got on prepare for VNFD (path: %s) (action: %s)",
+                            ks_path.to_xpath(RwVnfmYang.get_schema()), msg)
+            fref = ProtobufC.FieldReference.alloc()
+            fref.goto_whole_message(msg.to_pbcm())
+
+            # Handle deletes in prepare_callback, but adds/updates in apply_callback
+            if fref.is_field_deleted():
+                # Delete a VNFD record
+                self._log.debug("Deleting VNFD with id %s", msg.id)
+                if self._vnfm.vnfd_in_use(msg.id):
+                    self._log.debug("Cannot delete VNFD in use - %s", msg)
+                    err = "Cannot delete a VNFD in use - %s" % msg
+                    raise VirtualNetworkFunctionDescriptorRefCountExists(err)
+                # Delete a VNFD record
+                yield from self._vnfm.delete_vnfd(msg.id)
+            else:
+                # Handle actual adds/updates in apply_callback,
+                # just check if VNFD in use in prepare_callback
+                if self._vnfm.vnfd_in_use(msg.id):
+                    self._log.debug("Cannot modify an VNFD in use - %s", msg)
+                    err = "Cannot modify an VNFD in use - %s" % msg
+                    raise VirtualNetworkFunctionDescriptorRefCountExists(err)
+
+                # Add this VNFD to scratch to create/update in apply callback
+                vnfds = scratch.setdefault('vnfds', [])
+                vnfds.append(msg.id)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        self._log.debug(
+            "Registering for VNFD config using xpath: %s",
+            VnfdDtsHandler.XPATH,
+            )
+        acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
+        with self._dts.appconf_group_create(handler=acg_hdl) as acg:
+            self._regh = acg.register(
+                xpath=VnfdDtsHandler.XPATH,
+                flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
+                on_prepare=on_prepare)
+
+
+class VcsComponentDtsHandler(object):
+    """ Vcs Component DTS handler """
+    XPATH = ("D,/rw-manifest:manifest" +
+             "/rw-manifest:operational-inventory" +
+             "/rw-manifest:component")
+
+    def __init__(self, dts, log, loop, vnfm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._regh = None
+        self._vnfm = vnfm
+
+    @property
+    def regh(self):
+        """ DTS registration handle """
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Registers VCS component dts publisher registration"""
+        self._log.debug("VCS Comp publisher DTS handler registering path %s",
+                        VcsComponentDtsHandler.XPATH)
+
+        hdl = rift.tasklets.DTS.RegistrationHandler()
+        handlers = rift.tasklets.Group.Handler()
+        with self._dts.group_create(handler=handlers) as group:
+            self._regh = group.register(xpath=VcsComponentDtsHandler.XPATH,
+                                        handler=hdl,
+                                        flags=(rwdts.Flag.PUBLISHER |
+                                               rwdts.Flag.NO_PREP_READ |
+                                               rwdts.Flag.DATASTORE),)
+
+    @asyncio.coroutine
+    def publish(self, xact, path, msg):
+        """ Publishes the VCS component """
+        self._log.debug("Publishing the VcsComponent xact = %s, %s:%s",
+                        xact, path, msg)
+        self.regh.create_element(path, msg)
+        self._log.debug("Published the VcsComponent to %s xact = %s, %s:%s",
+                        VcsComponentDtsHandler.XPATH, xact, path, msg)
+
+
+class VnfrConsoleOperdataDtsHandler(object):
+    """ registers 'D,/vnfr:vnfr-console/vnfr:vnfr[id]/vdur[id]' and handles CRUD from DTS"""
+    @property
+    def vnfr_vdu_console_xpath(self):
+        """ path for resource-mgr"""
+        return ("D,/rw-vnfr:vnfr-console/rw-vnfr:vnfr[rw-vnfr:id='{}']/rw-vnfr:vdur[vnfr:id='{}']".format(self._vnfr_id,self._vdur_id))
+
+    def __init__(self, dts, log, loop, vnfm, vnfr_id, vdur_id, vdu_id):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._regh = None
+        self._vnfm = vnfm
+
+        self._vnfr_id = vnfr_id
+        self._vdur_id = vdur_id
+        self._vdu_id = vdu_id
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for VNFR VDU Operational Data read from dts """
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            """ prepare callback from dts """
+            xpath = ks_path.to_xpath(RwVnfrYang.get_schema())
+            self._log.debug(
+                "Got VNFR VDU Opdata xact_info: %s, action: %s): %s:%s",
+                xact_info, action, xpath, msg
+                )
+
+            if action == rwdts.QueryAction.READ:
+                schema = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur.schema()
+                path_entry = schema.keyspec_to_entry(ks_path)
+                self._log.debug("VDU Opdata path is {}".format(path_entry))
+                try:
+                    vnfr = self._vnfm.get_vnfr(self._vnfr_id)
+                except VnfRecordError as e:
+                    self._log.error("VNFR id %s not found", self._vnfr_id)
+                    xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK)
+                    return
+                try:
+                    vdur = vnfr._get_vdur_from_vdu_id(self._vdu_id)
+                    if vdur._state != VDURecordState.READY:
+                        self._log.debug("VDUR state is not READY, current state is {}".format(vdur._state))
+                        xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK)
+                        return
+                    with self._dts.transaction() as new_xact:
+                        resp = yield from vdur.read_resource(new_xact)
+                        vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
+                        vdur_console.id = self._vdur_id
+                        if resp.console_url:
+                            vdur_console.console_url = resp.console_url
+                        else:
+                            vdur_console.console_url = 'none'
+                        self._log.debug("Recevied console URL for vdu {} is {}".format(self._vdu_id,vdur_console))
+                except Exception:
+                    self._log.exception("Caught exception while reading VDU %s", self._vdu_id)
+                    vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
+                    vdur_console.id = self._vdur_id
+                    vdur_console.console_url = 'none'
+
+                xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK,
+                                        xpath=self.vnfr_vdu_console_xpath,
+                                        msg=vdur_console)
+            else:
+                #raise VnfRecordError("Not supported operation %s" % action)
+                self._log.error("Not supported operation %s" % action)
+                xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK)
+                return 
+                 
+
+        self._log.debug("Registering for VNFR VDU using xpath: %s",
+                        self.vnfr_vdu_console_xpath)
+        hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
+        with self._dts.group_create() as group:
+            self._regh = group.register(xpath=self.vnfr_vdu_console_xpath,
+                                        handler=hdl,
+                                        flags=rwdts.Flag.PUBLISHER,
+                                        )
+
+
+class VnfrDtsHandler(object):
+    """ registers 'D,/vnfr:vnfr-catalog/vnfr:vnfr' and handles CRUD from DTS"""
+    XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+
+    def __init__(self, dts, log, loop, vnfm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._vnfm = vnfm
+
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ Return registration handle"""
+        return self._regh
+
+    @property
+    def vnfm(self):
+        """ Return VNF manager instance """
+        return self._vnfm
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for vnfr create/update/delete/read requests from dts """
+        def on_commit(xact_info):
+            """ The transaction has been committed """
+            self._log.debug("Got vnfr commit (xact_info: %s)", xact_info)
+            return rwdts.MemberRspCode.ACTION_OK
+
+        def on_abort(*args):
+            """ Abort callback """
+            self._log.debug("VNF  transaction got aborted")
+
+        @asyncio.coroutine
+        def on_event(dts, g_reg, xact, xact_event, scratch_data):
+
+            @asyncio.coroutine
+            def instantiate_realloc_vnfr(vnfr):
+                """Re-populate the vnfm after restart
+
+                Arguments:
+                    vlink
+
+                """
+
+                yield from vnfr.instantiate(None, restart_mode=True)
+
+            if xact_event == rwdts.MemberEvent.INSTALL:
+                curr_cfg = self.regh.elements
+                for cfg in curr_cfg:
+                    vnfr = self.vnfm.create_vnfr(cfg)
+                    self._loop.create_task(instantiate_realloc_vnfr(vnfr))
+
+            self._log.debug("Got on_event in vnfm")
+
+            return rwdts.MemberRspCode.ACTION_OK
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            """ prepare callback from dts """
+            self._log.debug(
+                "Got vnfr on_prepare callback (xact_info: %s, action: %s): %s",
+                xact_info, action, msg
+                )
+
+            if action == rwdts.QueryAction.CREATE:
+                if not msg.has_field("vnfd_ref"):
+                    err = "Vnfd reference not provided"
+                    self._log.error(err)
+                    raise VnfRecordError(err)
+
+                vnfr = self.vnfm.create_vnfr(msg)
+                try:
+                    # RIFT-9105: Unable to add a READ query under an existing transaction
+                    # xact = xact_info.xact
+                    yield from vnfr.instantiate(None)
+                except Exception as e:
+                    self._log.exception(e)
+                    self._log.error("Error while instantiating vnfr:%s", vnfr.vnfr_id)
+                    vnfr.set_state(VirtualNetworkFunctionRecordState.FAILED)
+                    yield from vnfr.publish(None)
+            elif action == rwdts.QueryAction.DELETE:
+                schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema()
+                path_entry = schema.keyspec_to_entry(ks_path)
+                vnfr = self._vnfm.get_vnfr(path_entry.key00.id)
+
+                if vnfr is None:
+                    self._log.debug("VNFR id %s not found for delete", path_entry.key00.id)
+                    raise VirtualNetworkFunctionRecordNotFound(
+                        "VNFR id %s not found" % path_entry.key00.id)
+
+                try:
+                    yield from vnfr.terminate(xact_info.xact)
+                    # Unref the VNFD
+                    vnfr.vnfd.unref()
+                    yield from self._vnfm.delete_vnfr(xact_info.xact, vnfr)
+                except Exception as e:
+                    self._log.exception(e)
+                    self._log.error("Caught exception while deleting vnfr %s", path_entry.key00.id)
+
+            elif action == rwdts.QueryAction.UPDATE:
+                schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema()
+                path_entry = schema.keyspec_to_entry(ks_path)
+                vnfr = None
+                try:
+                    vnfr = self._vnfm.get_vnfr(path_entry.key00.id)
+                except Exception as e:
+                    self._log.debug("No vnfr found with id %s", path_entry.key00.id)
+                    xact_info.respond_xpath(rwdts.XactRspCode.NA)
+                    return
+
+                if vnfr is None:
+                    self._log.debug("VNFR id %s not found for update", path_entry.key00.id)
+                    xact_info.respond_xpath(rwdts.XactRspCode.NA)
+                    return
+
+                self._log.debug("VNFR {} update config status {} (current {})".
+                                format(vnfr.name, msg.config_status, vnfr.config_status))
+                # Update the config status and publish
+                vnfr._config_status = msg.config_status
+                yield from vnfr.publish(None)
+
+            else:
+                raise NotImplementedError(
+                    "%s action on VirtualNetworkFunctionRecord not supported",
+                    action)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        self._log.debug("Registering for VNFR using xpath: %s",
+                        VnfrDtsHandler.XPATH,)
+
+        hdl = rift.tasklets.DTS.RegistrationHandler(on_commit=on_commit,
+                                                    on_prepare=on_prepare,)
+        handlers = rift.tasklets.Group.Handler(on_event=on_event,)
+        with self._dts.group_create(handler=handlers) as group:
+            self._regh = group.register(xpath=VnfrDtsHandler.XPATH,
+                                        handler=hdl,
+                                        flags=(rwdts.Flag.PUBLISHER |
+                                               rwdts.Flag.NO_PREP_READ |
+                                               rwdts.Flag.CACHE |
+                                               rwdts.Flag.DATASTORE),)
+
+    @asyncio.coroutine
+    def create(self, xact, path, msg):
+        """
+        Create a VNFR record in DTS with path and message
+        """
+        self._log.debug("Creating VNFR xact = %s, %s:%s",
+                        xact, path, msg)
+
+        self.regh.create_element(path, msg)
+        self._log.debug("Created VNFR xact = %s, %s:%s",
+                        xact, path, msg)
+
+    @asyncio.coroutine
+    def update(self, xact, path, msg):
+        """
+        Update a VNFR record in DTS with path and message
+        """
+        self._log.debug("Updating VNFR xact = %s, %s:%s",
+                        xact, path, msg)
+        self.regh.update_element(path, msg)
+        self._log.debug("Updated VNFR xact = %s, %s:%s",
+                        xact, path, msg)
+
+    @asyncio.coroutine
+    def delete(self, xact, path):
+        """
+        Delete a VNFR record in DTS with path and message
+        """
+        self._log.debug("Deleting VNFR xact = %s, %s", xact, path)
+        self.regh.delete_element(path)
+        self._log.debug("Deleted VNFR xact = %s, %s", xact, path)
+
+
+class VirtualNetworkFunctionDescriptor(object):
+    """
+    Virtual Network Function descriptor class
+    """
+
+    def __init__(self, dts, log, loop, vnfm, vnfd):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+
+        self._vnfm = vnfm
+        self._vnfd = vnfd
+        self._ref_count = 0
+
+    @property
+    def ref_count(self):
+        """ Returns the reference count associated with
+            this Virtual Network Function Descriptor"""
+        return self._ref_count
+
+    @property
+    def id(self):
+        """ Returns vnfd id """
+        return self._vnfd.id
+
+    @property
+    def name(self):
+        """ Returns vnfd name """
+        return self._vnfd.name
+
+    def in_use(self):
+        """ Returns whether vnfd is in use or not """
+        return self._ref_count > 0
+
+    def ref(self):
+        """ Take a reference on this object """
+        self._ref_count += 1
+        return self._ref_count
+
+    def unref(self):
+        """ Release reference on this object """
+        if self.ref_count < 1:
+            msg = ("Unref on a VNFD object - vnfd id %s, ref_count = %s" %
+                   (self.id, self._ref_count))
+            self._log.critical(msg)
+            raise VnfRecordError(msg)
+        self._log.debug("Releasing ref on VNFD %s - curr ref_count:%s",
+                        self.id, self.ref_count)
+        self._ref_count -= 1
+        return self._ref_count
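+
+    # Illustrative ref-count lifecycle (hypothetical caller, not part of this diff):
+    #
+    #     vnfd = yield from vnfm.get_vnfd_ref(vnfd_id)   # ref_count 0 -> 1
+    #     ...                                            # descriptor pinned while in use
+    #     vnfd.unref()                                   # ref_count 1 -> 0, deletable again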
+
+    @property
+    def msg(self):
+        """ Return the message associated with this NetworkServiceDescriptor"""
+        return self._vnfd
+
+    @staticmethod
+    def path_for_id(vnfd_id):
+        """ Return path for the passed vnfd_id"""
+        return "C,/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id = '{}']".format(vnfd_id)
+
+    def path(self):
+        """ Return the path associated with this NetworkServiceDescriptor"""
+        return VirtualNetworkFunctionDescriptor.path_for_id(self.id)
+
+    def update(self, vnfd):
+        """ Update the Virtual Network Function Descriptor """
+        if self.in_use():
+            self._log.error("Cannot update descriptor %s in use refcnt=%d",
+                            self.id, self.ref_count)
+
+            # The following loop is added to debug RIFT-13284
+            for vnf_rec in self._vnfm._vnfrs.values():
+                if vnf_rec.vnfd_id == self.id:
+                    self._log.error("descriptor %s in use by %s:%s",
+                                    self.id, vnf_rec.vnfr_id, vnf_rec.msg)
+            raise VirtualNetworkFunctionDescriptorRefCountExists("Cannot update descriptor in use %s" % self.id)
+        self._vnfd = vnfd
+
+    def delete(self):
+        """ Delete the Virtual Network Function Descriptor """
+        if self.in_use():
+            self._log.error("Cannot delete descriptor %s in use refcnt=%d",
+                            self.id)
+
+            # The following loop is  added to debug RIFT-13284
+            for vnf_rec in self._vnfm._vnfrs.values():
+                if vnf_rec.vnfd_id == self.id:
+                    self._log.error("descriptor %s in used by %s:%s",
+                                    self.id, vnf_rec.vnfr_id, vnf_rec.msg)
+            raise VirtualNetworkFunctionDescriptorRefCountExists("Cannot delete descriptor in use %s" % self.id)
+        self._vnfm.delete_vnfd(self.id)
+
+
+class VnfdRefCountDtsHandler(object):
+    """ The VNFD Ref Count DTS handler """
+    XPATH = "D,/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count"
+
+    def __init__(self, dts, log, loop, vnfm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._vnfm = vnfm
+
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ Return registration handle """
+        return self._regh
+
+    @property
+    def vnfm(self):
+        """ Return the NS manager instance """
+        return self._vnfm
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for VNFD ref count read from dts """
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            """ prepare callback from dts """
+            xpath = ks_path.to_xpath(RwVnfrYang.get_schema())
+            self._log.debug(
+                "Got VNFD ref count get xact_info: %s, action: %s): %s:%s",
+                xact_info, action, xpath, msg
+                )
+
+            if action == rwdts.QueryAction.READ:
+                schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount.schema()
+                path_entry = schema.keyspec_to_entry(ks_path)
+                vnfd_list = yield from self._vnfm.get_vnfd_refcount(path_entry.key00.vnfd_id_ref)
+                for xpath, msg in vnfd_list:
+                    self._log.debug("Responding to ref count query path:%s, msg:%s",
+                                    xpath, msg)
+                    xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.MORE,
+                                            xpath=xpath,
+                                            msg=msg)
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+            else:
+                raise VnfRecordError("Not supported operation %s" % action)
+
+        hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
+        with self._dts.group_create() as group:
+            self._regh = group.register(xpath=VnfdRefCountDtsHandler.XPATH,
+                                        handler=hdl,
+                                        flags=rwdts.Flag.PUBLISHER,
+                                        )
+
+
+class VdurDatastore(object):
+    """
+    This VdurDatastore is intended to expose select information about a VDUR
+    such that it can be referenced in a cloud config file. The data that is
+    exposed does not necessarily follow the structure of the data in the yang
+    model. This is intentional. The data that are exposed are intended to be
+    agnostic of the yang model so that changes in the model do not necessarily
+    require changes to the interface provided to the user. It also means that
+    the user does not need to be familiar with the RIFT.ware yang models.
+    """
+
+    def __init__(self):
+        """Create an instance of VdurDatastore"""
+        self._vdur_data = dict()
+        self._pattern = re.compile(r"vdu\[([^]]+)\]\.(.+)")
+
+    def add(self, vdur):
+        """Add a new VDUR to the datastore
+
+        Arguments:
+            vdur - a VirtualDeploymentUnitRecord instance
+
+        Raises:
+            A ValueError is raised if the VDUR (1) has no ID or (2) is
+            already in the datastore.
+
+        """
+        if vdur.vdu_id is None:
+            raise ValueError('VDURs are required to have an ID')
+
+        if vdur.vdu_id in self._vdur_data:
+            raise ValueError('cannot add a VDUR more than once')
+
+        self._vdur_data[vdur.vdu_id] = dict()
+
+        def set_if_not_none(key, attr):
+            if attr is not None:
+                self._vdur_data[vdur.vdu_id][key] = attr
+
+        set_if_not_none('name', vdur._vdud.name)
+        set_if_not_none('mgmt.ip', vdur.vm_management_ip)
+
+    def update(self, vdur):
+        """Update the VDUR information in the datastore
+
+        Arguments:
+            vdur - a GI representation of a VDUR
+
+        Raises:
+            A ValueError is raised if the VDUR (1) has no ID or (2) is
+            not in the datastore.
+
+        """
+        if vdur.vdu_id is None:
+            raise ValueError('VDURs are required to have an ID')
+
+        if vdur.vdu_id not in self._vdur_data:
+            raise ValueError('VDUR is not recognized')
+
+        def set_or_delete(key, attr):
+            if attr is None:
+                if key in self._vdur_data[vdur.vdu_id]:
+                    del self._vdur_data[vdur.vdu_id][key]
+
+            else:
+                self._vdur_data[vdur.vdu_id][key] = attr
+
+        set_or_delete('name', vdur._vdud.name)
+        set_or_delete('mgmt.ip', vdur.vm_management_ip)
+
+    def remove(self, vdur_id):
+        """Remove all of the data associated with specified VDUR
+
+        Arguments:
+            vdur_id - the identifier of a VDUR in the datastore
+
+        Raises:
+            A ValueError is raised if the VDUR is not contained in the
+            datastore.
+
+        """
+        if vdur_id not in self._vdur_data:
+            raise ValueError('VDUR is not recognized')
+
+        del self._vdur_data[vdur_id]
+
+    def get(self, expr):
+        """Retrieve VDUR information from the datastore
+
+        An expression should be of the form,
+
+            vdu[<id>].<attr>
+
+        where <id> is the VDUR ID (an unquoted UUID), and <attr> is the name of
+        the exposed attribute that the user wishes to retrieve.
+
+        If the requested data is not available, None is returned.
+
+        Arguments:
+            expr - a string that specifies the data to return
+
+        Raises:
+            A ValueError is raised if the provided expression cannot be parsed.
+
+        Returns:
+            The requested data or None
+
+        """
+        result = self._pattern.match(expr)
+        if result is None:
+            raise ValueError('data expression not recognized ({})'.format(expr))
+
+        vdur_id, key = result.groups()
+
+        if vdur_id not in self._vdur_data:
+            return None
+
+        return self._vdur_data[vdur_id].get(key, None)
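+
+# Illustrative VdurDatastore usage (hypothetical IDs and addresses):
+#
+#     store = VdurDatastore()
+#     store.add(vdur)                        # vdur: a VirtualDeploymentUnitRecord
+#     store.get('vdu[abc-123].mgmt.ip')      # -> e.g. '10.0.0.5', or None if unset
+#     store.get('vdu[abc-123].name')         # -> the VDU name, or None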
+
+
+class VnfManager(object):
+    """ The virtual network function manager class """
+    def __init__(self, dts, log, loop, cluster_name):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._cluster_name = cluster_name
+
+        self._vcs_handler = VcsComponentDtsHandler(dts, log, loop, self)
+        self._vnfr_handler = VnfrDtsHandler(dts, log, loop, self)
+
+        self._dts_handlers = [VnfdDtsHandler(dts, log, loop, self),
+                              self._vnfr_handler,
+                              self._vcs_handler,
+                              VnfdRefCountDtsHandler(dts, log, loop, self)]
+        self._vnfrs = {}
+        self._vnfds = {}
+
+    @property
+    def vnfr_handler(self):
+        """ VNFR dts handler """
+        return self._vnfr_handler
+
+    @property
+    def vcs_handler(self):
+        """ VCS dts handler """
+        return self._vcs_handler
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register all static DTS handlers """
+        for hdl in self._dts_handlers:
+            yield from hdl.register()
+
+    @asyncio.coroutine
+    def run(self):
+        """ Run this VNFM instance """
+        self._log.debug("Run VNFManager - registering static DTS handlers""")
+        yield from self.register()
+
+    def get_vnfr(self, vnfr_id):
+        """ get VNFR by vnfr id """
+
+        if vnfr_id not in self._vnfrs:
+            raise VnfRecordError("VNFR id %s not found", vnfr_id)
+
+        return self._vnfrs[vnfr_id]
+
+    def create_vnfr(self, vnfr):
+        """ Create a VNFR instance """
+        if vnfr.id in self._vnfrs:
+            msg = "Vnfr id %s already exists" % vnfr.id
+            self._log.error(msg)
+            raise VnfRecordError(msg)
+
+        self._log.info("Create VirtualNetworkFunctionRecord %s from vnfd_id: %s",
+                       vnfr.id,
+                       vnfr.vnfd_ref)
+
+        self._vnfrs[vnfr.id] = VirtualNetworkFunctionRecord(
+            self._dts, self._log, self._loop, self._cluster_name, self, self.vcs_handler, vnfr
+            )
+        return self._vnfrs[vnfr.id]
+
+    @asyncio.coroutine
+    def delete_vnfr(self, xact, vnfr):
+        """ Create a VNFR instance """
+        if vnfr.vnfr_id in self._vnfrs:
+            self._log.debug("Deleting VNFR id %s", vnfr.vnfr_id)
+            yield from self._vnfr_handler.delete(xact, vnfr.xpath)
+            del self._vnfrs[vnfr.vnfr_id]
+
+    @asyncio.coroutine
+    def fetch_vnfd(self, vnfd_id):
+        """ Fetch VNFDs based with the vnfd id"""
+        vnfd_path = VirtualNetworkFunctionDescriptor.path_for_id(vnfd_id)
+        self._log.debug("Fetch vnfd with path %s", vnfd_path)
+        vnfd = None
+
+        res_iter = yield from self._dts.query_read(vnfd_path, rwdts.XactFlag.MERGE)
+
+        for ent in res_iter:
+            res = yield from ent
+            vnfd = res.result
+
+        if vnfd is None:
+            err = "Failed to get  Vnfd %s" % vnfd_id
+            self._log.error(err)
+            raise VnfRecordError(err)
+
+        self._log.debug("Fetched vnfd for path %s, vnfd - %s", vnfd_path, vnfd)
+
+        return vnfd
+
+    @asyncio.coroutine
+    def get_vnfd_ref(self, vnfd_id):
+        """ Get Virtual Network Function descriptor for the passed vnfd_id"""
+        vnfd = yield from self.get_vnfd(vnfd_id)
+        vnfd.ref()
+        return vnfd
+
+    @asyncio.coroutine
+    def get_vnfd(self, vnfd_id):
+        """ Get Virtual Network Function descriptor for the passed vnfd_id"""
+        vnfd = None
+        if vnfd_id not in self._vnfds:
+            self._log.error("Cannot find VNFD id:%s", vnfd_id)
+            vnfd = yield from self.fetch_vnfd(vnfd_id)
+
+            if vnfd is None:
+                self._log.error("Cannot find VNFD id:%s", vnfd_id)
+                raise VirtualNetworkFunctionDescriptorError("Cannot find VNFD id:%s", vnfd_id)
+
+            if vnfd.id != vnfd_id:
+                self._log.error("Bad Recovery state {} found for {}".format(vnfd.id, vnfd_id))
+                raise VirtualNetworkFunctionDescriptorError("Bad Recovery state {} found for {}".format(vnfd.id, vnfd_id))
+
+            if vnfd.id not in self._vnfds:
+                self.create_vnfd(vnfd)
+
+        return self._vnfds[vnfd_id]
+
+    def vnfd_in_use(self, vnfd_id):
+        """ Is this VNFD in use """
+        self._log.debug("Is this VNFD in use - msg:%s", vnfd_id)
+        if vnfd_id in self._vnfds:
+            return self._vnfds[vnfd_id].in_use()
+        return False
+
+    @asyncio.coroutine
+    def publish_vnfr(self, xact, path, msg):
+        """ Publish a VNFR """
+        self._log.debug("publish_vnfr called with path %s, msg %s",
+                        path, msg)
+        yield from self.vnfr_handler.update(xact, path, msg)
+
+    def create_vnfd(self, vnfd):
+        """ Create a virtual network function descriptor """
+        self._log.debug("Create virtual networkfunction descriptor - %s", vnfd)
+        if vnfd.id in self._vnfds:
+            self._log.error("Cannot create VNFD %s -VNFD id already exists", vnfd)
+            raise VirtualNetworkFunctionDescriptorError("VNFD already exists-%s", vnfd.id)
+
+        self._vnfds[vnfd.id] = VirtualNetworkFunctionDescriptor(self._dts,
+                                                                self._log,
+                                                                self._loop,
+                                                                self,
+                                                                vnfd)
+        return self._vnfds[vnfd.id]
+
+    def update_vnfd(self, vnfd):
+        """ update the Virtual Network Function descriptor """
+        self._log.debug("Update virtual network function descriptor - %s", vnfd)
+
+        # Hack to remove duplicates from leaf-lists - to be fixed by RIFT-6511
+        for ivld in vnfd.internal_vld:
+            ivld.internal_connection_point_ref = list(set(ivld.internal_connection_point_ref))
+
+        if vnfd.id not in self._vnfds:
+            self._log.debug("No VNFD found - creating VNFD id = %s", vnfd.id)
+            self.create_vnfd(vnfd)
+        else:
+            self._log.debug("Updating VNFD id = %s, vnfd = %s", vnfd.id, vnfd)
+            self._vnfds[vnfd.id].update(vnfd)
+
+    @asyncio.coroutine
+    def delete_vnfd(self, vnfd_id):
+        """ Delete the Virtual Network Function descriptor with the passed id """
+        self._log.debug("Deleting the virtual network function descriptor - %s", vnfd_id)
+        if vnfd_id not in self._vnfds:
+            self._log.debug("Delete VNFD failed - cannot find vnfd-id %s", vnfd_id)
+            raise VirtualNetworkFunctionDescriptorNotFound("Cannot find %s", vnfd_id)
+
+        if self._vnfds[vnfd_id].in_use():
+            self._log.debug("Cannot delete VNFD id %s reference exists %s",
+                            vnfd_id,
+                            self._vnfds[vnfd_id].ref_count)
+            raise VirtualNetworkFunctionDescriptorRefCountExists(
+                "Cannot delete :%s, ref_count:%s",
+                vnfd_id,
+                self._vnfds[vnfd_id].ref_count)
+
+        # Remove any files uploaded with VNFD and stored under $RIFT_ARTIFACTS/libs/<id>
+        try:
+            rift_artifacts_dir = os.environ['RIFT_ARTIFACTS']
+            vnfd_dir = os.path.join(rift_artifacts_dir, 'launchpad/libs', vnfd_id)
+            if os.path.exists(vnfd_dir):
+                shutil.rmtree(vnfd_dir, ignore_errors=True)
+        except Exception as e:
+            self._log.error("Exception in cleaning up VNFD {}: {}".
+                            format(self._vnfds[vnfd_id].name, e))
+            self._log.exception(e)
+
+        del self._vnfds[vnfd_id]
+
+    def vnfd_refcount_xpath(self, vnfd_id):
+        """ xpath for ref count entry """
+        return (VnfdRefCountDtsHandler.XPATH +
+                "[rw-vnfr:vnfd-id-ref = '{}']").format(vnfd_id)
+
+    @asyncio.coroutine
+    def get_vnfd_refcount(self, vnfd_id):
+        """ Get the vnfd_list from this VNFM"""
+        vnfd_list = []
+        if vnfd_id is None or vnfd_id == "":
+            for vnfd in self._vnfds.values():
+                vnfd_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount()
+                vnfd_msg.vnfd_id_ref = vnfd.id
+                vnfd_msg.instance_ref_count = vnfd.ref_count
+                vnfd_list.append((self.vnfd_refcount_xpath(vnfd.id), vnfd_msg))
+        elif vnfd_id in self._vnfds:
+            vnfd_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount()
+            vnfd_msg.vnfd_id_ref = self._vnfds[vnfd_id].id
+            vnfd_msg.instance_ref_count = self._vnfds[vnfd_id].ref_count
+            vnfd_list.append((self.vnfd_refcount_xpath(vnfd_id), vnfd_msg))
+
+        return vnfd_list
+
+
+class VnfmTasklet(rift.tasklets.Tasklet):
+    """ VNF Manager tasklet class """
+    def __init__(self, *args, **kwargs):
+        super(VnfmTasklet, self).__init__(*args, **kwargs)
+        self.rwlog.set_category("rw-mano-log")
+        self.rwlog.set_subcategory("vnfm")
+
+        self._dts = None
+        self._vnfm = None
+
+    def start(self):
+        try:
+            super(VnfmTasklet, self).start()
+            self.log.info("Starting VnfmTasklet")
+
+            self.log.setLevel(logging.DEBUG)
+
+            self.log.debug("Registering with dts")
+            self._dts = rift.tasklets.DTS(self.tasklet_info,
+                                          RwVnfmYang.get_schema(),
+                                          self.loop,
+                                          self.on_dts_state_change)
+
+            self.log.debug("Created DTS Api GI Object: %s", self._dts)
+        except Exception:
+            print("Caught Exception in VNFM start:", sys.exc_info()[0])
+            raise
+
+    def on_instance_started(self):
+        """ Task insance started callback """
+        self.log.debug("Got instance started callback")
+
+    def stop(self):
+        try:
+            self._dts.deinit()
+        except Exception:
+            print("Caught Exception in VNFM stop:", sys.exc_info()[0])
+            raise
+
+    @asyncio.coroutine
+    def init(self):
+        """ Task init callback """
+        try:
+            vm_parent_name = self.tasklet_info.get_parent_vm_parent_instance_name()
+            assert vm_parent_name is not None
+            self._vnfm = VnfManager(self._dts, self.log, self.loop, vm_parent_name)
+            yield from self._vnfm.run()
+        except Exception:
+            print("Caught Exception in VNFM init:", sys.exc_info()[0])
+            raise
+
+    @asyncio.coroutine
+    def run(self):
+        """ Task run callback """
+        pass
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Take action according to current dts state to transition
+        application into the corresponding application state
+
+        Arguments
+            state - current dts state
+        """
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self._dts.handle.set_state(next_state)
diff --git a/rwlaunchpad/plugins/rwvnfm/rwvnfmtasklet.py b/rwlaunchpad/plugins/rwvnfm/rwvnfmtasklet.py
new file mode 100755 (executable)
index 0000000..37ada1a
--- /dev/null
@@ -0,0 +1,28 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwvnfmtasklet
+
+class Tasklet(rift.tasklets.rwvnfmtasklet.VnfmTasklet):
+    pass
+
+# vim: sw=4
diff --git a/rwlaunchpad/plugins/rwvns/CMakeLists.txt b/rwlaunchpad/plugins/rwvns/CMakeLists.txt
new file mode 100644 (file)
index 0000000..b10d81d
--- /dev/null
@@ -0,0 +1,51 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Austin Cormier
+# Creation Date: 05/15/2015
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwvnstasklet)
+
+set(subdirs yang vala)
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+rift_python_install_tree(
+  FILES
+    rift/vlmgr/__init__.py
+    rift/vlmgr/rwvlmgr.py
+    rift/topmgr/__init__.py
+    rift/topmgr/rwtopmgr.py
+    rift/topmgr/rwtopdatastore.py
+    rift/topmgr/core.py
+    rift/topmgr/mock.py
+    rift/topmgr/sdnsim.py
+    rift/tasklets/${TASKLET_NAME}/__init__.py
+    rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
+  COMPONENT ${PKG_LONG_NAME}
+  PYTHON3_ONLY)
diff --git a/rwlaunchpad/plugins/rwvns/Makefile b/rwlaunchpad/plugins/rwvns/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/__init__.py b/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/__init__.py
new file mode 100644 (file)
index 0000000..6b68c19
--- /dev/null
@@ -0,0 +1 @@
+from .rwvnstasklet import VnsTasklet
diff --git a/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/rwvnstasklet.py b/rwlaunchpad/plugins/rwvns/rift/tasklets/rwvnstasklet/rwvnstasklet.py
new file mode 100755 (executable)
index 0000000..1f88824
--- /dev/null
@@ -0,0 +1,458 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import logging
+import os
+import sys
+
+import gi
+gi.require_version('RwVnsYang', '1.0')
+gi.require_version('RwDts', '1.0')
+from gi.repository import (
+    RwVnsYang,
+    RwSdnYang,
+    RwDts as rwdts,
+    RwTypes,
+    ProtobufC,
+)
+
+import rift.tasklets
+
+from rift.vlmgr import (
+    VlrDtsHandler,
+    VldDtsHandler,
+    VirtualLinkRecord,
+)
+
+from rift.topmgr import (
+    NwtopStaticDtsHandler,
+    NwtopDiscoveryDtsHandler,
+    NwtopDataStore,
+    SdnAccountMgr,
+)
+
+
+class SdnInterfaceError(Exception):
+    """ SDN interface creation Error """
+    pass
+
+
+class SdnPluginError(Exception):
+    """ SDN plugin creation Error """
+    pass
+
+
+class VlRecordError(Exception):
+    """ Vlr Record creation Error """
+    pass
+
+
+class VlRecordNotFound(Exception):
+    """ Vlr Record not found"""
+    pass
+
+class SdnAccountError(Exception):
+    """ Error while creating/deleting/updating SDN Account"""
+    pass
+
+class SdnAccountNotFound(Exception):
+    pass
+
+class SDNAccountDtsOperdataHandler(object):
+    def __init__(self, dts, log, loop, parent):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._parent = parent
+
+    @asyncio.coroutine
+    def _register_show_status(self):
+        def get_xpath(sdn_name=None):
+            return "D,/rw-sdn:sdn-account{}/rw-sdn:connection-status".format(
+                    "[name='%s']" % sdn_name if sdn_name is not None else ''
+                   )
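+        # e.g. get_xpath() -> "D,/rw-sdn:sdn-account/rw-sdn:connection-status"
+        # and  get_xpath("odl") ->
+        #   "D,/rw-sdn:sdn-account[name='odl']/rw-sdn:connection-status"
+        # ("odl" is only an illustrative account name)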
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            path_entry = RwSdnYang.SDNAccountConfig.schema().keyspec_to_entry(ks_path)
+            sdn_account_name = path_entry.key00.name
+            self._log.debug("Got show sdn connection status request: %s", ks_path.create_string())
+
+            try:
+                saved_accounts = self._parent._acctmgr.get_saved_sdn_accounts(sdn_account_name)
+                for account in saved_accounts:
+                    sdn_acct = RwSdnYang.SDNAccountConfig()
+                    sdn_acct.from_dict(account.as_dict())
+
+                    self._log.debug("Responding to sdn connection status request: %s", sdn_acct.connection_status)
+                    xact_info.respond_xpath(
+                            rwdts.XactRspCode.MORE,
+                            xpath=get_xpath(account.name),
+                            msg=sdn_acct.connection_status,
+                            )
+            except KeyError as e:
+                self._log.warning(str(e))
+                xact_info.respond_xpath(rwdts.XactRspCode.NA)
+                return
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        yield from self._dts.register(
+                xpath=get_xpath(),
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=on_prepare),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+    @asyncio.coroutine
+    def _register_validate_rpc(self):
+        def get_xpath():
+            return "/rw-sdn:update-sdn-status"
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            if not msg.has_field("sdn_account"):
+                raise SdnAccountNotFound("SDN account name not provided")
+
+            sdn_account_name = msg.sdn_account
+            account = self._parent._acctmgr.get_sdn_account(sdn_account_name)
+            if account is None:
+                self._log.warning("SDN account %s does not exist", sdn_account_name)
+                xact_info.respond_xpath(rwdts.XactRspCode.NA)
+                return
+
+            self._parent._acctmgr.start_validate_credentials(self._loop, sdn_account_name)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        yield from self._dts.register(
+                xpath=get_xpath(),
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_prepare=on_prepare
+                    ),
+                flags=rwdts.Flag.PUBLISHER,
+                )
+
+    @asyncio.coroutine
+    def register(self):
+        yield from self._register_show_status()
+        yield from self._register_validate_rpc()
+
+class SDNAccountDtsHandler(object):
+    XPATH = "C,/rw-sdn:sdn-account"
+
+    def __init__(self, dts, log, parent):
+        self._dts = dts
+        self._log = log
+        self._parent = parent
+
+        self._sdn_account = {}
+
+    def _set_sdn_account(self, account):
+        self._log.info("Setting sdn account: {}".format(account))
+        if account.name in self._sdn_account:
+            self._log.error("SDN Account with name %s already exists. Ignoring config", account.name)
+            return
+        self._sdn_account[account.name] = account
+        self._parent._acctmgr.set_sdn_account(account)
+
+    def _del_sdn_account(self, account_name):
+        self._log.info("Deleting sdn account: {}".format(account_name))
+        del self._sdn_account[account_name]
+
+        self._parent._acctmgr.del_sdn_account(account_name)
+
+    def _update_sdn_account(self, account):
+        self._log.info("Updating sdn account: {}".format(account))
+        # No need to update locally saved sdn_account's updated fields, as they
+        # are not used anywhere. Call the parent's update callback.
+        self._parent._acctmgr.update_sdn_account(account)
+
+    @asyncio.coroutine
+    def register(self):
+        def apply_config(dts, acg, xact, action, _):
+            self._log.debug("Got sdn account apply config (xact: %s) (action: %s)", xact, action)
+            if action == rwdts.AppconfAction.INSTALL and xact.id is None:
+                self._log.debug("No xact handle.  Skipping apply config")
+                return RwTypes.RwStatus.SUCCESS
+
+            return RwTypes.RwStatus.SUCCESS
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+            """ Prepare callback from DTS for SDN Account config """
+
+            self._log.info("SDN Cloud account config received: %s", msg)
+
+            fref = ProtobufC.FieldReference.alloc()
+            fref.goto_whole_message(msg.to_pbcm())
+
+            if fref.is_field_deleted():
+                # Delete the sdn account record
+                self._del_sdn_account(msg.name)
+            else:
+                # If the account already exists, then this is an update.
+                if msg.name in self._sdn_account:
+                    self._log.debug("SDN account already exists. Invoking on_prepare update request")
+                    if msg.has_field("account_type"):
+                        errmsg = "Cannot update SDN account's account-type."
+                        self._log.error(errmsg)
+                        xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+                                                   SDNAccountDtsHandler.XPATH,
+                                                   errmsg)
+                        raise SdnAccountError(errmsg)
+
+                    # Update the sdn account record
+                    self._update_sdn_account(msg)
+                else:
+                    self._log.debug("SDN account does not already exist. Invoking on_prepare add request")
+                    if not msg.has_field('account_type'):
+                        errmsg = "New SDN account must contain account-type field."
+                        self._log.error(errmsg)
+                        xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+                                                   SDNAccountDtsHandler.XPATH,
+                                                   errmsg)
+                        raise SdnAccountError(errmsg)
+
+                    # Set the sdn account record
+                    self._set_sdn_account(msg)
+
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+
+        self._log.debug("Registering for Sdn Account config using xpath: %s",
+                        SDNAccountDtsHandler.XPATH,
+                        )
+
+        acg_handler = rift.tasklets.AppConfGroup.Handler(
+                        on_apply=apply_config,
+                        )
+
+        with self._dts.appconf_group_create(acg_handler) as acg:
+            acg.register(
+                    xpath=SDNAccountDtsHandler.XPATH,
+                    flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
+                    on_prepare=on_prepare
+                    )
+
+
+class VnsManager(object):
+    """ The Virtual Network Service Manager """
+    def __init__(self, dts, log, log_hdl, loop):
+        self._dts = dts
+        self._log = log
+        self._log_hdl = log_hdl
+        self._loop = loop
+        self._vlr_handler = VlrDtsHandler(dts, log, loop, self)
+        self._vld_handler = VldDtsHandler(dts, log, loop, self)
+        self._sdn_handler = SDNAccountDtsHandler(dts, log, self)
+        self._sdn_opdata_handler = SDNAccountDtsOperdataHandler(dts, log, loop, self)
+        self._acctmgr = SdnAccountMgr(self._log, self._log_hdl, self._loop)
+        self._nwtopdata_store = NwtopDataStore(log)
+        self._nwtopdiscovery_handler = NwtopDiscoveryDtsHandler(dts, log, loop, self._acctmgr, self._nwtopdata_store)
+        self._nwtopstatic_handler = NwtopStaticDtsHandler(dts, log, loop, self._acctmgr, self._nwtopdata_store)
+        self._vlrs = {}
+
+    @asyncio.coroutine
+    def register_vlr_handler(self):
+        """ Register vlr DTS handler """
+        self._log.debug("Registering  DTS VLR handler")
+        yield from self._vlr_handler.register()
+
+    @asyncio.coroutine
+    def register_vld_handler(self):
+        """ Register vlr DTS handler """
+        self._log.debug("Registering  DTS VLD handler")
+        yield from self._vld_handler.register()
+
+    @asyncio.coroutine
+    def register_sdn_handler(self):
+        """ Register vlr DTS handler """
+        self._log.debug("Registering  SDN Account config handler")
+        yield from self._sdn_handler.register()
+        yield from self._sdn_opdata_handler.register()
+
+    @asyncio.coroutine
+    def register_nwtopstatic_handler(self):
+        """ Register static NW topology DTS handler """
+        self._log.debug("Registering  static DTS NW topology handler")
+        yield from self._nwtopstatic_handler.register()
+
+    @asyncio.coroutine
+    def register_nwtopdiscovery_handler(self):
+        """ Register discovery-based NW topology DTS handler """
+        self._log.debug("Registering  discovery-based DTS NW topology handler")
+        yield from self._nwtopdiscovery_handler.register()
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register all static DTS handlers"""
+        yield from self.register_sdn_handler()
+        yield from self.register_vlr_handler()
+        yield from self.register_vld_handler()
+        yield from self.register_nwtopstatic_handler()
+        # Not used for now
+        yield from self.register_nwtopdiscovery_handler()
+
+    def create_vlr(self, msg):
+        """ Create VLR """
+        if msg.id in self._vlrs:
+            err = "Vlr id %s already exists" % msg.id
+            self._log.error(err)
+            # raise VlRecordError(err)
+            return self._vlrs[msg.id]
+
+        self._log.info("Creating VirtualLinkRecord %s", msg.id)
+        self._vlrs[msg.id] = VirtualLinkRecord(self._dts,
+                                               self._log,
+                                               self._loop,
+                                               self,
+                                               msg,
+                                               msg.res_id
+                                               )
+        return self._vlrs[msg.id]
+
+    def get_vlr(self, vlr_id):
+        """  Get VLR by vlr id """
+        return self._vlrs[vlr_id]
+
+    @asyncio.coroutine
+    def delete_vlr(self, vlr_id, xact):
+        """ Delete VLR with the passed id"""
+        if vlr_id not in self._vlrs:
+            err = "Delete Failed - Vlr id %s not found" % vlr_id
+            self._log.error(err)
+            raise VlRecordNotFound(err)
+
+        self._log.info("Deleting virtual link id %s", vlr_id)
+        yield from self._vlrs[vlr_id].terminate(xact)
+        del self._vlrs[vlr_id]
+        self._log.info("Deleted virtual link id %s", vlr_id)
+
+    def find_vlr_by_vld_id(self, vld_id):
+        """ Find a VLR matching the VLD Id """
+        for vlr in self._vlrs.values():
+            if vlr.vld_id == vld_id:
+                return vlr
+        return None
+
+    @asyncio.coroutine
+    def run(self):
+        """ Run this VNSM instance """
+        self._log.debug("Run VNSManager - registering static DTS handlers")
+        yield from self.register()
+
+    def vld_in_use(self, vld_id):
+        """ Is this VLD in use """
+        return False
+
+    @asyncio.coroutine
+    def publish_vlr(self, xact, path, msg):
+        """ Publish a VLR """
+        self._log.debug("Publish vlr called with path %s, msg %s",
+                        path, msg)
+        yield from self._vlr_handler.update(xact, path, msg)
+
+    @asyncio.coroutine
+    def unpublish_vlr(self, xact, path):
+        """ Publish a VLR """
+        self._log.debug("Unpublish vlr called with path %s", path)
+        yield from self._vlr_handler.delete(xact, path)
+
+
+class VnsTasklet(rift.tasklets.Tasklet):
+    """ The VNS tasklet class """
+    def __init__(self, *args, **kwargs):
+        super(VnsTasklet, self).__init__(*args, **kwargs)
+        self.rwlog.set_category("rw-mano-log")
+        self.rwlog.set_subcategory("vns")
+
+        self._dts = None
+        self._vlr_handler = None
+
+        self._vnsm = None
+        # A mapping of instantiated vlr_id's to VirtualLinkRecord objects
+        self._vlrs = {}
+
+    def start(self):
+        super(VnsTasklet, self).start()
+        self.log.info("Starting VnsTasklet")
+
+        self.log.debug("Registering with dts")
+        self._dts = rift.tasklets.DTS(self.tasklet_info,
+                                      RwVnsYang.get_schema(),
+                                      self.loop,
+                                      self.on_dts_state_change)
+
+        self.log.debug("Created DTS Api GI Object: %s", self._dts)
+
+    def on_instance_started(self):
+        """ The task instance started callback"""
+        self.log.debug("Got instance started callback")
+
+    def stop(self):
+        try:
+            self._dts.deinit()
+        except Exception:
+            print("Caught Exception in VNS stop:", sys.exc_info()[0])
+            raise
+
+    @asyncio.coroutine
+    def init(self):
+        """ task init callback"""
+        self._vnsm = VnsManager(dts=self._dts,
+                                log=self.log,
+                                log_hdl=self.log_hdl,
+                                loop=self.loop)
+        yield from self._vnsm.run()
+
+        # NSM needs to detect VLD deletion that has active VLR
+        # self._vld_handler = VldDescriptorConfigDtsHandler(
+        #         self._dts, self.log, self.loop, self._vlrs,
+        #         )
+        # yield from self._vld_handler.register()
+
+    @asyncio.coroutine
+    def run(self):
+        """ tasklet run callback """
+        pass
+
+    @asyncio.coroutine
+    def on_dts_state_change(self, state):
+        """Take action according to current dts state to transition
+        application into the corresponding application state
+
+        Arguments
+            state - current dts state
+        """
+        switch = {
+            rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+            rwdts.State.CONFIG: rwdts.State.RUN,
+        }
+
+        handlers = {
+            rwdts.State.INIT: self.init,
+            rwdts.State.RUN: self.run,
+        }
+
+        # Transition application to next state
+        handler = handlers.get(state, None)
+        if handler is not None:
+            yield from handler()
+
+        # Transition dts to next state
+        next_state = switch.get(state, None)
+        if next_state is not None:
+            self._dts.handle.set_state(next_state)
diff --git a/rwlaunchpad/plugins/rwvns/rift/topmgr/__init__.py b/rwlaunchpad/plugins/rwvns/rift/topmgr/__init__.py
new file mode 100644 (file)
index 0000000..f570abc
--- /dev/null
@@ -0,0 +1,37 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Ravi Chamarty
+# Creation Date: 10/28/2015
+# 
+
+from .rwtopmgr import (
+    NwtopDiscoveryDtsHandler,
+    NwtopStaticDtsHandler,
+    SdnAccountMgr,
+)
+
+from .rwtopdatastore import (
+    NwtopDataStore,
+)
+
+try:
+    from .sdnsim import SdnSim
+    from .core import Topology
+    from .mock import Mock
+
+except ImportError as e:
+    print("Error: Unable to load sdn implementation: %s" % str(e))
+
diff --git a/rwlaunchpad/plugins/rwvns/rift/topmgr/core.py b/rwlaunchpad/plugins/rwvns/rift/topmgr/core.py
new file mode 100644 (file)
index 0000000..dd3ad2f
--- /dev/null
@@ -0,0 +1,49 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import functools
+
+#from . import exceptions
+
+
+def unsupported(f):
+    @functools.wraps(f)
+    def impl(*args, **kwargs):
+        msg = '{} not supported'.format(f.__name__)
+        # The rift exceptions module is not imported here (see the commented
+        # import above), so raise the standard exception instead
+        raise NotImplementedError(msg)
+
+    return impl
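+
+# Illustrative (hypothetical subclass): a driver that does not override an
+# @unsupported method raises when that method is called:
+#
+#     class NoopDriver(Topology):
+#         pass
+#
+#     NoopDriver().get_network_list(account)
+#     # NotImplementedError: get_network_list not supported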
+
+
+class Topology(object):
+    """
+    Topology defines a base class for sdn driver implementations. Note that
+    not all drivers will support the complete set of functionality presented
+    here.
+    """
+
+    @unsupported
+    def get_network_list(self, account):
+        """
+        Returns the discovered network associated with the specified account.
+
+        @param account - a SDN account
+
+        @return a discovered network
+        """
+        pass
+
diff --git a/rwlaunchpad/plugins/rwvns/rift/topmgr/mock.py b/rwlaunchpad/plugins/rwvns/rift/topmgr/mock.py
new file mode 100644 (file)
index 0000000..cc0e489
--- /dev/null
@@ -0,0 +1,50 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import mock
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+from gi.repository import RwcalYang
+
+from . import core
+
+import logging
+
+logger = logging.getLogger('rwsdn.mock')
+
+class Mock(core.Topology):
+    """This class implements the abstract methods in the Topology class.
+    Mock is used for unit testing."""
+
+    def __init__(self):
+        super(Mock, self).__init__()
+
+        # A MagicMock stands in for a real topology backend in unit tests;
+        # create_default_topology is a mocked call, not a real helper
+        self._mock = mock.MagicMock()
+        self._mock.create_default_topology()
+
+    def get_network_list(self, account):
+        """
+        Returns the discovered network
+
+        @param account - a SDN account
+
+        """
+        logger.debug("Not yet implemented")
+        return None
+
diff --git a/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopdatastore.py b/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopdatastore.py
new file mode 100644 (file)
index 0000000..ad021a8
--- /dev/null
@@ -0,0 +1,186 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import gi
+gi.require_version('RwTypes', '1.0')
+from gi.repository import (
+    IetfNetworkYang,
+    IetfNetworkTopologyYang,
+    IetfL2TopologyYang,
+    RwTopologyYang,
+    RwTypes
+)
+import logging
+from gi.repository.RwTypes import RwStatus
+
+
+class NwtopDataStore(object):
+    """ Common datastore for discovered and static topologies """
+    def __init__(self, log):
+        self._networks = {}
+        self._log = log
+
+    """ Deep copy utility for topology class """
+    def rwtop_copy_object(self, obj):
+        dup = obj.__class__()
+        dup.copy_from(obj)
+        return dup
+
+    """ Utility for updating L2 topology attributes """
+    def _update_l2_attr(self, current_elem, new_elem, new_l2_attr, attr_field_name):
+        if not getattr(current_elem, attr_field_name):
+           self._log.debug ("Creating L2 attributes..%s", l2_attr_field)
+           setattr(current_elem, attr_field_name, new_l2_attr)
+           return
+
+        for l2_attr_field in new_l2_attr.fields:
+             l2_elem_attr_value = getattr(new_l2_attr, l2_attr_field)
+             if l2_elem_attr_value:
+                 self._log.debug ("Updating L2 attributes..%s", l2_attr_field)
+                 setattr(getattr(current_elem, attr_field_name), l2_attr_field, getattr(new_l2_attr, l2_attr_field))
+
+    """ Utility for updating termination point attributes """
+    def _update_termination_point(self, current_node, new_node, new_tp):
+        current_tp = next((x for x in current_node.termination_point if x.tp_id == new_tp.tp_id), None)
+        if current_tp is None:
+            self._log.debug("Creating termination point..%s", new_tp)
+            # Add tp to current node
+            new_tp_dup = self.rwtop_copy_object(new_tp)
+            current_node.termination_point.append(new_tp_dup)
+            return
+        # Update current tp
+        for tp_field in new_tp.fields:
+            tp_field_value = getattr(new_tp, tp_field)
+            if tp_field_value:
+                self._log.debug("Updating termination point..%s", tp_field)
+                if (tp_field == 'tp_id'):
+                    # Don't change key
+                    pass
+                elif (tp_field == 'l2_termination_point_attributes'):
+                    self._update_l2_attr(current_tp, new_tp, tp_field_value, tp_field)
+                elif (tp_field == 'supporting_termination_point'):
+                    self._log.debug(tp_field)
+                else:
+                    self._log.info("Updating termination point..Not implemented %s", tp_field)
+                    #raise NotImplementedError
+
+    """ Utility for updating link attributes """
+    def _update_link(self, current_nw, new_nw, new_link):
+        current_link = next((x for x in current_nw.link if x.link_id == new_link.link_id), None)
+        if current_link is None:
+            # Add link to current nw
+            self._log.info("Creating link..%s", new_link )
+            new_link_dup = self.rwtop_copy_object(new_link)
+            current_nw.link.append(new_link_dup)
+            return
+        # Update current link
+        for link_field in new_link.fields:
+            link_field_value = getattr(new_link, link_field)
+            if link_field_value:
+                self._log.info("Updating link..%s", link_field)
+                if (link_field == 'link_id'):
+                    # Don't change key
+                    pass
+                elif (link_field == 'source'):
+                    if getattr(link_field_value, 'source_node') is not None:
+                        current_link.source.source_node = link_field_value.source_node
+                    if getattr(link_field_value, 'source_tp') is not None:
+                        current_link.source.source_tp = link_field_value.source_tp
+                elif (link_field == 'destination'):
+                    if getattr(link_field_value, 'dest_node') is not None:
+                        current_link.destination.dest_node = link_field_value.dest_node
+                    if getattr(link_field_value, 'dest_tp') is not None:
+                        current_link.destination.dest_tp = link_field_value.dest_tp
+                elif (link_field == 'l2_link_attributes'):
+                    self._update_l2_attr(current_link, new_link, link_field_value, link_field)
+                elif (link_field == 'supporting_link'):
+                    self._log.debug(link_field)
+                else:
+                    self._log.info("Update link..Not implemented %s", link_field)
+                    #raise NotImplementedError
+
+
+    """ Utility for updating node attributes """
+    def _update_node(self, current_nw, new_nw, new_node):
+        current_node = next((x for x in current_nw.node if x.node_id == new_node.node_id), None)
+        if current_node is None:
+            # Add node to current nw
+            self._log.debug("Creating node..%s", new_node)
+            new_node_dup = self.rwtop_copy_object(new_node)
+            current_nw.node.append(new_node_dup)
+            return
+        # Update current node
+        for node_field in new_node.fields:
+            node_field_value = getattr(new_node, node_field)
+            if node_field_value:
+                self._log.debug("Updating node..%s", node_field)
+                if (node_field == 'node_id'):
+                    # Don't change key
+                    pass
+                elif (node_field == 'l2_node_attributes'):
+                    self._update_l2_attr(current_node, new_node, node_field_value, node_field)
+                elif (node_field == 'termination_point'):
+                    for tp in new_node.termination_point:
+                        self._update_termination_point(current_node, new_node, tp)
+                elif (node_field == 'supporting_node'):
+                    self._log.debug(node_field)
+                else:
+                    self._log.info("Update node..Not implemented %s", node_field)
+                    #raise NotImplementedError
+
+
+    """ API for retrieving internal network """
+    def get_network(self, network_id):
+        if (network_id not in self._networks):
+            return None
+        return self._networks[network_id]
+
+    """ API for creating internal network """
+    def create_network(self, key, nw):
+        self._networks[key] = self.rwtop_copy_object(nw)
+
+    """ API for updating internal network """
+    def update_network(self, key, new_nw):
+        if key not in self._networks:
+            self._log.debug("Creating network..New_nw %s", new_nw)
+            self._networks[key] = self.rwtop_copy_object(new_nw)
+            return
+        # Iterating thru changed fields
+        for nw_field in new_nw.fields:
+            nw_field_value = getattr(new_nw, nw_field)
+            self._log.debug("Update nw..nw_field %s", nw_field)
+            if nw_field_value:
+                if (nw_field == 'node'):
+                    for node in new_nw.node:
+                        self._update_node(self._networks[key], new_nw, node)
+                elif (nw_field == 'network_id'):
+                    # Don't change key
+                    pass
+                elif (nw_field == 'link'):
+                    for link in new_nw.link:
+                        self._update_link(self._networks[key], new_nw, link)
+                elif (nw_field == 'network_types'):
+                    self._networks[key].network_types.l2_network = self._networks[key].network_types.l2_network.new()
+                elif (nw_field == 'l2_network_attributes'):
+                    self._update_l2_attr(self._networks[key], new_nw, nw_field_value, nw_field)
+                else:
+                    self._log.info("Update nw..Not implemented %s", nw_field)
+                    #raise NotImplementedError
+
+
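+# A minimal usage sketch of the datastore API above, assuming an instance of
+# the enclosing class (likely the `nwdatastore` object handed to the DTS
+# handlers in rwtopmgr.py below):
+#
+#     nwdatastore.create_network('sdn1:net1', nw)       # stores a deep copy
+#     nwdatastore.update_network('sdn1:net1', new_nw)   # field-wise merge
+#     merged = nwdatastore.get_network('sdn1:net1')     # None if key unknown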
diff --git a/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopmgr.py b/rwlaunchpad/plugins/rwvns/rift/topmgr/rwtopmgr.py
new file mode 100755 (executable)
index 0000000..b095fbc
--- /dev/null
@@ -0,0 +1,329 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwSdn', '1.0')
+from gi.repository import (
+    RwDts as rwdts,
+    IetfNetworkYang,
+    IetfNetworkTopologyYang,
+    IetfL2TopologyYang,
+    RwTopologyYang,
+    RwsdnYang,
+    RwTypes
+)
+
+from gi.repository.RwTypes import RwStatus
+import rw_peas
+import rift.tasklets
+
+class SdnGetPluginError(Exception):
+    """ Error while fetching SDN plugin """
+    pass
+
+
+class SdnGetInterfaceError(Exception):
+    """ Error while fetching SDN interface"""
+    pass
+
+
+class SdnAccountMgr(object):
+    """ Implements the interface to backend plugins to fetch topology """
+    def __init__(self, log, log_hdl, loop):
+        self._account = {}
+        self._log = log
+        self._log_hdl = log_hdl
+        self._loop = loop
+        self._sdn = {}
+
+        self._regh = None
+
+        self._status = RwsdnYang.SDNAccount_ConnectionStatus(
+                status='unknown',
+                details="Connection status lookup not started"
+                )
+
+        self._validate_task = None
+
+    def set_sdn_account(self, account):
+        if (account.name in self._account):
+            self._log.error("SDN Account is already set")
+        else:
+            sdn_account = RwsdnYang.SDNAccount()
+            sdn_account.from_dict(account.as_dict())
+            sdn_account.name = account.name
+            self._account[account.name] = sdn_account
+            self._log.debug("Account set is %s , %s", type(self._account), self._account)
+            self.start_validate_credentials(self._loop, account.name)
+
+    def del_sdn_account(self, name):
+        self._log.debug("Account deleted is %s , %s", type(self._account), name)
+        del self._account[name]
+
+    def update_sdn_account(self, account):
+        self._log.debug("Account updated is %s , %s", type(self._account), account)
+        if account.name in self._account:
+            sdn_account = self._account[account.name]
+
+            sdn_account.from_dict(
+                account.as_dict(),
+                ignore_missing_keys=True,
+                )
+            self._account[account.name] = sdn_account
+            self.start_validate_credentials(self._loop, account.name)
+
+    def get_sdn_account(self, name):
+        """
+        Returns the stored RwsdnYang.SDNAccount() object for the given name
+        """
+        if (name in self._account):
+            return self._account[name]
+        else:
+            self._log.error("ERROR: SDN account %s is not configured", name)
+
+    def get_saved_sdn_accounts(self, name):
+        ''' Get SDN Account corresponding to passed name, or all saved accounts if name is None'''
+        saved_sdn_accounts = []
+
+        if name is None or name == "":
+            sdn_accounts = list(self._account.values())
+            saved_sdn_accounts.extend(sdn_accounts)
+        elif name in self._account:
+            account = self._account[name]
+            saved_sdn_accounts.append(account)
+        else:
+            errstr = "SDN account {} does not exist".format(name)
+            raise KeyError(errstr)
+
+        return saved_sdn_accounts
+
+    def get_sdn_plugin(self, name):
+        """
+        Loads rw.sdn plugin via libpeas
+        """
+        if (name in self._sdn):
+            return self._sdn[name]
+        account = self.get_sdn_account(name)
+        plugin_name = getattr(account, account.account_type).plugin_name
+        self._log.info("SDN plugin being created")
+        plugin = rw_peas.PeasPlugin(plugin_name, 'RwSdn-1.0')
+        engine, info, extension = plugin()
+
+        self._sdn[name] = plugin.get_interface("Topology")
+        try:
+            rc = self._sdn[name].init(self._log_hdl)
+            assert rc == RwStatus.SUCCESS
+        except Exception:
+            self._log.error("ERROR: SDN plugin instantiation failed")
+        else:
+            self._log.info("SDN plugin successfully instantiated")
+        return self._sdn[name]
+
+    @asyncio.coroutine
+    def validate_sdn_account_credentials(self, loop, name):
+        self._log.debug("Validating SDN Account credentials %s", name)
+        self._status = RwsdnYang.SDNAccount_ConnectionStatus(
+                status="validating",
+                details="SDN account connection validation in progress"
+                )
+
+        _sdnacct = self.get_sdn_account(name)
+        if (_sdnacct is None):
+            raise SdnGetPluginError
+        _sdnplugin = self.get_sdn_plugin(name)
+        if (_sdnplugin is None):
+            raise SdnGetInterfaceError
+
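+        # validate_sdn_creds is a blocking plugin call, so it is offloaded to
+        # the default executor to keep the event loop responsive.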
+        rwstatus, status = yield from loop.run_in_executor(
+                None,
+                _sdnplugin.validate_sdn_creds,
+                _sdnacct,
+                )
+
+        if rwstatus == RwTypes.RwStatus.SUCCESS:
+            self._status = RwsdnYang.SDNAccount_ConnectionStatus.from_dict(status.as_dict())
+        else:
+            self._status = RwsdnYang.SDNAccount_ConnectionStatus(
+                    status="failure",
+                    details="Error when calling CAL validate sdn creds"
+                    )
+
+        self._log.info("Got sdn account validation response: %s", self._status)
+        _sdnacct.connection_status = self._status
+
+    def start_validate_credentials(self, loop, name):
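+        # Only the most recent credentials are validated: any in-flight
+        # validation task is cancelled before a new one is scheduled.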
+        if self._validate_task is not None:
+            self._validate_task.cancel()
+            self._validate_task = None
+
+        self._validate_task = asyncio.ensure_future(
+                self.validate_sdn_account_credentials(loop, name),
+                loop=loop
+                )
+
+
+class NwtopDiscoveryDtsHandler(object):
+    """ Handles DTS interactions for the Discovered Topology registration """
+    DISC_XPATH = "D,/nd:network"
+
+    def __init__(self, dts, log, loop, acctmgr, nwdatastore):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._acctmgr = acctmgr
+        self._nwdatastore = nwdatastore
+
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ The registration handle associated with this Handler"""
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for the Discovered Topology path """
+
+        @asyncio.coroutine
+        def on_ready(regh, status):
+            """  On_ready for Discovered Topology registration """
+            self._log.debug("PUB reg ready for Discovered Topology handler regn_hdl(%s) status %s",
+                                         regh, status)
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            """ prepare for Discovered Topology registration"""
+            self._log.debug(
+                "Got topology on_prepare callback (xact_info: %s, action: %s): %s",
+                xact_info, action, msg
+                )
+
+            if action == rwdts.QueryAction.READ:
+                for name in self._acctmgr._account:
+                    _sdnacct = self._acctmgr.get_sdn_account(name)
+                    if (_sdnacct is None):
+                        raise SdnGetPluginError
+
+                    _sdnplugin = self._acctmgr.get_sdn_plugin(name)
+                    if (_sdnplugin is None):
+                        raise SdnGetInterfaceError
+
+                    rc, nwtop = _sdnplugin.get_network_list(_sdnacct)
+                    #assert rc == RwStatus.SUCCESS
+                    if rc != RwStatus.SUCCESS:
+                        self._log.error("Fetching get network list for SDN Account %s failed", name)
+                        xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+                        return
+
+                    self._log.debug("Topology: Retrieved network attributes ")
+                    for nw in nwtop.network:
+                        # Add SDN account name
+                        nw.rw_network_attributes.sdn_account_name = name
+                        nw.server_provided = False
+                        nw.network_id = name + ':' + nw.network_id
+                        self._log.debug("...Network id %s", nw.network_id)
+                        nw_xpath = ("D,/nd:network[network-id=\'{}\']").format(nw.network_id)
+                        xact_info.respond_xpath(rwdts.XactRspCode.MORE,
+                                        nw_xpath, nw)
+
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+            else:
+                err = "%s action on discovered Topology not supported" % action
+                raise NotImplementedError(err)
+
+        self._log.debug("Registering for discovered topology using xpath %s", NwtopDiscoveryDtsHandler.DISC_XPATH)
+
+        handler = rift.tasklets.DTS.RegistrationHandler(
+            on_ready=on_ready,
+            on_prepare=on_prepare,
+            )
+
+        yield from self._dts.register(
+            NwtopDiscoveryDtsHandler.DISC_XPATH,
+            flags=rwdts.Flag.PUBLISHER,
+            handler=handler
+            )
+
+
+class NwtopStaticDtsHandler(object):
+    """ Handles DTS interactions for the Static Topology registration """
+    STATIC_XPATH = "C,/nd:network"
+
+    def __init__(self, dts, log, loop, acctmgr, nwdatastore):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._acctmgr = acctmgr
+
+        self._regh = None
+        self.pending = {}
+        self._nwdatastore = nwdatastore
+
+    @property
+    def regh(self):
+        """ The registration handle associated with this Handler"""
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for the Static Topology path """
+
+        @asyncio.coroutine
+        def prepare_nw_cfg(dts, acg, xact, xact_info, ksp, msg, scratch):
+            """Prepare for application configuration. Stash the pending
+            configuration object for subsequent transaction phases"""
+            self._log.debug("Prepare Network config received network id %s, msg %s",
+                           msg.network_id, msg)
+            self.pending[xact.id] = msg
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+        def apply_nw_config(dts, acg, xact, action, scratch):
+            """Apply the pending configuration object"""
+            if action == rwdts.AppconfAction.INSTALL and xact.id is None:
+                self._log.debug("No xact handle.  Skipping apply config")
+                return
+
+            if xact.id not in self.pending:
+                raise KeyError("No stashed configuration found with transaction id [{}]".format(xact.id))
+
+            if action == rwdts.AppconfAction.INSTALL:
+                self._nwdatastore.create_network(self.pending[xact.id].network_id, self.pending[xact.id])
+            elif action == rwdts.AppconfAction.RECONCILE:
+                self._nwdatastore.update_network(self.pending[xact.id].network_id, self.pending[xact.id])
+
+            self._log.debug("Create network config done")
+            return RwTypes.RwStatus.SUCCESS
+
+        self._log.debug("Registering for static topology using xpath %s", NwtopStaticDtsHandler.STATIC_XPATH)
+        handler = rift.tasklets.AppConfGroup.Handler(
+                        on_apply=apply_nw_config)
+
+        with self._dts.appconf_group_create(handler=handler) as acg:
+            acg.register(xpath=NwtopStaticDtsHandler.STATIC_XPATH,
+                         flags=rwdts.Flag.SUBSCRIBER,
+                         on_prepare=prepare_nw_cfg)
+
+
diff --git a/rwlaunchpad/plugins/rwvns/rift/topmgr/sdnsim.py b/rwlaunchpad/plugins/rwvns/rift/topmgr/sdnsim.py
new file mode 100644 (file)
index 0000000..4a6b93b
--- /dev/null
@@ -0,0 +1,76 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+from . import core
+import logging
+
+import xml.etree.ElementTree as etree
+from gi.repository import RwTopologyYang as RwTl
+
+import gi
+gi.require_version('RwYang', '1.0')
+from gi.repository import RwYang
+
+
+logger = logging.getLogger(__name__)
+
+
+class SdnSim(core.Topology):
+    def __init__(self):
+        super(SdnSim, self).__init__()
+        self._model = RwYang.Model.create_libncx()
+        self._model.load_schema_ypbc(RwTl.get_schema())
+
+    def get_network_list(self, account):
+        """
+        Returns the discovered network
+
+        @param account - a SDN account
+
+        """
+
+        nwtop = RwTl.YangData_IetfNetwork()
+        #topology_source = "/net/boson/home1/rchamart/work/topology/l2_top.xml"
+        if not account.sdnsim.has_field('topology_source') or account.sdnsim.topology_source is None:
+            return nwtop
+        topology_source = account.sdnsim.topology_source
+        logger.info("Reading topology file: %s", topology_source)
+        if topology_source.endswith('.json'):
+            with open(topology_source, 'r') as f:
+                op_json = f.read()
+                nwtop.from_json(self._model, op_json)
+                for nw in nwtop.network:
+                    nw.server_provided = False
+                    logger.debug("...Network id %s", nw.network_id)
+        elif topology_source.endswith('.xml'):
+            tree = etree.parse(topology_source)
+            root = tree.getroot()
+            xmlstr = etree.tostring(root, encoding="unicode")
+
+            # The top level topology object does not have XML conversion
+            # Hence going one level down
+            nwtop.from_xml_v2(self._model, xmlstr)
+
+            logger.debug("Returning topology data imported from XML file")
+
+        return nwtop
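+
+# A minimal usage sketch, assuming an sdnsim account object whose
+# topology_source field points at a generated .json or .xml topology file:
+#
+#     sim = SdnSim()
+#     nwtop = sim.get_network_list(account)
+#     for nw in nwtop.network:
+#         logger.debug("Discovered network %s", nw.network_id)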
diff --git a/rwlaunchpad/plugins/rwvns/rift/vlmgr/__init__.py b/rwlaunchpad/plugins/rwvns/rift/vlmgr/__init__.py
new file mode 100644 (file)
index 0000000..2bdb77a
--- /dev/null
@@ -0,0 +1,25 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Ravi Chamarty
+# Creation Date: 9/2/2015
+# 
+
+from .rwvlmgr import (
+    VirtualLinkRecordState,
+    VirtualLinkRecord,
+    VlrDtsHandler,
+    VldDtsHandler,
+)
diff --git a/rwlaunchpad/plugins/rwvns/rift/vlmgr/rwvlmgr.py b/rwlaunchpad/plugins/rwvns/rift/vlmgr/rwvlmgr.py
new file mode 100755 (executable)
index 0000000..bdea4ef
--- /dev/null
@@ -0,0 +1,483 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import enum
+import uuid
+import time
+
+import gi
+gi.require_version('RwVlrYang', '1.0')
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+from gi.repository import (
+    RwVlrYang,
+    VldYang,
+    RwDts as rwdts,
+    RwResourceMgrYang,
+)
+import rift.tasklets
+
+
+class NetworkResourceError(Exception):
+    """ Network Resource Error """
+    pass
+
+
+class VlrRecordExistsError(Exception):
+    """ VLR record already exists"""
+    pass
+
+
+class VlRecordError(Exception):
+    """ VLR record error """
+    pass
+
+
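+# Typical lifecycle: INIT -> INSTANTIATING -> RESOURCE_ALLOC_PENDING -> READY
+# -> TERMINATING -> TERMINATED; FAILED is entered when instantiation errors.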
+class VirtualLinkRecordState(enum.Enum):
+    """ Virtual Link record state """
+    INIT = 1
+    INSTANTIATING = 2
+    RESOURCE_ALLOC_PENDING = 3
+    READY = 4
+    TERMINATING = 5
+    TERMINATED = 6
+    FAILED = 10
+
+
+class VirtualLinkRecord(object):
+    """
+        Virtual Link Record object
+    """
+    def __init__(self, dts, log, loop, vnsm, vlr_msg, req_id=None):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._vnsm = vnsm
+        self._vlr_msg = vlr_msg
+
+        self._network_id = None
+        self._network_pool = None
+        self._assigned_subnet = None
+        self._create_time = int(time.time())
+        if req_id is None:
+            self._request_id = str(uuid.uuid4())
+        else:
+            self._request_id = req_id
+
+        self._state = VirtualLinkRecordState.INIT
+        self._state_failed_reason = None
+
+    @property
+    def vld_xpath(self):
+        """ VLD xpath associated with this VLR record """
+        return "C,/vld:vld-catalog/vld:vld[id='{}']".format(self.vld_id)
+
+    @property
+    def vld_id(self):
+        """ VLD id associated with this VLR record """
+        return self._vlr_msg.vld_ref
+
+    @property
+    def vlr_id(self):
+        """ VLR id associated with this VLR record """
+        return self._vlr_msg.id
+
+    @property
+    def xpath(self):
+        """ path for this VLR """
+        return("D,/vlr:vlr-catalog"
+               "/vlr:vlr[vlr:id='{}']".format(self.vlr_id))
+
+    @property
+    def name(self):
+        """ Name of this VLR """
+        return self._vlr_msg.name
+
+    @property
+    def cloud_account_name(self):
+        """ Cloud Account to instantiate the virtual link on """
+        return self._vlr_msg.cloud_account
+
+    @property
+    def resmgr_path(self):
+        """ path for resource-mgr"""
+        return ("D,/rw-resource-mgr:resource-mgmt" +
+                "/vlink-event/vlink-event-data[event-id='{}']".format(self._request_id))
+
+    @property
+    def operational_status(self):
+        """ Operational status of this VLR"""
+        op_stats_dict = {"INIT": "init",
+                         "INSTANTIATING": "vl_alloc_pending",
+                         "RESOURCE_ALLOC_PENDING": "vl_alloc_pending",
+                         "READY": "running",
+                         "FAILED": "failed",
+                         "TERMINATING": "vl_terminate_pending",
+                         "TERMINATED": "terminated"}
+
+        return op_stats_dict[self._state.name]
+
+    @property
+    def msg(self):
+        """ VLR message for this VLR """
+        msg = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr()
+        msg.copy_from(self._vlr_msg)
+
+        if self._network_id is not None:
+            msg.network_id = self._network_id
+
+        if self._network_pool is not None:
+            msg.network_pool = self._network_pool
+
+        if self._assigned_subnet is not None:
+            msg.assigned_subnet = self._assigned_subnet
+
+        msg.operational_status = self.operational_status
+        msg.operational_status_details = self._state_failed_reason
+        msg.res_id = self._request_id
+
+        return msg
+
+    @property
+    def resmgr_msg(self):
+        """ VLR message for this VLR """
+        msg = RwResourceMgrYang.VirtualLinkEventData()
+        msg.event_id = self._request_id
+        msg.cloud_account = self.cloud_account_name
+        msg.request_info.name = self.name
+        msg.request_info.vim_network_name = self._vlr_msg.vim_network_name
+        msg.request_info.provider_network.from_dict(
+                self._vlr_msg.provider_network.as_dict()
+                )
+        if self._vlr_msg.has_field('ip_profile_params'):
+            msg.request_info.ip_profile_params.from_dict(self._vlr_msg.ip_profile_params.as_dict())
+
+        return msg
+
+    @asyncio.coroutine
+    def create_network(self, xact):
+        """ Create network for this VL """
+        self._log.debug("Creating network req-id: %s", self._request_id)
+        return (yield from self.request_network(xact, "create"))
+
+    @asyncio.coroutine
+    def delete_network(self, xact):
+        """ Delete network for this VL """
+        self._log.debug("Deleting network - req-id: %s", self._request_id)
+        return (yield from self.request_network(xact, "delete"))
+
+    @asyncio.coroutine
+    def read_network(self, xact):
+        """ Read network for this VL """
+        self._log.debug("Reading network - req-id: %s", self._request_id)
+        return (yield from self.request_network(xact, "read"))
+
+    @asyncio.coroutine
+    def request_network(self, xact, action):
+        """Request creation/deletion network for this VL """
+
+        block = xact.block_create()
+
+        if action == "create":
+            self._log.debug("Creating network path:%s, msg:%s",
+                            self.resmgr_path, self.resmgr_msg)
+            block.add_query_create(self.resmgr_path, self.resmgr_msg)
+        elif action == "delete":
+            self._log.debug("Deleting network path:%s", self.resmgr_path)
+            if self.resmgr_msg.request_info.name != "multisite":
+                block.add_query_delete(self.resmgr_path)
+        elif action == "read":
+            self._log.debug("Reading network path:%s", self.resmgr_path)
+            block.add_query_read(self.resmgr_path)
+        else:
+            raise VlRecordError("Invalid action %s received" % action)
+
+        res_iter = yield from block.execute(now=True)
+
+        resp = None
+
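+        # Only create/read responses carry a resource_info payload to parse;
+        # delete responses are not inspected.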
+        if action == "create" or action == "read":
+            for i in res_iter:
+                r = yield from i
+                resp = r.result
+
+            if resp is None:
+                raise NetworkResourceError("Did not get a network resource response (resp: %s)", resp)
+
+            if resp.has_field('resource_info') and resp.resource_info.resource_state == "failed":
+                raise NetworkResourceError(resp.resource_info.resource_errors)
+
+            if not (resp.has_field('resource_info') and
+                    resp.resource_info.has_field('virtual_link_id')):
+                raise NetworkResourceError("Did not get a valid network resource response (resp: %s)", resp)
+
+            self._log.debug("Got network request response: %s", resp)
+
+        return resp
+
+    @asyncio.coroutine
+    def instantiate(self, xact, restart=0):
+        """ Instantiate this VL """
+        self._state = VirtualLinkRecordState.INSTANTIATING
+
+        self._log.debug("Instantiating VLR path = [%s]", self.xpath)
+
+        try:
+            self._state = VirtualLinkRecordState.RESOURCE_ALLOC_PENDING
+
+            if restart == 0:
+                network_resp = yield from self.create_network(xact)
+            else:
+                # On restart, try to read back a previously allocated
+                # network before requesting a new one.
+                network_resp = yield from self.read_network(xact)
+                if network_resp is None:
+                    network_resp = yield from self.create_network(xact)
+
+            # Note network_resp.virtual_link_id is CAL assigned network_id.
+
+            self._network_id = network_resp.resource_info.virtual_link_id
+            self._network_pool = network_resp.resource_info.pool_name
+            self._assigned_subnet = network_resp.resource_info.subnet
+
+            self._state = VirtualLinkRecordState.READY
+
+            yield from self.publish(xact)
+
+        except Exception as e:
+            self._log.error("Instantiatiation of  VLR record failed: %s", str(e))
+            self._state = VirtualLinkRecordState.FAILED
+            self._state_failed_reason = str(e)
+            yield from self.publish(xact)
+
+    @asyncio.coroutine
+    def publish(self, xact):
+        """ publish this VLR """
+        vlr = self.msg
+        self._log.debug("Publishing VLR path = [%s], record = [%s]",
+                        self.xpath, self.msg)
+        vlr.create_time = self._create_time
+        yield from self._vnsm.publish_vlr(xact, self.xpath, self.msg)
+        self._log.debug("Published VLR path = [%s], record = [%s]",
+                        self.xpath, self.msg)
+
+    @asyncio.coroutine
+    def terminate(self, xact):
+        """ Terminate this VL """
+        if self._state not in [VirtualLinkRecordState.READY, VirtualLinkRecordState.FAILED]:
+            self._log.error("Ignoring terminate for VL %s is in %s state",
+                            self.vlr_id, self._state)
+            return
+
+        if self._state == VirtualLinkRecordState.READY:
+            self._log.debug("Terminating VL with id %s", self.vlr_id)
+            self._state = VirtualLinkRecordState.TERMINATING
+            try:
+                yield from self.delete_network(xact)
+            except Exception:
+                self._log.exception("Caught exception while deleting VL %s", self.vlr_id)
+            self._log.debug("Terminated VL with id %s", self.vlr_id)
+
+        yield from self.unpublish(xact)
+        self._state = VirtualLinkRecordState.TERMINATED
+
+    @asyncio.coroutine
+    def unpublish(self, xact):
+        """ Unpublish this VLR """
+        self._log.debug("UnPublishing VLR id %s", self.vlr_id)
+        yield from self._vnsm.unpublish_vlr(xact, self.xpath)
+        self._log.debug("UnPublished VLR id %s", self.vlr_id)
+
+
+class VlrDtsHandler(object):
+    """ Handles DTS interactions for the VLR registration """
+    XPATH = "D,/vlr:vlr-catalog/vlr:vlr"
+
+    def __init__(self, dts, log, loop, vnsm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._vnsm = vnsm
+
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ The registration handle assocaited with this Handler"""
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for the VLR path """
+        def on_commit(xact_info):
+            """ The transaction has been committed """
+            self._log.debug("Got vlr commit (xact_info: %s)", xact_info)
+
+            return rwdts.MemberRspCode.ACTION_OK
+
+        @asyncio.coroutine
+        def on_event(dts, g_reg, xact, xact_event, scratch_data):
+            @asyncio.coroutine
+            def instantiate_realloc_vlr(vlr):
+                """Re-populate the virtual link information after restart
+
+                Arguments:
+                    vlr - the VirtualLinkRecord to re-instantiate
+
+                """
+
+                with self._dts.transaction(flags=0) as xact:
+                    yield from vlr.instantiate(xact, 1)
+
+            if (xact_event == rwdts.MemberEvent.INSTALL):
+                curr_cfg = self.regh.elements
+                for cfg in curr_cfg:
+                    vlr = self._vnsm.create_vlr(cfg)
+                    self._loop.create_task(instantiate_realloc_vlr(vlr))
+
+            self._log.debug("Got on_event")
+            return rwdts.MemberRspCode.ACTION_OK
+
+        @asyncio.coroutine
+        def on_prepare(xact_info, action, ks_path, msg):
+            """ prepare for VLR registration"""
+            self._log.debug(
+                "Got vlr on_prepare callback (xact_info: %s, action: %s): %s",
+                xact_info, action, msg
+                )
+
+            if action == rwdts.QueryAction.CREATE:
+                vlr = self._vnsm.create_vlr(msg)
+                with self._dts.transaction(flags=0) as xact:
+                    yield from vlr.instantiate(xact)
+                self._log.debug("Responding to VL create request path:%s, msg:%s",
+                                vlr.xpath, vlr.msg)
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath=vlr.xpath, msg=vlr.msg)
+                return
+            elif action == rwdts.QueryAction.DELETE:
+                # Delete a VLR record
+                schema = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.schema()
+                path_entry = schema.keyspec_to_entry(ks_path)
+                self._log.debug("Terminating VLR id %s", path_entry.key00.id)
+                yield from self._vnsm.delete_vlr(path_entry.key00.id, xact_info.xact)
+            else:
+                err = "%s action on VirtualLinkRecord not supported" % action
+                raise NotImplementedError(err)
+            xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+            return
+
+        self._log.debug("Registering for VLR using xpath: %s",
+                        VlrDtsHandler.XPATH)
+
+        reg_handle = rift.tasklets.DTS.RegistrationHandler(
+            on_commit=on_commit,
+            on_prepare=on_prepare,
+            )
+        handlers = rift.tasklets.Group.Handler(on_event=on_event,)
+        with self._dts.group_create(handler=handlers) as group:
+            self._regh = group.register(
+                xpath=VlrDtsHandler.XPATH,
+                handler=reg_handle,
+                flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ | rwdts.Flag.DATASTORE,
+                )
+
+    @asyncio.coroutine
+    def create(self, xact, path, msg):
+        """
+        Create a VLR record in DTS with path and message
+        """
+        self._log.debug("Creating VLR xact = %s, %s:%s",
+                        xact, path, msg)
+        self.regh.create_element(path, msg)
+        self._log.debug("Created VLR xact = %s, %s:%s",
+                        xact, path, msg)
+
+    @asyncio.coroutine
+    def update(self, xact, path, msg):
+        """
+        Update a VLR record in DTS with path and message
+        """
+        self._log.debug("Updating VLR xact = %s, %s:%s",
+                        xact, path, msg)
+        self.regh.update_element(path, msg)
+        self._log.debug("Updated VLR xact = %s, %s:%s",
+                        xact, path, msg)
+
+    @asyncio.coroutine
+    def delete(self, xact, path):
+        """
+        Delete a VLR record in DTS at the given path
+        """
+        self._log.debug("Deleting VLR xact = %s, %s", xact, path)
+        self.regh.delete_element(path)
+        self._log.debug("Deleted VLR xact = %s, %s", xact, path)
+
+
+class VldDtsHandler(object):
+    """ DTS handler for the VLD registration """
+    XPATH = "C,/vld:vld-catalog/vld:vld"
+
+    def __init__(self, dts, log, loop, vnsm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._vnsm = vnsm
+
+        self._regh = None
+
+    @property
+    def regh(self):
+        """ The registration handle assocaited with this Handler"""
+        return self._regh
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register the VLD path """
+        @asyncio.coroutine
+        def on_prepare(xact_info, query_action, ks_path, msg):
+            """ prepare callback on vld path """
+            self._log.debug(
+                "Got on prepare for VLD update (ks_path: %s) (action: %s)",
+                ks_path.to_xpath(VldYang.get_schema()), msg)
+
+            schema = VldYang.YangData_Vld_VldCatalog_Vld.schema()
+            path_entry = schema.keyspec_to_entry(ks_path)
+            vld_id = path_entry.key00.id
+
+            disabled_actions = [rwdts.QueryAction.DELETE, rwdts.QueryAction.UPDATE]
+            if query_action not in disabled_actions:
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+                return
+
+            vlr = self._vnsm.find_vlr_by_vld_id(vld_id)
+            if vlr is None:
+                self._log.debug(
+                    "Did not find an existing VLR record for vld %s. "
+                    "Permitting %s vld action", vld_id, query_action)
+                xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+                return
+
+            raise VlrRecordExistsError(
+                "Vlr record(s) exists."
+                "Cannot perform %s action on VLD." % query_action)
+
+        handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
+
+        yield from self._dts.register(
+            VldDtsHandler.XPATH,
+            flags=rwdts.Flag.SUBSCRIBER,
+            handler=handler
+            )
diff --git a/rwlaunchpad/plugins/rwvns/rwvnstasklet.py b/rwlaunchpad/plugins/rwvns/rwvnstasklet.py
new file mode 100755 (executable)
index 0000000..1f1a044
--- /dev/null
@@ -0,0 +1,28 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+
+import rift.tasklets.rwvnstasklet
+
+class Tasklet(rift.tasklets.rwvnstasklet.VnsTasklet):
+    pass
+
+# vim: sw=4
diff --git a/rwlaunchpad/plugins/rwvns/test/create_stackedProvNettopology.py b/rwlaunchpad/plugins/rwvns/test/create_stackedProvNettopology.py
new file mode 100644 (file)
index 0000000..86638f4
--- /dev/null
@@ -0,0 +1,331 @@
+#!/usr/bin/env python
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import gi
+gi.require_version('RwYang', '1.0')
+from gi.repository import IetfL2TopologyYang as l2Tl
+from gi.repository import RwTopologyYang as RwTl
+from gi.repository import RwYang
+from xml.etree import ElementTree as etree
+import subprocess
+import logging
+
+from create_stackedl2topology import MyL2Network
+from create_stackedl2topology import MyL2Topology
+
+class MyNwNotFound(Exception):
+    pass
+
+class MyNodeNotFound(Exception):
+    pass
+
+class MyTpNotFound(Exception):
+    pass
+
+class MyProvNetwork(object):
+    def __init__(self, nwtop, l2top, log):
+        self.next_mac = 11
+        self.log = log
+        self.provnet1 = nwtop.network.add()
+        self.provnet1.network_id = "ProviderNetwork-1"
+
+        self.nwtop = nwtop
+        self.l2top = l2top
+
+        # L2 Network type augmentation
+        self.provnet1.network_types.l2_network = self.provnet1.network_types.l2_network.new()
+        # L2 Network augmentation
+        self.provnet1.l2_network_attributes.name = "Rift LAB SFC-Demo Provider Network"
+        ul_net = self.provnet1.supporting_network.add()
+        try:
+           ul_net.network_ref = l2top.find_nw_id("L2HostNetwork-1")
+           self.l2netid = ul_net.network_ref
+        except TypeError:
+            raise MyNwNotFound()
+
+    def get_nw_id(self, nw_name):
+        for nw in self.nwtop.network:
+            if (nw.network_id == nw_name):
+                return nw.network_id
+
+    def get_node(self, node_name):
+        _node_id = "urn:Rift:Lab:" + node_name
+        for node in self.provnet1.node:
+            if (node.node_id == _node_id):
+                return node
+
+    def get_tp(self, node, tp_name):
+        _tp_id = node.node_id + ":" + tp_name
+        for tp in node.termination_point:
+            if (tp.tp_id == _tp_id):
+                return tp
+
+    def get_link(self, link_name):
+        for link in self.provnet1.link:
+            if (link.l2_link_attributes.name == link_name):
+                return link
+
+    def create_node(self, node_name, description, mgmt_ip_addr = None, sup_node = None):
+        logging.debug("Creating node %s", node_name)
+        node = self.provnet1.node.add()
+        node.node_id = "urn:Rift:Lab:" + node_name
+        # L2 Node augmentation
+        node.l2_node_attributes.name = node_name
+        node.l2_node_attributes.description = description
+        if (mgmt_ip_addr is not None):
+            node.l2_node_attributes.management_address.append(mgmt_ip_addr)
+        if (sup_node is not None):
+            logging.debug("  Adding support node %s", sup_node.node_id)
+            ul_node = node.supporting_node.add()
+            ul_node.network_ref = self.l2netid
+            ul_node.node_ref = sup_node.node_id
+        return node
+
+    def create_tp(self, node, cfg_tp, sup_node = None, sup_tp = None, vlan = False):
+        logging.debug("   Creating termination point %s %s", node.l2_node_attributes.name, cfg_tp)
+        tp = node.termination_point.add()
+        tp.tp_id = ("{}:{}").format(node.node_id, cfg_tp)
+        # L2 TP augmentation
+        tp.l2_termination_point_attributes.description = cfg_tp
+        tp.l2_termination_point_attributes.maximum_frame_size = 1500
+        tp.l2_termination_point_attributes.mac_address = "00:4f:9c:ab:dd:" + str(self.next_mac)
+        self.next_mac = self.next_mac + 1
+        if vlan:
+            tp.l2_termination_point_attributes.eth_encapsulation = "l2t:vlan"
+        else:
+            tp.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet"
+        if ((sup_tp is not None) and (sup_node is not None)):
+            logging.debug("     Adding support terminaton point %s", sup_tp.tp_id)
+            ul_tp = tp.supporting_termination_point.add()
+            ul_tp.network_ref = self.l2netid
+            ul_tp.node_ref = sup_node.node_id
+            ul_tp.tp_ref = sup_tp.tp_id
+        return tp
+
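+    # The topology link object is unidirectional (source -> destination), so
+    # a bidirectional Ethernet link is modeled below as a pair of opposing
+    # links, one per direction.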
+    def create_bidir_link(self, node1, tp1, node2, tp2, link_name1, link_name2):
+        logging.debug("Creating links %s %s", link_name1, link_name2)
+        lnk1 = self.provnet1.link.add()
+        lnk1.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description, node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description)
+        lnk1.source.source_node = node1.node_id
+        lnk1.source.source_tp = tp1.tp_id
+        lnk1.destination.dest_node = node2.node_id
+        lnk1.destination.dest_tp = tp2.tp_id
+        # L2 link augmentation
+        lnk1.l2_link_attributes.name = link_name1
+        #lnk1.l2_link_attributes.rate = 1000000000.00
+
+        lnk2 = self.provnet1.link.add()
+        lnk2.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description, node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description)
+        lnk2.source.source_node = node2.node_id
+        lnk2.source.source_tp = tp2.tp_id
+        lnk2.destination.dest_node = node1.node_id
+        lnk2.destination.dest_tp = tp1.tp_id
+        # L2 link augmentation
+        lnk2.l2_link_attributes.name = link_name2
+        #lnk2.l2_link_attributes.rate = 1000000000.00
+        return lnk1, lnk2
+
+class MyProvTopology(MyProvNetwork):
+    def __init__(self, nwtop, l2top, log):
+        super(MyProvTopology, self).__init__(nwtop, l2top, log)
+
+    def find_nw_id(self, nw_name):
+        return self.get_nw_id(nw_name)
+
+    def find_node(self, node_name):
+        return self.get_node(node_name)
+
+    def find_tp(self, node, tp_name):
+        return self.get_tp(node, tp_name)
+
+    def find_link(self, link_name):
+        return self.get_link(link_name)
+
+    def setup_nodes(self):
+        logging.debug("Setting up nodes")
+        self.pseudo_mgmt_node = self.create_node("Pseudo_mgmt_node", "Pseudo node for VM mgmt network LAN")
+        self.pseudo_dp_node = self.create_node("Pseudo_DP_node", "Pseudo node for DP network LAN")
+
+        self.g118_node = self.l2top.find_node("Grunt118")
+        if (self.g118_node is None):
+           raise MyNodeNotFound()
+        self.g44_node = self.l2top.find_node("Grunt44")
+        if (self.g44_node is None):
+           raise MyNodeNotFound()
+        self.g120_node = self.l2top.find_node("Grunt120")
+        if (self.g120_node is None):
+           raise MyNodeNotFound()
+
+        self.g118_br_int = self.create_node("G118_Br_Int","OVS Integration bridge on Grunt118", mgmt_ip_addr="10.66.4.118", sup_node = self.g118_node)
+        self.g118_br_eth1 = self.create_node("G118_Br_Eth1","OVS Integration bridge on Grunt118", mgmt_ip_addr="10.66.4.118", sup_node = self.g118_node)
+        # eth2 on g118 is being used in PCI passthrough mode
+
+        self.g44_br_int = self.create_node("G44_Br_Int","OVS Integration bridge on Grunt44", mgmt_ip_addr="10.66.4.44", sup_node = self.g44_node)
+        self.g44_br_eth1 = self.create_node("G44_Br_Eth1","OVS Interface bridge on Grunt44", mgmt_ip_addr="10.66.4.44", sup_node = self.g44_node)
+        self.g44_br_eth2 = self.create_node("G44_Br_Eth2","OVS Interface bridge on Grunt44", mgmt_ip_addr="10.66.4.44", sup_node = self.g44_node)
+        self.g44_br_eth3 = self.create_node("G44_Br_Eth3","OVS Interface bridge on Grunt44", mgmt_ip_addr="10.66.4.44", sup_node = self.g44_node)
+
+        self.g120_br_int = self.create_node("G120_Br_Int","OVS Integration bridge on Grunt120", mgmt_ip_addr = "10.66.4.120", sup_node = self.g120_node)
+        self.g120_br_eth1 = self.create_node("G120_Br_Eth1","OVS Integration bridge on Grunt120", mgmt_ip_addr = "10.66.4.120", sup_node = self.g120_node)
+        # eth2 on g120 is being used in PCI passthrough mode
+
+    def setup_tps(self):
+        logging.debug("Setting up termination points")
+        self.g118_e1 = self.l2top.find_tp(self.g118_node, "eth1")
+        if (self.g118_e1 is None):
+            raise MyTpNotFound()
+        self.g44_e1 = self.l2top.find_tp(self.g44_node, "eth1")
+        if (self.g44_e1 is None):
+            raise MyTpNotFound()
+        self.g44_e2 = self.l2top.find_tp(self.g44_node, "eth2")
+        if (self.g44_e2 is None):
+            raise MyTpNotFound()
+        self.g44_e3 = self.l2top.find_tp(self.g44_node, "eth3")
+        if (self.g44_e3 is None):
+            raise MyTpNotFound()
+        self.g120_e1 = self.l2top.find_tp(self.g120_node, "eth1")
+        if (self.g120_e1 is None):
+            raise MyTpNotFound()
+
+        self.g118_br_int_eth1 = self.create_tp(self.g118_br_int, "int-br-eth1")
+        self.g118_br_int_tap1 = self.create_tp(self.g118_br_int, "tap1")
+
+        self.g118_br_eth1_phyeth1 = self.create_tp(self.g118_br_eth1, "phyeth1")
+        self.g118_br_eth1_eth1 = self.create_tp(self.g118_br_eth1, "eth1", sup_node=self.g118_node, sup_tp=self.g118_e1, vlan=True)
+
+        self.g44_br_int_eth1 = self.create_tp(self.g44_br_int, "int-br-eth1")
+        self.g44_br_int_vhu1 = self.create_tp(self.g44_br_int, "vhu1")
+        self.g44_br_int_eth2 = self.create_tp(self.g44_br_int, "int-br-eth2")
+        self.g44_br_int_vhu2 = self.create_tp(self.g44_br_int, "vhu2")
+        self.g44_br_int_eth1 = self.create_tp(self.g44_br_int, "int-br-eth3")
+        self.g44_br_int_vhu1 = self.create_tp(self.g44_br_int, "vhu3")
+
+        self.g44_br_eth1_phyeth1 = self.create_tp(self.g44_br_eth1, "phyeth1")
+        self.g44_br_eth1_dpdk0 = self.create_tp(self.g44_br_eth1, "dpdk0", sup_node=self.g44_node, sup_tp=self.g44_e1, vlan=True)
+
+        self.g44_br_eth2_phyeth1 = self.create_tp(self.g44_br_eth2, "phyeth2")
+        self.g44_br_eth2_dpdk1 = self.create_tp(self.g44_br_eth2, "dpdk1", sup_node=self.g44_node, sup_tp=self.g44_e2)
+
+        self.g44_br_eth3_phyeth1 = self.create_tp(self.g44_br_eth3, "phyeth3")
+        self.g44_br_eth3_dpdk2 = self.create_tp(self.g44_br_eth3, "dpdk2", sup_node=self.g44_node, sup_tp=self.g44_e3)
+
+        self.g120_br_int_eth1 = self.create_tp(self.g120_br_int, "int-br-eth1")
+        self.g120_br_int_tap1 = self.create_tp(self.g120_br_int, "tap1")
+
+        self.g120_br_eth1_phyeth1 = self.create_tp(self.g120_br_eth1, "phyeth1")
+        self.g120_br_eth1_eth1 = self.create_tp(self.g120_br_eth1, "eth1", sup_node=self.g120_node, sup_tp=self.g120_e1, vlan=True)
+
+        self.pmn_eth1 = self.create_tp(self.pseudo_mgmt_node, "eth1")
+        self.pmn_eth2 = self.create_tp(self.pseudo_mgmt_node, "eth2")
+        self.pmn_eth3 = self.create_tp(self.pseudo_mgmt_node, "eth3")
+
+    def setup_links(self):
+        # Add links to provnet1 network
+        # These links are unidirectional and point-to-point
+        logging.debug("Setting up links")
+        # Bidir Links for OVS bridges
+        self.create_bidir_link(self.g118_br_eth1, self.g118_br_eth1_eth1, self.pseudo_mgmt_node, self.pmn_eth1, "Link_g118_be1_pmn_e1", "Link_pmn_e1_g118_be1")
+        self.create_bidir_link(self.g44_br_eth1, self.g44_br_eth1_dpdk0, self.pseudo_mgmt_node, self.pmn_eth2, "Link_g44_be1_pmn_d0", "Link_pmn_e2_g44_d0")
+        self.create_bidir_link(self.g120_br_eth1, self.g120_br_eth1_eth1, self.pseudo_mgmt_node, self.pmn_eth3, "Link_g120_be1_pmn_e3", "Link_pmn_e3_g120_be1")
+        # Data path links cannot be represented here since PCI passthrough is being used on G118 and G44
+
+    def setup_all(self):
+        self.setup_nodes()
+        self.setup_tps()
+        self.setup_links()
+
+def adjust_xml_file(infile, outfile, begin_marker, end_marker):
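+    """Swap the block of lines delimited by begin_marker and end_marker
+    (inclusive) with the single line that follows it, writing the adjusted
+    XML to outfile; all other lines are copied through unchanged."""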
+    buffer = []
+    in_block = False
+    max_interesting_line_toread = 1
+    interesting_line = 0
+    with open(infile) as inf:
+        with open(outfile, 'w') as outf:
+            for line in inf:
+                if begin_marker in line:
+                    in_block = True
+                    # Go down
+                if end_marker in line:
+                    assert in_block is True
+                    print("End of gathering line...", line)
+                    buffer.append(line)  # gather lines
+                    interesting_line = max_interesting_line_toread
+                    in_block = False
+                    continue
+                if interesting_line:
+                    print("Interesting line printing ...", line)
+                    outf.write(line)
+                    interesting_line -= 1
+                    if interesting_line == 0:  # output gathered lines
+                        for lbuf in buffer:
+                            outf.write(lbuf)
+                        buffer = []  # empty buffer 
+                        print("\n\n")
+                    continue
+
+                if in_block:
+                    print("Gathering line...", line)
+                    buffer.append(line)  # gather lines
+                else:
+                    outf.write(line)
+
+
+if __name__ == "__main__":
+    model = RwYang.Model.create_libncx()
+    model.load_schema_ypbc(RwTl.get_schema())
+    # create logger 
+    logger = logging.getLogger('Provider Network Topology')
+    logger.setLevel(logging.DEBUG)
+    logging.basicConfig(level=logging.DEBUG)
+
+    logger.info('Creating an instance of Provider Network Topology')
+
+    nwtop = RwTl.YangData_IetfNetwork()
+
+    # Setup L2 topology
+    l2top = MyL2Topology(nwtop, logger)
+    l2top.setup_all()
+
+    # Setup Provider network topology
+    provtop = MyProvTopology(nwtop, l2top, logger)
+    provtop.setup_all()
+
+    print ("Converting to XML")
+    # Convert l2nw network to XML
+    xml_str = nwtop.to_xml_v2(model)
+    tree = etree.XML(xml_str)
+    xml_file = "/tmp/stacked_provtop.xml"
+    xml_formatted_file = "/tmp/stacked_provtop2.xml"
+    with open(xml_file, "w") as f:
+        f.write(xml_str)
+    status = subprocess.call("xmllint --format " + xml_file + " > " + xml_formatted_file, shell=True)
+
+    status = subprocess.call("sed -i '/xml version/d' " + xml_formatted_file, shell=True)
+    status = subprocess.call("sed -i '/root xmlns/d' " + xml_formatted_file, shell=True)
+    status = subprocess.call("sed -i '/\/root/d' " + xml_formatted_file, shell=True)
+
+    print ("Converting to JSON ")
+    # Convert set of topologies to JSON
+    json_str = nwtop.to_json(model)
+    with open("/tmp/stacked_provtop.json", "w") as f:
+        f.write(json_str)
+    status = subprocess.call("python -m json.tool /tmp/stacked_provtop.json > /tmp/stacked_provtop2.json", shell=True)
+    json_formatted_file = "/tmp/stacked_provtop2.json"
+    status = subprocess.call("sed -i -e 's/\"l2t:ethernet\"/\"ethernet\"/g' " + json_formatted_file, shell=True)
+    status = subprocess.call("sed -i -e 's/\"l2t:vlan\"/\"vlan\"/g' " + json_formatted_file, shell=True)
diff --git a/rwlaunchpad/plugins/rwvns/test/create_stackedSfctopology.py b/rwlaunchpad/plugins/rwvns/test/create_stackedSfctopology.py
new file mode 100644 (file)
index 0000000..a27a0b9
--- /dev/null
@@ -0,0 +1,277 @@
+#!/usr/bin/env python
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import gi
+gi.require_version('RwYang', '1.0')
+from gi.repository import IetfL2TopologyYang as l2Tl
+from gi.repository import RwTopologyYang as RwTl
+from gi.repository import RwYang
+from xml.etree import ElementTree as etree
+import subprocess
+import logging
+
+from create_stackedl2topology import MyL2Network
+from create_stackedl2topology import MyL2Topology
+from create_stackedProvNettopology import MyProvNetwork
+from create_stackedProvNettopology import MyProvTopology
+from create_stackedVMNettopology import MyVMNetwork
+from create_stackedVMNettopology import MyVMTopology
+
+
+class MyNwNotFound(Exception):
+    pass
+
+class MyNodeNotFound(Exception):
+    pass
+
+class MyTpNotFound(Exception):
+    pass
+
+class MySfcNetwork(object):
+    def __init__(self, nwtop, l2top, provtop, vmtop, log):
+        self.next_mac = 81
+        self.log = log
+        self.sfcnet1 = nwtop.network.add()
+        self.sfcnet1.network_id = "SfcNetwork-1"
+
+        self.nwtop = nwtop
+        self.l2top = l2top
+        self.provtop = provtop
+        self.vmtop = vmtop
+
+        # L2 Network type augmentation
+        self.sfcnet1.network_types.l2_network = self.sfcnet1.network_types.l2_network.new()
+        # L2 Network augmentation
+        self.sfcnet1.l2_network_attributes.name = "Rift LAB SFC-Demo SFC Network"
+        self.l2netid = l2top.find_nw_id("L2HostNetwork-1")
+        if self.l2netid is None:
+            raise MyNwNotFound()
+        ul_net = self.sfcnet1.supporting_network.add()
+        try:
+            ul_net.network_ref = provtop.find_nw_id("ProviderNetwork-1")
+            self.provnetid = ul_net.network_ref
+        except TypeError:
+            raise MyNwNotFound()
+        ul_net = self.sfcnet1.supporting_network.add()
+        try:
+            ul_net.network_ref = vmtop.find_nw_id("VmNetwork-1")
+            self.vmnetid = ul_net.network_ref
+        except TypeError:
+            raise MyNwNotFound()
+
+    def get_nw_id(self, nw_name):
+        for nw in self.nwtop.network:
+            if (nw.network_id == nw_name):
+                return nw.network_id
+
+    def get_node(self, node_name):
+        _node_id = "urn:Rift:Lab:" + node_name
+        for node in self.sfcnet1.node:
+            if (node.node_id == _node_id):
+                return node
+
+    def get_tp(self, node, tp_name):
+        _tp_id = "urn:Rift:Lab:" + node.node_id + "_" + tp_name
+        for tp in node.termination_point :
+            if (tp.tp_id == _tp_id):
+                return tp
+
+    def get_link(self, link_name):
+        for link in self.sfcnet1.link:
+            if (link.l2_link_attributes.name == link_name):
+                return link
+
+    def create_node(self, node_name, description, mgmt_ip_addr = None, sup_node = None, nw_ref = None):
+        logging.debug("Creating node %s", node_name)
+        node = self.sfcnet1.node.add()
+        node.node_id = "urn:Rift:Lab:" + node_name
+        # L2 Node augmentation
+        node.l2_node_attributes.name = node_name
+        node.l2_node_attributes.description = description
+        if (mgmt_ip_addr is not None):
+            node.l2_node_attributes.management_address.append(mgmt_ip_addr)
+        if (sup_node is not None):
+            logging.debug("  Adding support node %s", sup_node.node_id)
+            ul_node = node.supporting_node.add()
+            if (nw_ref is not None):
+                ul_node.network_ref = nw_ref
+            else:
+                ul_node.network_ref = self.l2netid
+            ul_node.node_ref = sup_node.node_id
+        return node
+
+    def create_tp(self, node, cfg_tp, sup_node = None, sup_tp = None, nw_ref = None):
+        logging.debug("   Creating termination point %s %s", node.l2_node_attributes.name, cfg_tp)
+        tp = node.termination_point.add()
+        tp.tp_id = ("{}:{}").format(node.node_id, cfg_tp)
+        # L2 TP augmentation
+        tp.l2_termination_point_attributes.description = cfg_tp
+        tp.l2_termination_point_attributes.maximum_frame_size = 1500
+        #tp.l2_termination_point_attributes.mac_address = "00:5e:8a:ab:dd:" + str(self.next_mac)
+        #self.next_mac = self.next_mac + 1
+        tp.l2_termination_point_attributes.eth_encapsulation = "l2t:vxlan"
+        if ((sup_tp is not None) and (sup_node is not None)):
+            logging.debug("     Adding support terminaton point %s", sup_tp.tp_id)
+            ul_tp = tp.supporting_termination_point.add()
+            if (nw_ref is not None):
+                ul_tp.network_ref = nw_ref
+            else:
+                ul_tp.network_ref = self.l2netid
+            ul_tp.node_ref = sup_node.node_id
+            ul_tp.tp_ref = sup_tp.tp_id
+        return tp
+
+    def create_link(self, node1, tp1, node2, tp2, link_name1, link_name2 = None):
+        logging.debug("Creating links %s %s", link_name1, link_name2)
+        lnk1= self.sfcnet1.link.add()
+        lnk1.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description, node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description)
+        lnk1.source.source_node = node1.node_id
+        lnk1.source.source_tp = tp1.tp_id
+        lnk1.destination.dest_node = node2.node_id
+        lnk1.destination.dest_tp = tp2.tp_id
+        # L2 link augmentation
+        lnk1.l2_link_attributes.name = link_name1
+        lnk1.l2_link_attributes.rate = 1000000000.00
+
+        # Create bidir link if second link is provided
+        if (link_name2 is not None):
+            lnk2= self.sfcnet1.link.add()
+            lnk2.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description, node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description)
+            lnk2.source.source_node = node2.node_id
+            lnk2.source.source_tp = tp2.tp_id
+            lnk2.destination.dest_node = node1.node_id
+            lnk2.destination.dest_tp = tp1.tp_id
+            # L2 link augmentation
+            lnk2.l2_link_attributes.name = link_name2
+            lnk2.l2_link_attributes.rate = 1000000000.00
+
+
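+# Facade over MySfcNetwork: exposes find_* lookups and creates the SFC demo
+# nodes (SF on the LB VM, classifier on the Trafgen VM, forwarder on the
+# Grunt44 OVS integration bridge) with their vxlannsh termination points.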
+class MySfcTopology(MySfcNetwork):
+    def __init__(self, nwtop, l2top, provtop, vmnet, log):
+        super(MySfcTopology, self).__init__(nwtop, l2top, provtop, vmnet, log)
+
+    def find_nw_id(self, nw_name):
+        return self.get_nw_id(nw_name)
+
+    def find_node(self, node_name):
+        return self.get_node(node_name)
+
+    def find_tp(self, node, tp_name):
+        return self.get_tp(node, tp_name)
+
+    def find_link(self, link_name):
+        return self.get_link(link_name)
+
+    def setup_nodes(self):
+        logging.debug("Setting up nodes")
+
+        self.tg_node = self.vmtop.find_node("Trafgen_VM")
+        if (self.tg_node is None):
+           raise MyNodeNotFound()
+        self.lb_node = self.vmtop.find_node("LB_VM")
+        if (self.lb_node is None):
+           raise MyNodeNotFound()
+
+        self.g44_br_int_node = self.provtop.find_node("G44_Br_Int")
+        if (self.g44_br_int_node is None):
+           raise MyNodeNotFound()
+
+        self.sf1 = self.create_node("SF1","SF on LB VM", sup_node = self.lb_node, nw_ref = self.vmnetid)
+        self.sfc1 = self.create_node("SFC1","SF classifier on Trafgen VM", sup_node = self.tg_node, nw_ref = self.vmnetid)
+        self.sff1 = self.create_node("SFF1","SF forwarder on Grunt44 OVS integration bridge", mgmt_ip_addr="10.66.4.44", sup_node = self.g44_br_int_node, nw_ref = self.provnetid)
+
+    def setup_tps(self):
+        logging.debug("Setting up termination points")
+        # Find L2 hosts
+        #self.g44_e2 = self.l2top.find_tp(self.g44_node, "eth2")
+        #if (self.g44_e2 is None):
+        #   raise MyTpNotFound()
+
+        self.sfc1_vxlannsh1 = self.create_tp(self.sfc1, "vxlannsh1")
+        self.sf1_vxlannsh1 = self.create_tp(self.sf1, "vxlannsh1")
+        self.sff1_vxlannsh1 = self.create_tp(self.sff1, "vxlannsh1")
+
+
+    def setup_links(self):
+        # Add links to sfcnet1 network
+        # These links are unidirectional and point-to-point
+        logging.debug("Setting up links")
+        # Bidir Links for OVS bridges
+        self.create_link(self.sfc1, self.sfc1_vxlannsh1, self.sff1, self.sff1_vxlannsh1, "Link_sfc1_sff1")
+        self.create_link(self.sfc1, self.sfc1_vxlannsh1, self.sf1, self.sf1_vxlannsh1, "Link_sff1_sf1", "Link_sf1_sff1")
+
+    def setup_all(self):
+        self.setup_nodes()
+        self.setup_tps()
+        #self.setup_links()
+
+
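+# Standalone driver: stacks the L2 host, provider, VM and SFC topologies into
+# a single ietf-network instance, then dumps it under /tmp as XML and JSON,
+# using xmllint and json.tool for pretty-printing and sed for cleanup.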
+if __name__ == "__main__":
+    model = RwYang.Model.create_libncx()
+    model.load_schema_ypbc(RwTl.get_schema())
+    # create logger 
+    logger = logging.getLogger('SFC Network Topology')
+    logger.setLevel(logging.DEBUG)
+    logging.basicConfig(level=logging.DEBUG)
+
+    logger.info('Creating an instance of SFC Network Topology')
+
+    nwtop = RwTl.YangData_IetfNetwork()
+
+    # Setup L2 topology
+    l2top = MyL2Topology(nwtop, logger)
+    l2top.setup_all()
+
+    # Setup Provider network topology
+    provtop = MyProvTopology(nwtop, l2top, logger)
+    provtop.setup_all()
+
+    # Setup VM network topology
+    vmtop = MyVMTopology(nwtop, l2top, provtop, logger)
+    vmtop.setup_all()
+
+    # Setup SFC network topology
+    sfctop = MySfcTopology(nwtop, l2top, provtop, vmtop, logger)
+    sfctop.setup_all()
+
+    print ("Converting to XML")
+    # Convert the stacked network topology to XML
+    xml_str = nwtop.to_xml_v2(model)
+    tree = etree.XML(xml_str)
+    xml_file = "/tmp/stacked_sfctop.xml"
+    xml_formatted_file = "/tmp/stacked_sfctop2.xml"
+    with open(xml_file, "w") as f:
+        f.write(xml_str)
+    status = subprocess.call("xmllint --format " + xml_file + " > " + xml_formatted_file, shell=True)
+
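+    # Strip the XML declaration and the enclosing <root> wrapper lines from
+    # the pretty-printed copy.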
+    status = subprocess.call("sed -i '/xml version/d' " + xml_formatted_file, shell=True)
+    status = subprocess.call("sed -i '/root xmlns/d' " + xml_formatted_file, shell=True)
+    status = subprocess.call("sed -i '/\/root/d' " + xml_formatted_file, shell=True)
+
+    print ("Converting to JSON ")
+    # Convert set of topologies to JSON
+    json_str = nwtop.to_json(model)
+    with open("/tmp/stacked_sfctop.json", "w") as f:
+        f.write(json_str)
+    status = subprocess.call("python -m json.tool /tmp/stacked_sfctop.json > /tmp/stacked_sfctop2.json", shell=True)
+    json_formatted_file = "/tmp/stacked_sfctop2.json"
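+    # Strip the "l2t:" module prefix from the eth-encapsulation identity
+    # values in the formatted JSON.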
+    status = subprocess.call("sed -i -e 's/\"l2t:ethernet\"/\"ethernet\"/g' " + json_formatted_file, shell=True)
+    status = subprocess.call("sed -i -e 's/\"l2t:vlan\"/\"vlan\"/g' " + json_formatted_file, shell=True)
+    status = subprocess.call("sed -i -e 's/\"l2t:vxlan\"/\"vxlan\"/g' " + json_formatted_file, shell=True)
+
diff --git a/rwlaunchpad/plugins/rwvns/test/create_stackedVMNettopology.py b/rwlaunchpad/plugins/rwvns/test/create_stackedVMNettopology.py
new file mode 100644 (file)
index 0000000..99f5898
--- /dev/null
@@ -0,0 +1,332 @@
+#!/usr/bin/env python
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import gi
+gi.require_version('RwYang', '1.0')
+from gi.repository import IetfL2TopologyYang as l2Tl
+from gi.repository import RwTopologyYang as RwTl
+from gi.repository import RwYang
+from xml.etree import ElementTree as etree
+import subprocess
+import logging
+
+from create_stackedl2topology import MyL2Network
+from create_stackedl2topology import MyL2Topology
+from create_stackedProvNettopology import MyProvNetwork
+from create_stackedProvNettopology import MyProvTopology
+
+class MyNwNotFound(Exception):
+    pass
+
+class MyNodeNotFound(Exception):
+    pass
+
+class MyTpNotFound(Exception):
+    pass
+
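+# MyVMNetwork models "VmNetwork-1", stacked on the L2 host and provider
+# networks. Each VM node may carry several supporting (node, network-ref)
+# pairs, e.g. its physical host and the OVS integration bridge.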
+class MyVMNetwork(object):
+    def __init__(self, nwtop, l2top, provtop, log):
+        self.next_mac = 41
+        self.log = log
+        self.vmnet1 = nwtop.network.add()
+        self.vmnet1.network_id = "VmNetwork-1"
+
+        self.nwtop = nwtop
+        self.l2top = l2top
+        self.provtop = provtop
+
+        # L2 Network type augmentation
+        self.vmnet1.network_types.l2_network = self.vmnet1.network_types.l2_network.new()
+        # L2 Network augmentation
+        self.vmnet1.l2_network_attributes.name = "Rift LAB SFC-Demo VM Network"
+        ul_net = self.vmnet1.supporting_network.add()
+        try:
+           ul_net.network_ref = l2top.find_nw_id("L2HostNetwork-1")
+           self.l2netid = ul_net.network_ref
+        except TypeError:
+           raise MyNwNotFound()
+        ul_net = self.vmnet1.supporting_network.add()
+        try:
+           ul_net.network_ref = provtop.find_nw_id("ProviderNetwork-1")
+           self.provnetid = ul_net.network_ref
+        except TypeError:
+           raise MyNwNotFound()
+
+    def get_nw_id(self, nw_name):
+        for nw in self.nwtop.network:
+            if (nw.network_id == nw_name):
+                return nw.network_id
+
+    def get_node(self, node_name):
+        _node_id = "urn:Rift:Lab:" + node_name
+        for node in self.vmnet1.node:
+            if (node.node_id == _node_id):
+                return node
+
+    def get_tp(self, node, tp_name):
+        # Match the "<node_id>:<tp_name>" format used by create_tp() below
+        _tp_id = "{}:{}".format(node.node_id, tp_name)
+        for tp in node.termination_point:
+            if (tp.tp_id == _tp_id):
+                return tp
+
+    def get_link(self, link_name):
+        for link in self.vmnet1.link:
+            if (link.l2_link_attributes.name == link_name):
+                return link
+
+    def create_node(self, node_name, description, mgmt_ip_addr=None, sup_node_list=None):
+        logging.debug("Creating node %s", node_name)
+        node = self.vmnet1.node.add()
+        node.node_id = "urn:Rift:Lab:" + node_name
+        # L2 Node augmentation
+        node.l2_node_attributes.name = node_name
+        node.l2_node_attributes.description = description
+        if (mgmt_ip_addr is not None):
+            node.l2_node_attributes.management_address.append(mgmt_ip_addr)
+        if (sup_node_list is not None):
+            for sup_node in sup_node_list:
+                logging.debug("  Adding support node %s", sup_node[0].node_id)
+                ul_node = node.supporting_node.add()
+                # Second element is hardcoded as nw ref
+                if (sup_node[1] is not None):
+                    ul_node.network_ref = sup_node[1]
+                else:
+                    ul_node.network_ref = self.l2netid
+                ul_node.node_ref = sup_node[0].node_id
+        return node
+
+    def create_tp(self, node, cfg_tp, sup_node = None, sup_tp = None, nw_ref = None):
+        logging.debug("   Creating termination point %s %s", node.l2_node_attributes.name, cfg_tp)
+        tp = node.termination_point.add()
+        tp.tp_id = ("{}:{}").format(node.node_id, cfg_tp)
+        # L2 TP augmentation
+        tp.l2_termination_point_attributes.description = cfg_tp
+        tp.l2_termination_point_attributes.maximum_frame_size = 1500
+        tp.l2_termination_point_attributes.mac_address = "00:5e:8a:ab:cc:" + str(self.next_mac)
+        self.next_mac = self.next_mac + 1
+        tp.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet"
+        if ((sup_tp is not None) and (sup_node is not None)):
+            logging.debug("     Adding support terminaton point %s", sup_tp.tp_id)
+            ul_tp = tp.supporting_termination_point.add()
+            if (nw_ref is not None):
+                ul_tp.network_ref = nw_ref
+            else:
+                ul_tp.network_ref = self.l2netid
+            ul_tp.node_ref = sup_node.node_id
+            ul_tp.tp_ref = sup_tp.tp_id
+        return tp
+
+    def create_bidir_link(self, node1, tp1, node2, tp2, link_name1, link_name2):
+        logging.debug("Creating links %s %s", link_name1, link_name2)
+        lnk1= self.vmnet1.link.add()
+        lnk1.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description, node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description)
+        lnk1.source.source_node = node1.node_id
+        lnk1.source.source_tp = tp1.tp_id
+        lnk1.destination.dest_node = node2.node_id
+        lnk1.destination.dest_tp = tp2.tp_id
+        # L2 link augmentation
+        lnk1.l2_link_attributes.name = link_name1
+        #lnk1.l2_link_attributes.rate = 1000000000.00
+
+        lnk2= self.vmnet1.link.add()
+        lnk2.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description, node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description)
+        lnk2.source.source_node = node2.node_id
+        lnk2.source.source_tp = tp2.tp_id
+        lnk2.destination.dest_node = node1.node_id
+        lnk2.destination.dest_tp = tp1.tp_id
+        # L2 link augmentation
+        lnk2.l2_link_attributes.name = link_name2
+        #lnk2.l2_link_attributes.rate = 1000000000.00
+        return lnk1, lnk2
+
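+# Facade over MyVMNetwork: resolves the underlay hosts and OVS bridge, then
+# creates the Pseudo_VM, Trafgen, LB and Trafsink nodes together with their
+# termination points and bidirectional links.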
+class MyVMTopology(MyVMNetwork):
+    def __init__(self, nwtop, l2top, provtop, log):
+        super(MyVMTopology, self).__init__(nwtop, l2top, provtop, log)
+
+    def find_nw_id(self, nw_name):
+        return self.get_nw_id(nw_name)
+
+    def find_node(self, node_name):
+        return self.get_node(node_name)
+
+    def find_tp(self, node, tp_name):
+        return self.get_tp(node, tp_name)
+
+    def find_link(self, link_name):
+        return self.get_link(link_name)
+
+    def setup_nodes(self):
+        logging.debug("Setting up nodes")
+
+        self.g118_node = self.l2top.find_node("Grunt118")
+        if (self.g118_node is None):
+           raise MyNodeNotFound()
+        self.g44_node = self.l2top.find_node("Grunt44")
+        if (self.g44_node is None):
+           raise MyNodeNotFound()
+        self.g120_node = self.l2top.find_node("Grunt120")
+        if (self.g120_node is None):
+           raise MyNodeNotFound()
+
+        self.g44_br_int_node = self.provtop.find_node("G44_Br_Int")
+        if (self.g44_br_int_node is None):
+           raise MyNodeNotFound()
+
+        self.pseudo_vm = self.create_node("Pseudo_VM","Pseudo VM to manage eth0 LAN")
+        sup_node_list = [[self.g118_node, self.l2netid], [self.g44_br_int_node, self.provnetid]]
+        self.tg_vm = self.create_node("Trafgen_VM","Trafgen VM on Grunt118", mgmt_ip_addr="10.0.118.3", sup_node_list = sup_node_list)
+        sup_node_list = [[self.g44_node, self.l2netid], [self.g44_br_int_node, self.provnetid]]
+        self.lb_vm = self.create_node("LB_VM","LB VM on Grunt44", mgmt_ip_addr="10.0.118.35", sup_node_list = sup_node_list)
+        sup_node_list = [[self.g120_node, self.l2netid], [self.g44_br_int_node, self.provnetid]]
+        self.ts_vm = self.create_node("Trafsink_VM","Trafsink VM on Grunt120", mgmt_ip_addr="10.0.118.4", sup_node_list = sup_node_list)
+
+    def setup_tps(self):
+        logging.debug("Setting up termination points")
+        # Find L2 hosts
+        self.g118_e2 = self.l2top.find_tp(self.g118_node, "eth2")
+        if (self.g118_e2 is None):
+           raise MyTpNotFound()
+        self.g44_e2 = self.l2top.find_tp(self.g44_node, "eth2")
+        if (self.g44_e2 is None):
+           raise MyTpNotFound()
+        # Find OVS tps
+        self.g44_br_int_vhu2 = self.provtop.find_tp(self.g44_br_int_node, "vhu2")
+        if (self.g44_br_int_vhu2 is None):
+           raise MyTpNotFound()
+        self.g44_br_int_vhu3 = self.provtop.find_tp(self.g44_br_int_node, "vhu3")
+        if (self.g44_br_int_vhu3 is None):
+           raise MyTpNotFound()
+
+        self.pvm_eth1 = self.create_tp(self.pseudo_vm, "eth1") 
+        self.pvm_eth2 = self.create_tp(self.pseudo_vm, "eth2") 
+        self.pvm_eth3 = self.create_tp(self.pseudo_vm, "eth3") 
+
+        self.tg_vm_eth0 = self.create_tp(self.tg_vm, "eth0")
+        self.tg_vm_trafgen11 = self.create_tp(self.tg_vm, "trafgen11", sup_node=self.g118_node, sup_tp=self.g118_e2)
+
+        self.lb_vm_eth0 = self.create_tp(self.lb_vm, "eth0")
+        self.lb_vm_lb21 = self.create_tp(self.lb_vm, "load_balancer21", sup_node=self.g44_br_int_node, sup_tp=self.g44_br_int_vhu2, nw_ref=self.provnetid)
+        self.lb_vm_lb22 = self.create_tp(self.lb_vm, "load_balancer22", sup_node=self.g44_br_int_node, sup_tp=self.g44_br_int_vhu3, nw_ref=self.provnetid)
+
+        self.ts_vm_eth0 = self.create_tp(self.ts_vm, "eth0")
+        self.ts_vm_trafsink31 = self.create_tp(self.ts_vm, "trafsink31", sup_node=self.g44_node, sup_tp=self.g44_e2)
+
+
+    def setup_links(self):
+        # Add links to vmnet1 network
+        # These links are unidirectional and point-to-point
+        logging.debug("Setting up links")
+        # Bidir Links for OVS bridges
+        self.create_bidir_link(self.tg_vm, self.tg_vm_trafgen11, self.lb_vm, self.lb_vm_lb21, "Link_tg_t11_lb_lb21", "Link_lb_lb21_tg_t11")
+        self.create_bidir_link(self.ts_vm, self.ts_vm_trafsink31, self.lb_vm, self.lb_vm_lb22, "Link_ts_t31_lb_lb22", "Link_lb_lb22_ts_t31")
+
+        self.create_bidir_link(self.pseudo_vm, self.pvm_eth1, self.tg_vm, self.tg_vm_eth0, "Link_pvm_e1_tgv_e0", "Link_tgv_e0_pvm_e1")
+        self.create_bidir_link(self.pseudo_vm, self.pvm_eth2, self.lb_vm, self.lb_vm_eth0, "Link_pvm_e2_lbv_e0", "Link_lbv_e0_pvm_e2")
+        self.create_bidir_link(self.pseudo_vm, self.pvm_eth3, self.ts_vm, self.ts_vm_eth0, "Link_pvm_e3_tsv_e0", "Link_tsv_e0_pvm_e3")
+
+    def setup_all(self):
+        self.setup_nodes()
+        self.setup_tps()
+        self.setup_links()
+
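+# Reorders a generated XML file: buffers every line from begin_marker through
+# end_marker, writes the single line that follows the block first, then
+# flushes the buffered block after it (i.e. swaps the marked block with the
+# line immediately following it). All other lines are copied unchanged.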
+def adjust_xml_file(infile, outfile, begin_marker, end_marker):
+    buffer = []
+    in_block = False
+    max_interesting_line_toread = 1
+    interesting_line = 0
+    with open(infile) as inf:
+        with open(outfile, 'w') as outf:
+            for line in inf:
+                if begin_marker in line:
+                    in_block = True
+                    # Go down
+                if end_marker in line:
+                    assert in_block is True
+                    print("End of gathering line...", line)
+                    buffer.append(line)  # gather lines
+                    interesting_line = max_interesting_line_toread
+                    in_block = False
+                    continue
+                if interesting_line:
+                    print("Interesting line printing ...", line)
+                    outf.write(line)
+                    interesting_line -= 1
+                    if interesting_line == 0:  # output gathered lines
+                        for lbuf in buffer:
+                            outf.write(lbuf)
+                        buffer = []  # empty buffer 
+                        print("\n\n")
+                    continue
+
+                if in_block:
+                    print("Gathering line...", line)
+                    buffer.append(line)  # gather lines
+                else:
+                    outf.write(line)
+
+
+if __name__ == "__main__":
+    model = RwYang.Model.create_libncx()
+    model.load_schema_ypbc(RwTl.get_schema())
+    # create logger 
+    logger = logging.getLogger('VM Network Topology')
+    logger.setLevel(logging.DEBUG)
+    logging.basicConfig(level=logging.DEBUG)
+
+    logger.info('Creating an instance of VM Network Topology')
+
+    nwtop = RwTl.YangData_IetfNetwork()
+
+    # Setup L2 topology
+    l2top = MyL2Topology(nwtop, logger)
+    l2top.setup_all()
+
+    # Setup Provider network topology
+    provtop = MyProvTopology(nwtop, l2top, logger)
+    provtop.setup_all()
+
+    # Setup VM network topology
+    vmtop = MyVMTopology(nwtop, l2top, provtop, logger)
+    vmtop.setup_all()
+
+    print ("Converting to XML")
+    # Convert the stacked network topology to XML
+    xml_str = nwtop.to_xml_v2(model)
+    tree = etree.XML(xml_str)
+    xml_file = "/tmp/stacked_vmtop.xml"
+    xml_formatted_file = "/tmp/stacked_vmtop2.xml"
+    with open(xml_file, "w") as f:
+        f.write(xml_str)
+    status = subprocess.call("xmllint --format " + xml_file + " > " + xml_formatted_file, shell=True)
+
+    status = subprocess.call("sed -i '/xml version/d' " + xml_formatted_file, shell=True)
+    status = subprocess.call("sed -i '/root xmlns/d' " + xml_formatted_file, shell=True)
+    status = subprocess.call("sed -i '/\/root/d' " + xml_formatted_file, shell=True)
+
+    print ("Converting to JSON ")
+    # Convert set of topologies to JSON
+    json_str = nwtop.to_json(model)
+    with open("/tmp/stacked_vmtop.json", "w") as f:
+        f.write(json_str)
+    status = subprocess.call("python -m json.tool /tmp/stacked_vmtop.json > /tmp/stacked_vmtop2.json", shell=True)
+    json_formatted_file = "/tmp/stacked_vmtop2.json"
+    status = subprocess.call("sed -i -e 's/\"l2t:ethernet\"/\"ethernet\"/g' " + json_formatted_file, shell=True)
+    status = subprocess.call("sed -i -e 's/\"l2t:vlan\"/\"vlan\"/g' " + json_formatted_file, shell=True)
+
diff --git a/rwlaunchpad/plugins/rwvns/test/create_stackedl2topology.py b/rwlaunchpad/plugins/rwvns/test/create_stackedl2topology.py
new file mode 100644 (file)
index 0000000..3ae3e80
--- /dev/null
@@ -0,0 +1,261 @@
+#!/usr/bin/env python
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import gi
+gi.require_version('RwYang', '1.0')
+from gi.repository import IetfL2TopologyYang as l2Tl
+from gi.repository import RwTopologyYang as RwTl
+from gi.repository import RwYang
+from xml.etree import ElementTree as etree
+import subprocess
+import logging
+
+
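+# MyL2Network is the bottom of the topology stack: it models the physical lab
+# ("L2HostNetwork-1") as L2 nodes with Ethernet termination points joined by
+# pairs of unidirectional links.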
+class MyL2Network(object):
+    def __init__(self, nwtop, log):
+        self.next_mac = 11
+        self.log = log
+        self.nwtop = nwtop
+        self.l2net1 = nwtop.network.add()
+        self.l2net1.network_id = "L2HostNetwork-1"
+
+        # L2 Network type augmentation
+        self.l2net1.network_types.l2_network = self.l2net1.network_types.l2_network.new()
+        # L2 Network augmentation
+        self.l2net1.l2_network_attributes.name = "Rift LAB SFC-Demo Host Network"
+
+    def get_nw_id(self, nw_name):
+        for nw in self.nwtop.network:
+            if (nw.network_id == nw_name):
+                return nw.network_id
+
+    def get_nw(self, nw_name):
+        for nw in self.nwtop.network:
+            if (nw.network_id == nw_name):
+                return nw
+
+    def get_node(self, node_name):
+        _node_id = "urn:Rift:Lab:" + node_name
+        for node in self.l2net1.node:
+            if (node.node_id == _node_id):
+                return node
+
+    def get_tp(self, node, tp_name):
+        _tp_id = node.node_id + "_" + tp_name
+        for tp in node.termination_point:
+            if (tp.tp_id == _tp_id):
+                return tp
+
+    def get_link(self, link_name):
+        for link in self.l2net1.link:
+            if (link.l2_link_attributes.name == link_name):
+                return link
+
+    def create_node(self, node_name, mgmt_ip_addr, description):
+        logging.debug("Creating node %s", node_name)
+        node = self.l2net1.node.add()
+        node.node_id = "urn:Rift:Lab:" + node_name
+        # L2 Node augmentation
+        node.l2_node_attributes.name = node_name
+        node.l2_node_attributes.description = description
+        node.l2_node_attributes.management_address.append(mgmt_ip_addr)
+        return node
+
+    def create_tp(self, node, cfg_tp):
+        logging.debug("    Creating termination point %s %s", node.l2_node_attributes.name, cfg_tp)
+        tp = node.termination_point.add()
+        tp.tp_id = ("{}_{}").format(node.node_id, cfg_tp)
+        # L2 TP augmentation
+        tp.l2_termination_point_attributes.description = cfg_tp
+        tp.l2_termination_point_attributes.maximum_frame_size = 1500
+        tp.l2_termination_point_attributes.mac_address = "00:1e:67:d8:48:" + str(self.next_mac)
+        self.next_mac = self.next_mac + 1
+        tp.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet"
+        return tp
+
+    def create_bidir_link(self, node1, tp1, node2, tp2, link_name1, link_name2):
+        logging.debug("Creating links %s %s", link_name1, link_name2)
+        lnk1= self.l2net1.link.add()
+        lnk1.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description, node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description)
+        lnk1.source.source_node = node1.node_id
+        lnk1.source.source_tp = tp1.tp_id
+        lnk1.destination.dest_node = node2.node_id
+        lnk1.destination.dest_tp = tp2.tp_id
+        # L2 link augmentation
+        lnk1.l2_link_attributes.name = link_name1
+        #lnk1.l2_link_attributes.rate = 1000000000.00
+
+        lnk2= self.l2net1.link.add()
+        lnk2.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(node2.l2_node_attributes.name, tp2.l2_termination_point_attributes.description, node1.l2_node_attributes.name, tp1.l2_termination_point_attributes.description)
+        lnk2.source.source_node = node2.node_id
+        lnk2.source.source_tp = tp2.tp_id
+        lnk2.destination.dest_node = node1.node_id
+        lnk2.destination.dest_tp = tp1.tp_id
+        # L2 link augmentation
+        lnk2.l2_link_attributes.name = link_name2
+        #lnk2.l2_link_attributes.rate = 1000000000.00
+        return lnk1, lnk2
+
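+# Facade over MyL2Network that instantiates the concrete lab: three Grunt
+# hosts, a host-management switch, a VM-management switch and an Arista
+# data-plane switch, fully wired via bidirectional links.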
+class MyL2Topology(MyL2Network):
+    def __init__(self, nwtop, log):
+        super(MyL2Topology, self).__init__(nwtop, log)
+
+    def find_nw_id(self, nw_name):
+        return self.get_nw_id(nw_name)
+
+    def find_nw(self, nw_name):
+        return self.get_nw(nw_name)
+
+    def find_node(self, node_name):
+        return self.get_node(node_name)
+
+    def find_tp(self, node, tp_name):
+        return self.get_tp(node, tp_name)
+
+    def find_link(self, link_name):
+        return self.get_link(link_name)
+
+    def setup_nodes(self):
+        self.g118 = self.create_node("Grunt118","10.66.4.118", "Host with OVS and PCI")
+        self.g44 = self.create_node("Grunt44","10.66.4.44", "Host with OVS-DPDK")
+        self.g120 = self.create_node("Grunt120","10.66.4.120", "Host with OVS and PCI")
+        self.hms = self.create_node("HostMgmtSwitch","10.66.4.98", "Switch for host eth0")
+        self.vms = self.create_node("VMMgmtSwitch","10.66.4.55", "Switch for VMs eth0")
+        self.ads = self.create_node("AristaDPSwitch","10.66.4.90", "10 Gbps Switch")
+
+    def setup_tps(self):
+        self.g118_e0 = self.create_tp(self.g118, "eth0")
+        self.g118_e1 = self.create_tp(self.g118, "eth1")
+        self.g118_e2 = self.create_tp(self.g118, "eth2")
+
+        self.g44_e0 = self.create_tp(self.g44, "eth0")
+        self.g44_e1 = self.create_tp(self.g44, "eth1")
+        self.g44_e2 = self.create_tp(self.g44, "eth2")
+        self.g44_e3 = self.create_tp(self.g44, "eth3")
+
+        self.g120_e0 = self.create_tp(self.g120, "eth0")
+        self.g120_e1 = self.create_tp(self.g120, "eth1")
+        self.g120_e2 = self.create_tp(self.g120, "eth2")
+
+        self.hms_e1 = self.create_tp(self.hms, "eth1")
+        self.hms_e2 = self.create_tp(self.hms, "eth2")
+        self.hms_e3 = self.create_tp(self.hms, "eth3")
+
+        self.vms_e1 = self.create_tp(self.vms, "eth1")
+        self.vms_e2 = self.create_tp(self.vms, "eth2")
+        self.vms_e3 = self.create_tp(self.vms, "eth3")
+
+        self.ads_57 = self.create_tp(self.ads, "Card_5:Port_7")
+        self.ads_58 = self.create_tp(self.ads, "Card_5:Port_8")
+        self.ads_47 = self.create_tp(self.ads, "Card_4:Port_7")
+        self.ads_48 = self.create_tp(self.ads, "Card_4:Port_8")
+
+    def setup_links(self):
+        # Add links to l2net1 network
+        # These links are unidirectional and point-to-point
+        # Bidir Links for Grunt118
+        self.create_bidir_link(self.g118, self.g118_e0, self.hms, self.hms_e1, "Link_g118_e0_hms_e1", "Link_hms_e1_g118_e0")
+        self.create_bidir_link(self.g118, self.g118_e1, self.vms, self.vms_e1, "Link_g118_e1_vms_e1", "Link_vms_e1_g118_e1")
+        self.create_bidir_link(self.g118, self.g118_e2, self.ads, self.ads_57, "Link_g118_e2_ads_57", "Link_ads_57_g118_e2")
+        # Bidir Links for Grunt44
+        self.create_bidir_link(self.g44, self.g44_e0, self.hms, self.hms_e2, "Link_g44_e0_hms_e2", "Link_hms_e2_g44_e0")
+        self.create_bidir_link(self.g44, self.g44_e1, self.vms, self.vms_e2, "Link_g44_e1_vms_e2", "Link_vms_e2_g44_e1")
+        self.create_bidir_link(self.g44, self.g44_e2, self.ads, self.ads_47, "Link_g44_e2_ads_47", "Link_ads_47_g44_e2")
+        self.create_bidir_link(self.g44, self.g44_e3, self.ads, self.ads_48, "Link_g44_e3_ads_48", "Link_ads_48_g44_e3")
+        # Bidir Links for Grunt120
+        self.create_bidir_link(self.g120, self.g120_e0, self.hms, self.hms_e3, "Link_g120_e0_hms_e3", "Link_hms_e3_g120_e0")
+        self.create_bidir_link(self.g120, self.g120_e1, self.vms, self.vms_e3, "Link_g120_e1_vms_e3", "Link_vms_e3_g120_e1")
+        self.create_bidir_link(self.g120, self.g120_e2, self.ads, self.ads_58, "Link_g120_e2_ads_58", "Link_ads_58_g120_e2")
+
+    def setup_all(self):
+        self.setup_nodes()
+        self.setup_tps()
+        self.setup_links()
+
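+# Same XML block-reordering helper as in create_stackedVMNettopology.py:
+# swaps the lines between begin_marker and end_marker with the single line
+# that follows them.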
+def adjust_xml_file(infile, outfile, begin_marker, end_marker):
+    buffer = []
+    in_block = False
+    max_interesting_line_toread = 1
+    interesting_line = 0
+    with open(infile) as inf:
+        with open(outfile, 'w') as outf:
+            for line in inf:
+                if begin_marker in line:
+                    in_block = True
+                    # Go down
+                if end_marker in line:
+                    assert in_block is True
+                    print("End of gathering line...", line)
+                    buffer.append(line)  # gather lines
+                    interesting_line = max_interesting_line_toread
+                    in_block = False
+                    continue
+                if interesting_line:
+                    print("Interesting line printing ...", line)
+                    outf.write(line)
+                    interesting_line -= 1
+                    if interesting_line == 0:  # output gathered lines
+                        for lbuf in buffer:
+                            outf.write(lbuf)
+                        buffer = []  # empty buffer 
+                        print("\n\n")
+                    continue
+
+                if in_block:
+                    print("Gathering line...", line)
+                    buffer.append(line)  # gather lines
+                else:
+                    outf.write(line)
+
+if __name__ == "__main__":
+    model = RwYang.Model.create_libncx()
+    model.load_schema_ypbc(RwTl.get_schema())
+    # create logger 
+    logger = logging.getLogger(__file__)
+    logger.setLevel(logging.DEBUG)
+    logging.basicConfig(level=logging.DEBUG)
+
+    logging.info('Creating an instance of L2 Host Topology')
+    nwtop = RwTl.YangData_IetfNetwork()
+
+    l2top = MyL2Topology(nwtop, logger)
+    l2top.setup_all()
+
+    logging.info ("Converting to XML")
+    # Convert l2nw network to XML
+    xml_str = nwtop.to_xml_v2(model)
+    tree = etree.XML(xml_str)
+    xml_file = "/tmp/stacked_top.xml"
+    xml_formatted_file = "/tmp/stacked_top2.xml"
+    with open(xml_file, "w") as f:
+        f.write(xml_str)
+    status = subprocess.call("xmllint --format " + xml_file + " > " + xml_formatted_file, shell=True)
+    status = subprocess.call("sed -i '/xml version/d' " + xml_formatted_file, shell=True)
+    status = subprocess.call("sed -i '/root xmlns/d' " + xml_formatted_file, shell=True)
+    status = subprocess.call("sed -i '/\/root/d' " + xml_formatted_file, shell=True)
+
+    logging.info ("Converting to JSON")
+    # Convert set of topologies to JSON
+    json_str = nwtop.to_json(model)
+    with open("/tmp/stacked_top.json", "w") as f:
+        f.write(json_str)
+    status = subprocess.call("python -m json.tool /tmp/stacked_top.json > /tmp/stacked_top2.json", shell=True)
+    json_formatted_file = "/tmp/stacked_top2.json"
+    status = subprocess.call("sed -i -e 's/\"l2t:ethernet\"/\"ethernet\"/g' " + json_formatted_file, shell=True)
+
diff --git a/rwlaunchpad/plugins/rwvns/test/test_sdn_mock.py b/rwlaunchpad/plugins/rwvns/test/test_sdn_mock.py
new file mode 100644 (file)
index 0000000..45e2e80
--- /dev/null
@@ -0,0 +1,101 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import datetime
+import logging
+import unittest
+
+import rw_peas
+import rwlogger
+
+from gi.repository import RwsdnYang
+import gi
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwSdn', '1.0')
+from gi.repository import RwcalYang
+from gi.repository import IetfNetworkYang
+from gi.repository.RwTypes import RwStatus
+
+
+logger = logging.getLogger('mock')
+
+def get_sdn_account():
+    """
+    Creates an RwsdnYang.SDNAccount configured for the mock SDN plugin
+    """
+    account                 = RwsdnYang.SDNAccount()
+    account.account_type    = "mock"
+    account.mock.username   = "rift"
+    account.mock.plugin_name = "rwsdn_mock"
+    return account
+
+def get_sdn_plugin():
+    """
+    Loads rw.sdn plugin via libpeas
+    """
+    plugin = rw_peas.PeasPlugin('rwsdn_mock', 'RwSdn-1.0')
+    engine, info, extension = plugin()
+
+    # Get the RwLogger context
+    rwloggerctx = rwlogger.RwLog.Ctx.new("SDN-Log")
+
+    sdn = plugin.get_interface("Topology")
+    try:
+        rc = sdn.init(rwloggerctx)
+        assert rc == RwStatus.SUCCESS
+    except Exception:
+        logger.error("ERROR:SDN plugin instantiation failed. Aborting tests")
+    else:
+        logger.info("Mock SDN plugin successfully instantiated")
+    return sdn
+
+
+
+class SdnMockTest(unittest.TestCase):
+    def setUp(self):
+        """
+          Initialize test plugins
+        """
+        self._acct = get_sdn_account()
+        logger.info("Mock-SDN-Test: setUp")
+        self.sdn   = get_sdn_plugin()
+        logger.info("Mock-SDN-Test: setUpEND")
+
+    def tearDown(self):
+        logger.info("Mock-SDN-Test: Done with tests")
+
+    def test_get_network_list(self):
+        """
+           First test case
+        """
+        rc, nwtop = self.sdn.get_network_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS) 
+        logger.debug("SDN-Mock-Test: Retrieved network attributes ")
+        for nw in nwtop.network:
+           logger.debug("...Network id %s", nw.network_id)
+           logger.debug("...Network name %s", nw.l2_network_attributes.name)
+           print(nw)
+
+
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.DEBUG)
+    unittest.main()
+
+
+
+
diff --git a/rwlaunchpad/plugins/rwvns/test/test_sdn_sim.py b/rwlaunchpad/plugins/rwvns/test/test_sdn_sim.py
new file mode 100644 (file)
index 0000000..d216f0d
--- /dev/null
@@ -0,0 +1,99 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import datetime
+import logging
+import unittest
+
+import rw_peas
+import rwlogger
+
+import gi
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwSdn', '1.0')
+from gi.repository import RwsdnYang
+from gi.repository import IetfNetworkYang
+from gi.repository.RwTypes import RwStatus
+from gi.repository import RwSdn
+
+
+logger = logging.getLogger('sdnsim')
+
+def get_sdn_account():
+    """
+    Creates an RwsdnYang.SDNAccount configured for the sdnsim plugin
+    """
+    account                 = RwsdnYang.SDNAccount()
+    account.account_type    = "sdnsim"
+    account.sdnsim.username   = "rift"
+    account.sdnsim.plugin_name = "rwsdn_sim"
+    return account
+
+def get_sdn_plugin():
+    """
+    Loads rw.sdn plugin via libpeas
+    """
+    plugin = rw_peas.PeasPlugin('rwsdn_sim', 'RwSdn-1.0')
+    engine, info, extension = plugin()
+
+    # Get the RwLogger context
+    rwloggerctx = rwlogger.RwLog.Ctx.new("SDN-Log")
+
+    sdn = plugin.get_interface("Topology")
+    try:
+        rc = sdn.init(rwloggerctx)
+        assert rc == RwStatus.SUCCESS
+    except Exception:
+        logger.error("ERROR:SDN sim plugin instantiation failed. Aborting tests")
+    else:
+        logger.info("SDN sim plugin successfully instantiated")
+    return sdn
+
+
+
+class SdnSimTest(unittest.TestCase):
+    def setUp(self):
+        """
+          Initialize test plugins
+        """
+        self._acct = get_sdn_account()
+        logger.info("SDN-Sim-Test: setUp")
+        self.sdn   = get_sdn_plugin()
+        logger.info("SDN-Sim-Test: setUpEND")
+
+    def tearDown(self):
+        logger.info("SDN-Sim-Test: Done with tests")
+
+    def test_get_network_list(self):
+        """
+           First test case
+        """
+        rc, nwtop = self.sdn.get_network_list(self._acct)
+        self.assertEqual(rc, RwStatus.SUCCESS) 
+        logger.debug("SDN-Sim-Test: Retrieved network attributes ")
+        for nw in nwtop.network:
+           logger.debug("...Network id %s", nw.network_id)
+           logger.debug("...Network name %s", nw.l2_network_attributes.name)
+
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.DEBUG)
+    unittest.main()
+
+
+
+
diff --git a/rwlaunchpad/plugins/rwvns/test/test_top_datastore.py b/rwlaunchpad/plugins/rwvns/test/test_top_datastore.py
new file mode 100644 (file)
index 0000000..d6c1313
--- /dev/null
@@ -0,0 +1,734 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import datetime
+import logging
+import unittest
+
+import rwlogger
+
+# from gi.repository import IetfNetworkYang
+from gi.repository import IetfL2TopologyYang as l2Tl
+from gi.repository import RwTopologyYang as RwTl
+# from gi.repository.RwTypes import RwStatus
+
+from create_stackedl2topology import MyL2Topology
+
+from rift.topmgr import (
+    NwtopDataStore,
+)
+logger = logging.getLogger('sdntop')
+
+NUM_NWS = 1
+NUM_NODES_L2_NW = 6
+NUM_TPS_L2_NW = 20
+NUM_LINKS = 20
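+# Expected element counts for the topology built by MyL2Topology (see
+# create_stackedl2topology.py); the test assertions below check against these.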
+
+class SdnTopStoreNetworkTest(unittest.TestCase):
+    def setUp(self):
+        """
+          Initialize Top data store
+        """
+        self._nwtopdata_store = NwtopDataStore(logger)
+        self.test_nwtop = RwTl.YangData_IetfNetwork()
+
+        self.l2top = MyL2Topology(self.test_nwtop, logger)
+        self.l2top.setup_all()
+
+        # Get initial test data
+        self.l2net1 = self.l2top.find_nw("L2HostNetwork-1")
+        # Create initial nw
+        self._nwtopdata_store.create_network("L2HostNetwork-1", self.l2net1)
+
+        # Add test data
+        self.l2net1 = self.l2top.find_nw("L2HostNetwork-1")
+        assert self.l2net1 is not None
+        self.new_l2net = RwTl.YangData_IetfNetwork_Network()
+        self.new_l2net.network_id = "L2HostNetwork-2"
+        logger.info("SdnTopStoreNetworkTest: setUp")
+
+    def tearDown(self):
+        self.l2net1 = None
+        self.new_l2net = None
+        logger.info("SdnTopStoreNetworkTest: Done with tests")
+
+    def test_create_network(self):
+        """
+           Test: Create first l2 network
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreNetworkTest: Create network ")
+        # Get test data
+        # Created during the setup phase
+        assert self.l2net1 is not None
+        # Use data store APIs
+        # Network already stored
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        logger.debug("...Network id %s", nw.network_id)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        logger.debug("...Network name %s", nw.l2_network_attributes.name)
+        for node in nw.node:
+            logger.debug("...Node id %s", node.node_id)
+            num_nodes += 1
+            for tp in node.termination_point:
+                logger.debug("...Tp id %s", tp.tp_id)
+                num_tps += 1
+        self.assertEqual(num_nodes, NUM_NODES_L2_NW)
+        self.assertEqual(num_tps, NUM_TPS_L2_NW)
+
+
+    def test_add_network(self):
+        """
+           Test: Add another network and check its network id
+        """
+        logger.debug("SdnTopStoreNetworkTest: Add network ")
+        # Use data store APIs
+        self._nwtopdata_store.create_network("L2HostNetwork-2", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-2")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-2")
+        self.assertEqual(len(self._nwtopdata_store._networks), 2)
+
+    def test_add_networktype(self):
+        """
+           Test: Add another network and check its network type
+        """
+        logger.debug("SdnTopStoreTest: Add network type ")
+        # Use data store APIs
+        self._nwtopdata_store.create_network("L2HostNetwork-2", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-2")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-2")
+        self.assertEqual(len(self._nwtopdata_store._networks), 2)
+        # Add new test data
+        self.new_l2net.network_types.l2_network = self.new_l2net.network_types.l2_network.new()
+        logger.debug("Adding update l2net..%s", self.new_l2net)
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-2", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-2")
+        self.assertIsNotNone(nw.network_types.l2_network)
+
+    def test_add_networkl2name(self):
+        """
+           Test: Add another network and check its L2 network name
+        """
+        logger.debug("SdnTopStoreTest: Add L2 network name ")
+        # Use data store APIs
+        self.new_l2net.network_types.l2_network = self.new_l2net.network_types.l2_network.new()
+        self._nwtopdata_store.create_network("L2HostNetwork-2", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-2")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-2")
+        self.assertEqual(len(self._nwtopdata_store._networks), 2)
+        # Add new test data
+        self.new_l2net.l2_network_attributes.name = "L2networkName"
+        logger.debug("Adding update l2net..%s", self.new_l2net)
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-2", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-2")
+        self.assertEqual(nw.l2_network_attributes.name, "L2networkName")
+
+
+class SdnTopStoreNetworkNodeTest(unittest.TestCase):
+    def setUp(self):
+        """
+          Initialize Top data store
+        """
+        self._nwtopdata_store = NwtopDataStore(logger)
+        self.test_nwtop = RwTl.YangData_IetfNetwork()
+
+        self.l2top = MyL2Topology(self.test_nwtop, logger)
+        self.l2top.setup_all()
+
+        # Get initial test data
+        self.l2net1 = self.l2top.find_nw("L2HostNetwork-1")
+        # Create initial nw
+        self._nwtopdata_store.create_network("L2HostNetwork-1", self.l2net1)
+        # Get test data
+        self.l2net1 = self.l2top.find_nw("L2HostNetwork-1")
+        assert self.l2net1 is not None
+        self.new_l2net = RwTl.YangData_IetfNetwork_Network()
+        self.new_l2net.network_id = "L2HostNetwork-1"
+        self.node2 = self.new_l2net.node.add()
+        self.node2.node_id = "TempNode2"
+        logger.info("SdnTopStoreTest: setUp NetworkNodetest")
+
+    def tearDown(self):
+        logger.info("SdnTopStoreTest: Done with  NetworkNodetest")
+
+
+    def test_add_network_node(self):
+        """
+           Test: Add a node to an existing network
+                 Test all parameters
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreTest: Add network node")
+        # Add test data
+        self.node2.node_id = "TempNode2"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+
+    #@unittest.skip("Skipping")
+    def test_update_network_node(self):
+        """
+           Test: Update a node in an existing network
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreTest: Update network node")
+        # Add test data
+        self.node2.node_id = "TempNode2"
+        self.node2.l2_node_attributes.description = "TempNode2 desc"
+        self.node2.l2_node_attributes.name = "Nice Name2"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.description, "TempNode2 desc")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name2")
+
+    #@unittest.skip("Skipping")
+    def test_update_network_node_l2attr1(self):
+        """
+           Test: Update a node in an existing network
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreTest: Update network node")
+        # Add test data
+        self.node2.node_id = "TempNode2"
+        self.node2.l2_node_attributes.description = "TempNode2 desc"
+        self.node2.l2_node_attributes.name = "Nice Name3"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.description, "TempNode2 desc")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name3")
+
+        # Add test data
+        self.node2.l2_node_attributes.name = "Nice Name4"
+        logger.debug("Network %s", self.new_l2net)
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        logger.debug("Node %s", nw.node[NUM_NODES_L2_NW])
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name4")
+
+    def test_update_network_node_l2attr2(self):
+        """
+           Test: Update a node in an existing network
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreTest: Update network node")
+        # Add test data
+        self.node2.node_id = "TempNode2"
+        self.node2.l2_node_attributes.description = "TempNode2 desc"
+        self.node2.l2_node_attributes.name = "Nice Name3"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.description, "TempNode2 desc")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name3")
+
+        # Add test data
+        self.node2.l2_node_attributes.management_address.append("10.0.0.1")
+        logger.debug("Network %s", self.new_l2net)
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name3")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].l2_node_attributes.management_address), 1)
+
+        # Add test data
+        self.node2.l2_node_attributes.management_address.append("10.0.0.2")
+        logger.debug("Network %s", self.new_l2net)
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].l2_node_attributes.name, "Nice Name3")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].l2_node_attributes.management_address), 2)
+
+
+class SdnTopStoreNetworkNodeTpTest(unittest.TestCase):
+    def setUp(self):
+        """
+          Initialize Top data store
+        """
+        self._nwtopdata_store = NwtopDataStore(logger)
+        self.test_nwtop = RwTl.YangData_IetfNetwork()
+
+        self.l2top = MyL2Topology(self.test_nwtop, logger)
+        self.l2top.setup_all()
+
+        # Get initial test data
+        self.l2net1 = self.l2top.find_nw("L2HostNetwork-1")
+        # Create initial nw
+        self._nwtopdata_store.create_network("L2HostNetwork-1", self.l2net1)
+        # Get test data
+        self.l2net1 = self.l2top.find_nw("L2HostNetwork-1")
+        assert self.l2net1 is not None
+        self.new_l2net = RwTl.YangData_IetfNetwork_Network()
+        self.new_l2net.network_id = "L2HostNetwork-1"
+        self.node2 = self.new_l2net.node.add()
+        self.node2.node_id = "TempNode2"
+        self.tp1 = self.node2.termination_point.add()
+        self.tp1.tp_id = "TempTp1"
+        logger.info("SdnTopStoreTest: setUp NetworkNodeTptest")
+
+    def tearDown(self):
+        logger.info("SdnTopStoreTest: Done with  NetworkNodeTptest")
+
+        self.new_l2net = None
+        self.node2 = None
+        self.tp1 = None
+
+    def test_add_network_node_tp(self):
+        """
+           Test: Add a node to an existing network
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreTest: Update network ")
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+
+    def test_update_network_node_tp(self):
+        """
+           Test: Update a tp in an existing network, adding all tp elements
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreTest: Update network ")
+        self.tp1.tp_id = "TempTp1"
+        self.tp1.l2_termination_point_attributes.description = "TempTp1 Desc"
+        self.tp1.l2_termination_point_attributes.maximum_frame_size = 1296
+        self.tp1.l2_termination_point_attributes.mac_address = "00:1e:67:98:28:01"
+        self.tp1.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1296)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:01")
+
+    def test_update_network_node_tp2(self):
+        """
+           Test: Update a tp in an existing network, changing tp elements
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreTest: Update network ")
+        self.tp1.tp_id = "TempTp1"
+        self.tp1.l2_termination_point_attributes.description = "TempTp1 Desc"
+        self.tp1.l2_termination_point_attributes.maximum_frame_size = 1296
+        self.tp1.l2_termination_point_attributes.mac_address = "00:1e:67:98:28:01"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1296)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:01")
+
+        # Change frame size
+        self.tp1.l2_termination_point_attributes.maximum_frame_size = 1396
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1396)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:01")
+
+        # Change MAC address
+        self.tp1.l2_termination_point_attributes.mac_address = "00:1e:67:98:28:02"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1396)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:02")
+
+        # Add encapsulation type
+        self.tp1.l2_termination_point_attributes.eth_encapsulation = "l2t:ethernet"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.description, "TempTp1 Desc")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.maximum_frame_size, 1396)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.mac_address, "00:1e:67:98:28:02")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].l2_termination_point_attributes.eth_encapsulation, "l2t:ethernet")
+
+    def test_update_extra_network_node_tp2(self):
+        """
+           Test: Update a tp to existing network, change tp elements
+        """
+        num_nodes = 0
+        num_tps = 0
+        logger.debug("SdnTopStoreTest: Update network ")
+        self.tp2 = self.node2.termination_point.add()
+        self.tp2.tp_id = "TempTp2"
+        # Use data store APIs
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode2")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 2)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[1].tp_id, "TempTp2")
+
+
+
+class SdnTopStoreNetworkLinkTest(unittest.TestCase):
+    def setUp(self):
+        """
+          Initialize Top data store
+        """
+        self._nwtopdata_store = NwtopDataStore(logger)
+        self.test_nwtop = RwTl.YangData_IetfNetwork()
+
+        self.l2top = MyL2Topology(self.test_nwtop, logger)
+        self.l2top.setup_all()
+
+        # Get initial test data
+        self.l2net1 = self.l2top.find_nw("L2HostNetwork-1")
+        assert self.l2net1 is not None
+        # Create initial nw
+        self._nwtopdata_store.create_network("L2HostNetwork-1", self.l2net1)
+        self.new_l2net = RwTl.YangData_IetfNetwork_Network()
+        self.new_l2net.network_id = "L2HostNetwork-1"
+
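+        # Build an update payload with two new nodes, one tp each, which the
+        # link tests below connect to each other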
+        self.src_node = self.new_l2net.node.add()
+        self.src_node.node_id = "TempNode1"
+        self.tp1 = self.src_node.termination_point.add()
+        self.tp1.tp_id = "TempTp1"
+
+        self.dest_node = self.new_l2net.node.add()
+        self.dest_node.node_id = "TempNode2"
+        self.tp2 = self.dest_node.termination_point.add()
+        self.tp2.tp_id = "TempTp2"
+        logger.info("SdnTopStoreTest: setUp NetworkLinkTest")
+
+    def tearDown(self):
+        logger.info("SdnTopStoreTest: Done with  NetworkLinkTest")
+        
+        self.new_l2net = None
+        self.src_node = None
+        self.tp1 = None
+        self.dest_node = None
+        self.tp2 = None
+
+    def test_add_network_link(self):
+        """
+           Test: Add a link to existing network
+        """
+        logger.info("SdnTopStoreTest: Update network link")
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data created
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2")
+        self.assertEqual(len(nw.link), NUM_LINKS)
+        self.link1 = self.new_l2net.link.add()
+        self.link1.link_id = "Link1"
+        self.link1.source.source_node = self.src_node.node_id
+        self.link1.source.source_tp = self.tp1.tp_id
+        self.link1.destination.dest_node = self.dest_node.node_id
+        self.link1.destination.dest_tp = self.tp2.tp_id
+        # Use data store APIs
+        logger.info("SdnTopStoreTest: Update network link - Part 2")
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        # Verify data created
+        self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1")
+        self.assertEqual(nw.link[NUM_LINKS].source.source_node, self.src_node.node_id)
+        self.assertEqual(nw.link[NUM_LINKS].source.source_tp, self.tp1.tp_id)
+        self.assertEqual(nw.link[NUM_LINKS].destination.dest_node, self.dest_node.node_id)
+        self.assertEqual(nw.link[NUM_LINKS].destination.dest_tp, self.tp2.tp_id)
+        self.assertEqual(len(nw.link), NUM_LINKS + 1)
+
+    def test_add_extra_network_link(self):
+        """
+           Test: Add a link to existing network
+        """
+        logger.info("SdnTopStoreTest: Update extra network link")
+        # Create initial state
+        self.link1 = self.new_l2net.link.add()
+        self.link1.link_id = "Link1"
+        self.link1.source.source_node = self.src_node.node_id
+        self.link1.source.source_tp = self.tp1.tp_id
+        self.link1.destination.dest_node = self.dest_node.node_id
+        self.link1.destination.dest_tp = self.tp2.tp_id
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify initial state
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2")
+        self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1")
+        self.assertEqual(len(nw.link), NUM_LINKS + 1)
+
+        # Add extra link (reverse)
+        self.link2 = self.new_l2net.link.add()
+        self.link2.link_id = "Link2"
+        self.link2.source.source_node = self.dest_node.node_id
+        self.link2.source.source_tp = self.tp2.tp_id
+        self.link2.destination.dest_node = self.src_node.node_id
+        self.link2.destination.dest_tp = self.tp1.tp_id
+        # Use data store APIs
+        logger.info("SdnTopStoreTest: Update extra network link - Part 2")
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        # Verify data created
+        self.assertEqual(nw.link[NUM_LINKS+1].link_id, "Link2")
+        self.assertEqual(len(nw.link), NUM_LINKS + 2)
+        self.assertEqual(nw.link[NUM_LINKS+1].source.source_node, self.dest_node.node_id)
+        self.assertEqual(nw.link[NUM_LINKS+1].source.source_tp, self.tp2.tp_id)
+        self.assertEqual(nw.link[NUM_LINKS+1].destination.dest_node, self.src_node.node_id)
+        self.assertEqual(nw.link[NUM_LINKS+1].destination.dest_tp, self.tp1.tp_id)
+
+    def test_add_network_link_l2attr(self):
+        """
+           Test: Check L2 link attributes
+        """
+        logger.info("SdnTopStoreTest: Add network link L2 attributes")
+        # Create test state
+        self.link1 = self.new_l2net.link.add()
+        self.link1.link_id = "Link1"
+        self.link1.source.source_node = self.src_node.node_id
+        self.link1.source.source_tp = self.tp1.tp_id
+        self.link1.destination.dest_node = self.dest_node.node_id
+        self.link1.destination.dest_tp = self.tp2.tp_id
+        self.link1.l2_link_attributes.name = "Link L2 name"
+        self.link1.l2_link_attributes.rate = 10000
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify data state
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2")
+        self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1")
+        self.assertEqual(len(nw.link), NUM_LINKS + 1)
+        self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.name, "Link L2 name")
+        self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.rate, 10000)
+
+    def test_change_network_link_l2attr(self):
+        """
+           Test: Change L2 link attributes
+        """
+        logger.info("SdnTopStoreTest: Change network link L2 attributes")
+        # Create initial state
+        self.link1 = self.new_l2net.link.add()
+        self.link1.link_id = "Link1"
+        self.link1.source.source_node = self.src_node.node_id
+        self.link1.source.source_tp = self.tp1.tp_id
+        self.link1.destination.dest_node = self.dest_node.node_id
+        self.link1.destination.dest_tp = self.tp2.tp_id
+        self.link1.l2_link_attributes.name = "Link L2 name"
+        self.link1.l2_link_attributes.rate = 10000
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify initial state
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2")
+        self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1")
+        self.assertEqual(len(nw.link), NUM_LINKS + 1)
+        self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.name, "Link L2 name")
+        self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.rate, 10000)
+
+        # Create test state
+        self.test_l2net = RwTl.YangData_IetfNetwork_Network()
+        self.test_l2net.network_id = "L2HostNetwork-1"
+        self.link1 = self.test_l2net.link.add()
+        self.link1.link_id = "Link1"
+        self.link1.l2_link_attributes.name = "Link L2 updated name"
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.test_l2net)
+        # Verify test state
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.name, "Link L2 updated name")
+
+    def test_change_network_link_dest_tp(self):
+        """
+           Test: Change L2 link attributes
+        """
+        logger.info("SdnTopStoreTest: Change network link dest-tp")
+        # Create initial state
+        self.link1 = self.new_l2net.link.add()
+        self.link1.link_id = "Link1"
+        self.link1.source.source_node = self.src_node.node_id
+        self.link1.source.source_tp = self.tp1.tp_id
+        self.link1.destination.dest_node = self.dest_node.node_id
+        self.link1.destination.dest_tp = self.tp2.tp_id
+        self.link1.l2_link_attributes.name = "Link L2 name"
+        self.link1.l2_link_attributes.rate = 10000
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.new_l2net)
+        # Verify initial state
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertIsNotNone(nw)
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(len(self._nwtopdata_store._networks), 1)
+        self.assertEqual(len(nw.node), NUM_NODES_L2_NW + 2)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].node_id, "TempNode1")
+        self.assertEqual(len(nw.node[NUM_NODES_L2_NW].termination_point), 1)
+        self.assertEqual(nw.node[NUM_NODES_L2_NW].termination_point[0].tp_id, "TempTp1")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].node_id, "TempNode2")
+        self.assertEqual(nw.node[NUM_NODES_L2_NW+1].termination_point[0].tp_id, "TempTp2")
+        self.assertEqual(nw.link[NUM_LINKS].link_id, "Link1")
+        self.assertEqual(len(nw.link), NUM_LINKS + 1)
+        self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.name, "Link L2 name")
+        self.assertEqual(nw.link[NUM_LINKS].l2_link_attributes.rate, 10000)
+
+        # Create test state
+        self.test_l2net = RwTl.YangData_IetfNetwork_Network()
+        self.test_l2net.network_id = "L2HostNetwork-1"
+        self.link1 = self.test_l2net.link.add()
+        self.link1.link_id = "Link1"
+        # Changing dest node params
+        self.link1.destination.dest_node = self.src_node.node_id
+        self.link1.destination.dest_tp = self.tp1.tp_id
+        self._nwtopdata_store.update_network("L2HostNetwork-1", self.test_l2net)
+        # Verify test state
+        nw = self._nwtopdata_store.get_network("L2HostNetwork-1")
+        self.assertEqual(nw.network_id, "L2HostNetwork-1")
+        self.assertEqual(nw.link[NUM_LINKS].destination.dest_node, self.src_node.node_id)
+
+
+
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.INFO)
+    unittest.main()
+
+
+
+
diff --git a/rwlaunchpad/plugins/rwvns/test/topmgr_module_test.py b/rwlaunchpad/plugins/rwvns/test/topmgr_module_test.py
new file mode 100755 (executable)
index 0000000..44e2f5c
--- /dev/null
@@ -0,0 +1,189 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import asyncio
+import logging
+import os
+import sys
+import types
+import unittest
+import uuid
+import random
+
+import xmlrunner
+
+import gi
+gi.require_version('CF', '1.0')
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwMain', '1.0')
+gi.require_version('RwManifestYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwTypes', '1.0')
+import gi.repository.CF as cf
+import gi.repository.RwDts as rwdts
+import gi.repository.RwMain as rwmain
+import gi.repository.RwManifestYang as rwmanifest
+import gi.repository.IetfL2TopologyYang as l2Tl
+import gi.repository.RwTopologyYang as RwTl
+import gi.repository.RwLaunchpadYang as launchpadyang
+from gi.repository import RwsdnYang
+from gi.repository.RwTypes import RwStatus
+
+from create_stackedl2topology import MyL2Topology
+from create_stackedProvNettopology import MyProvTopology
+from create_stackedVMNettopology import MyVMTopology
+from create_stackedSfctopology import MySfcTopology
+
+import rw_peas
+import rift.tasklets
+import rift.test.dts
+
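+# asyncio.ensure_future was added in Python 3.4.4; fall back to the older
+# asyncio.async alias on earlier interpreters.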
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+class TopMgrTestCase(rift.test.dts.AbstractDTSTest):
+
+    @classmethod
+    def configure_suite(cls, rwmain):
+        vns_mgr_dir = os.environ.get('VNS_MGR_DIR')
+
+        cls.rwmain.add_tasklet(vns_mgr_dir, 'rwvnstasklet')
+
+    @classmethod
+    def configure_schema(cls):
+        return RwTl.get_schema()
+        
+    @asyncio.coroutine
+    def wait_tasklets(self):
+        yield from asyncio.sleep(1, loop=self.loop)
+
+    @classmethod
+    def configure_timeout(cls):
+        return 360
+
+
+    @asyncio.coroutine
+    def configure_l2_network(self, dts):
+        nwtop = RwTl.YangData_IetfNetwork()
+        l2top = MyL2Topology(nwtop, self.log)
+        l2top.setup_all()
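+        # The "C," prefix appears to select the config datastore in the
+        # RW.DTS xpath notation used by query_create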
+        nw_xpath = "C,/nd:network"
+        self.log.info("Configuring l2 network: %s",nwtop)
+        yield from dts.query_create(nw_xpath,
+                                    rwdts.XactFlag.ADVISE,
+                                    nwtop)
+
+    @asyncio.coroutine
+    def configure_prov_network(self, dts):
+        nwtop = RwTl.YangData_IetfNetwork()
+        l2top = MyL2Topology(nwtop, self.log)
+        l2top.setup_all()
+
+        provtop = MyProvTopology(nwtop, l2top, self.log)
+        provtop.setup_all()
+        nw_xpath = "C,/nd:network"
+        self.log.info("Configuring provider network: %s",nwtop)
+        yield from dts.query_create(nw_xpath,
+                                    rwdts.XactFlag.ADVISE,
+                                    nwtop)
+
+    @asyncio.coroutine
+    def configure_vm_network(self, dts):
+        nwtop = RwTl.YangData_IetfNetwork()
+        l2top = MyL2Topology(nwtop, self.log)
+        l2top.setup_all()
+
+        provtop = MyProvTopology(nwtop, l2top, self.log)
+        provtop.setup_all()
+
+        vmtop = MyVMTopology(nwtop, l2top, provtop, self.log)
+        vmtop.setup_all()
+        nw_xpath = "C,/nd:network"
+        self.log.info("Configuring VM network: %s",nwtop)
+        yield from dts.query_create(nw_xpath,
+                                    rwdts.XactFlag.ADVISE,
+                                    nwtop)
+
+    @asyncio.coroutine
+    def configure_sfc_network(self, dts):
+        nwtop = RwTl.YangData_IetfNetwork()
+        l2top = MyL2Topology(nwtop, self.log)
+        l2top.setup_all()
+
+        provtop = MyProvTopology(nwtop, l2top, self.log)
+        provtop.setup_all()
+
+        vmtop = MyVMTopology(nwtop, l2top, provtop, self.log)
+        vmtop.setup_all()
+
+        sfctop = MySfcTopology(nwtop, l2top, provtop, vmtop, self.log)
+        sfctop.setup_all()
+
+        nw_xpath = "C,/nd:network"
+        self.log.info("Configuring SFC network: %s",nwtop)
+        yield from dts.query_create(nw_xpath,
+                                    rwdts.XactFlag.ADVISE,
+                                    nwtop)
+
+
+    #@unittest.skip("Skipping test_network_config")                            
+    def test_network_config(self):
+        self.log.debug("STARTING - test_network_config")
+        tinfo = self.new_tinfo('static_network')
+        dts = rift.tasklets.DTS(tinfo, self.schema, self.loop)
+
+        @asyncio.coroutine
+        def run_test():
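+            # Wait for the rwvnstasklet started in configure_suite() to come
+            # up before pushing topology configuration (suite timeout is 360s)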
+            yield from asyncio.sleep(120, loop=self.loop)
+            yield from self.configure_l2_network(dts)
+            yield from self.configure_prov_network(dts)
+            yield from self.configure_vm_network(dts)
+            yield from self.configure_sfc_network(dts)
+
+        future = asyncio.ensure_future(run_test(), loop=self.loop)
+        self.run_until(future.done)
+        if future.exception() is not None:
+            self.log.error("Caught exception during test")
+            raise future.exception()
+
+        self.log.debug("DONE - test_network_config")
+
+def main():
+    plugin_dir = os.path.join(os.environ["RIFT_INSTALL"], "usr/lib/rift/plugins")
+
+    if 'VNS_MGR_DIR' not in os.environ:
+        os.environ['VNS_MGR_DIR'] = os.path.join(plugin_dir, 'rwvns')
+
+    if 'MESSAGE_BROKER_DIR' not in os.environ:
+        os.environ['MESSAGE_BROKER_DIR'] = os.path.join(plugin_dir, 'rwmsgbroker-c')
+
+    if 'ROUTER_DIR' not in os.environ:
+        os.environ['ROUTER_DIR'] = os.path.join(plugin_dir, 'rwdtsrouter-c')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    unittest.main(testRunner=runner)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt b/rwlaunchpad/plugins/rwvns/vala/CMakeLists.txt
new file mode 100644 (file)
index 0000000..895ee85
--- /dev/null
@@ -0,0 +1,73 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Creation Date: 10/28/2015
+# 
+
+##
+# Allow specific compiler warnings
+##
+rift_allow_compiler_warning(unused-but-set-variable)
+
+set(VALA_NAME rwsdn)
+set(VALA_FILES ${VALA_NAME}.vala)
+set(VALA_VERSION 1.0)
+set(VALA_RELEASE 1)
+set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION})
+set(VALA_TYPELIB_PREFIX RwSdn-${VALA_VERSION})
+
+rift_add_vala(
+  ${VALA_LONG_NAME}
+  VALA_FILES ${VALA_FILES}
+  VALA_PACKAGES
+    rw_types-1.0 rw_yang-1.0 rw_keyspec-1.0 rw_yang_pb-1.0 rw_schema_proto-1.0
+    rw_log_yang-1.0 rw_base_yang-1.0 rwcal_yang-1.0 rwsdn_yang-1.0 rw_manifest_yang-1.0 protobuf_c-1.0 ietf_netconf_yang-1.0
+    ietf_network_yang-1.0 ietf_network_topology_yang-1.0
+    ietf_l2_topology_yang-1.0 rw_topology_yang-1.0
+    rw_log-1.0
+  VAPI_DIRS 
+    ${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang
+    ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+    ${RIFT_SUBMODULE_BINARY_ROOT}/rwlaunchpad/plugins/rwvns/yang/
+  GIR_PATHS 
+    ${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang
+    ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+    ${RIFT_SUBMODULE_BINARY_ROOT}/rwlaunchpad/plugins/rwvns/yang/
+  GENERATE_HEADER_FILE ${VALA_NAME}.h
+  GENERATE_SO_FILE lib${VALA_LONG_NAME}.so
+  GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
+  GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
+  GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
+  DEPENDS rwcal_yang rwsdn_yang mano_yang rwlog_gi rwschema_yang
+  )
+
+rift_install_vala_artifacts(
+  HEADER_FILES ${VALA_NAME}.h
+  SO_FILES lib${VALA_LONG_NAME}.so
+  VAPI_FILES ${VALA_LONG_NAME}.vapi
+  GIR_FILES ${VALA_TYPELIB_PREFIX}.gir
+  TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib
+  COMPONENT ${PKG_LONG_NAME}
+  DEST_PREFIX .
+  )
+
+
+set(subdirs
+  rwsdn_mock
+  rwsdn_sim
+  rwsdn_odl 
+  rwsdn-python
+  )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/CMakeLists.txt b/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/CMakeLists.txt
new file mode 100644 (file)
index 0000000..f8d8a71
--- /dev/null
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwsdn-plugin rwsdn-plugin.py)
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/rwsdn-plugin.py b/rwlaunchpad/plugins/rwvns/vala/rwsdn-python/rwsdn-plugin.py
new file mode 100644 (file)
index 0000000..374147d
--- /dev/null
@@ -0,0 +1,95 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import logging
+
+import gi
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwSdn', '1.0')
+from gi.repository import (
+    GObject,
+    RwSdn, # Vala package
+    RwTypes)
+
+import rw_status
+import rwlogger
+
+import rift.cal
+import rift.sdn
+
+logger = logging.getLogger('rwsdn')
+
+rwstatus = rw_status.rwstatus_from_exc_map({
+                IndexError: RwTypes.RwStatus.NOTFOUND,
+                KeyError: RwTypes.RwStatus.NOTFOUND,
+
+           })
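+# The rwstatus decorator built above catches the mapped exceptions raised by
+# the wrapped method and converts them into the corresponding
+# RwTypes.RwStatus return codes; main() below exercises this behavior.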
+
+
+class TopologyPlugin(GObject.Object, RwSdn.Topology):
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self._impl = None
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        providers = {
+            "sdnsim": rift.sdn.SdnSim,
+            "mock": rift.sdn.Mock,
+        }
+
+        logger.addHandler(
+            rwlogger.RwLogger(
+                subcategory="rwsdn",
+                log_hdl=rwlog_ctx,
+            )
+        )
+
+        self._impl = {}
+        for name, impl in providers.items():
+            try:
+                self._impl[name] = impl()
+
+            except Exception:
+                msg = "unable to load SDN implementation for {}"
+                logger.exception(msg.format(name))
+
+    @rwstatus
+    def do_get_network_list(self, account, network_top):
+        obj = self._impl[account.account_type]
+        return obj.get_network_list(account, network_top)
+
+def main():
+    @rwstatus
+    def blah():
+        raise IndexError()
+
+    a = blah()
+    assert(a == RwTypes.RwStatus.NOTFOUND)
+
+    @rwstatus({IndexError: RwTypes.RwStatus.NOTCONNECTED})
+    def blah2():
+        """Some function"""
+        raise IndexError()
+
+    a = blah2()
+    assert(a == RwTypes.RwStatus.NOTCONNECTED)
+    assert(blah2.__doc__ == "Some function")
+
+if __name__ == '__main__':
+    main()
+
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn.vala b/rwlaunchpad/plugins/rwvns/vala/rwsdn.vala
new file mode 100644 (file)
index 0000000..ec4ab31
--- /dev/null
@@ -0,0 +1,86 @@
+namespace RwSdn {
+
+  public interface Topology: GLib.Object {
+    /*
+     * Init routine
+     */
+    public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx);
+
+    /*
+     * Credential Validation related APIs
+     */
+    public abstract RwTypes.RwStatus validate_sdn_creds(
+      Rwsdn.SDNAccount account,
+      out Rwsdn.SdnConnectionStatus status);
+
+    /*
+     * Configuring  related APIs
+     */
+    /* TODO */
+
+    /*
+     * Network related APIs
+     */
+    public abstract RwTypes.RwStatus get_network_list(
+      Rwsdn.SDNAccount account,
+      out RwTopology.YangData_IetfNetwork network_topology);
+   
+    /*
+     * VNFFG Chain related APIs
+     */
+    public abstract RwTypes.RwStatus create_vnffg_chain(
+      Rwsdn.SDNAccount account,
+      Rwsdn.VNFFGChain vnffg_chain,
+      out string vnffg_id);
+
+    /*
+     * VNFFG Chain Terminate related APIs
+     */
+    public abstract RwTypes.RwStatus terminate_vnffg_chain(
+      Rwsdn.SDNAccount account,
+      string vnffg_id);
+
+
+    /*
+     * Network related APIs
+     */
+    public abstract RwTypes.RwStatus get_vnffg_rendered_paths(
+      Rwsdn.SDNAccount account,
+      out Rwsdn.VNFFGRenderedPaths rendered_paths);
+
+    /*
+     * Classifier related APIs
+     */
+    public abstract RwTypes.RwStatus create_vnffg_classifier(
+      Rwsdn.SDNAccount account,
+      Rwsdn.VNFFGClassifier vnffg_classifier, 
+      out string vnffg_classifier_id);
+
+    /*
+     * Classifier related APIs
+     */
+    public abstract RwTypes.RwStatus terminate_vnffg_classifier(
+      Rwsdn.SDNAccount account,
+      string vnffg_classifier_id);
+
+
+
+    /*
+     * Node Related APIs
+     */
+     /* TODO */
+
+    /*
+     * Termination-point Related APIs
+     */
+     /* TODO */
+
+    /*
+     * Link Related APIs
+     */
+     /* TODO */
+    
+  }
+}
+
+
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/CMakeLists.txt b/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/CMakeLists.txt
new file mode 100644 (file)
index 0000000..357e2ab
--- /dev/null
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwsdn_mock rwsdn_mock.py)
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/rwsdn_mock.py b/rwlaunchpad/plugins/rwvns/vala/rwsdn_mock/rwsdn_mock.py
new file mode 100644 (file)
index 0000000..2c0ffcc
--- /dev/null
@@ -0,0 +1,172 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import logging
+
+import gi
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwSdn', '1.0')
+from gi.repository import (
+    GObject,
+    RwSdn, # Vala package
+    RwTypes,
+    RwTopologyYang as RwTl,
+    RwsdnYang
+    )
+
+import rw_status
+import rwlogger
+
+logger = logging.getLogger('rwsdn.mock')
+
+
+class UnknownAccountError(Exception):
+    pass
+
+
+class MissingFileError(Exception):
+    pass
+
+
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+    MissingFileError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+GRUNT118 = {"name": "grunt118", "ip_addr": "10.66.4.118", "tps": ["eth0"]}
+GRUNT44 = {"name": "grunt44", "ip_addr": "10.66.4.44", "tps": ["eth0"]}
+AS1 = {"name":"AristaSw1", "ip_addr": "10.66.4.54", "tps": ["Ethernet8/7","Ethernet8/8"]}
+NW_NODES = [GRUNT118, GRUNT44, AS1]
+NW_BIDIR_LINKS = [{"src" : ("grunt118","eth0"), "dest" : ("AristaSw1","Ethernet8/7")}, 
+            {"src" : ("grunt44","eth0"), "dest" : ("AristaSw1","Ethernet8/8")}]
+
+
+class DataStore(object):
+    def __init__(self):
+        self.topology = None
+        self.nw = None
+        self.next_mac = 11
+
+    def create_link(self, cfg_src_node, cfg_src_tp, cfg_dest_node, cfg_dest_tp):
+        lnk= self.nw.link.add()
+        lnk.link_id = "urn:Rift:Lab:Ethernet:{}{}_{}{}".format(cfg_src_node, cfg_src_tp, cfg_dest_node, cfg_dest_tp)
+        lnk.source.source_node = cfg_src_node
+        lnk.source.source_tp = cfg_src_tp
+        lnk.destination.dest_node = cfg_dest_node
+        lnk.destination.dest_tp = cfg_dest_tp
+        # L2 link augmentation
+        lnk.l2_link_attributes.name = cfg_src_tp + cfg_dest_tp
+        lnk.l2_link_attributes.rate = 1000000000.00
+
+    def create_tp(self, node, cfg_tp):
+        tp = node.termination_point.add()
+        tp.tp_id = ("urn:Rift:Lab:{}:{}").format(node.node_id, cfg_tp)
+        # L2 TP augmentation
+        tp.l2_termination_point_attributes.description = cfg_tp
+        tp.l2_termination_point_attributes.maximum_frame_size = 1500
+        tp.l2_termination_point_attributes.mac_address = "00:1e:67:d8:48:" + str(self.next_mac)
+        self.next_mac = self.next_mac + 1
+        tp.l2_termination_point_attributes.tp_state = "in_use"
+        tp.l2_termination_point_attributes.eth_encapsulation = "ethernet"
+
+    def create_node(self, cfg_node):
+        node = self.nw.node.add()
+        node.node_id = cfg_node['name']
+        # L2 Node augmentation
+        node.l2_node_attributes.name = cfg_node['name']
+        node.l2_node_attributes.description = "Host with OVS-DPDK"
+        node.l2_node_attributes.management_address.append(cfg_node['ip_addr'])
+        for cfg_tp in cfg_node['tps']:
+            self.create_tp(node, cfg_tp)
+        
+    def create_default_topology(self):
+        logger.debug('Creating default topology: ')
+
+        self.topology = RwTl.YangData_IetfNetwork()
+        self.nw = self.topology.network.add()
+        self.nw.network_id = "L2HostTopology-Def1"
+        self.nw.server_provided = True
+
+        # L2 Network type augmentation
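+        # (instantiating the otherwise-empty l2-network container marks this
+        # network as an L2 topology)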
+        self.nw.network_types.l2_network = self.nw.network_types.l2_network.new()
+        # L2 Network augmentation
+        self.nw.l2_network_attributes.name = "Rift LAB SFC-Demo Host Network"
+
+        for cfg_node in NW_NODES:
+            self.create_node(cfg_node)
+
+        for cfg_link in NW_BIDIR_LINKS:
+            # Create both directions of each bidirectional link
+            self.create_link(cfg_link['src'][0], cfg_link['src'][1], cfg_link['dest'][0], cfg_link['dest'][1])
+            self.create_link(cfg_link['dest'][0], cfg_link['dest'][1], cfg_link['src'][0], cfg_link['src'][1])
+
+        return self.topology
+        
+class Resources(object):
+    def __init__(self):
+        self.networks = dict()
+        
+
+class MockPlugin(GObject.Object, RwSdn.Topology):
+    """This class implements the abstract methods in the Topology class.
+    Mock is used for unit testing."""
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self.resources = collections.defaultdict(Resources)
+        self.datastore = None
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    subcategory="rwsdn.mock",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
+        account = RwsdnYang.SDNAccount()
+        account.name = 'mock'
+        account.account_type = 'mock'
+        account.mock.username = 'rift'
+
+        self.datastore = DataStore()
+        self.topology = self.datastore.create_default_topology()
+            
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network_list(self, account):
+        """
+        Returns the list of discovered network
+
+        @param account - a SDN account
+
+        """
+        logger.debug('Get network list: ')
+
+        if self.topology:
+            logger.debug('Returning network list: ')
+            return self.topology
+
+        logger.debug('Returning empty network list: ')
+        return None
+
+        
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/CMakeLists.txt b/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/CMakeLists.txt
new file mode 100644 (file)
index 0000000..239f971
--- /dev/null
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwsdn_odl rwsdn_odl.py)
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/rwsdn_odl.py b/rwlaunchpad/plugins/rwvns/vala/rwsdn_odl/rwsdn_odl.py
new file mode 100644 (file)
index 0000000..3eb39fc
--- /dev/null
@@ -0,0 +1,1082 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import logging
+
+import requests
+
+import json
+import re
+import socket
+import time
+
+import gi
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwsdnYang', '1.0')
+gi.require_version('RwSdn', '1.0')
+gi.require_version('RwTopologyYang','1.0')
+
+from gi.repository import (
+    GObject,
+    RwSdn, # Vala package
+    RwTypes,
+    RwsdnYang, 
+    RwTopologyYang as RwTl,
+    )
+
+import rw_status
+import rwlogger
+
+
+logger = logging.getLogger('rwsdn.sdnodl')
+logger.setLevel(logging.DEBUG)
+
+
+sff_rest_based = True
+
+class UnknownAccountError(Exception):
+    pass
+
+
+class MissingFileError(Exception):
+    pass
+
+
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+    MissingFileError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+
+class SdnOdlPlugin(GObject.Object, RwSdn.Topology):
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self.sdnodl = SdnOdl()
+        
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    category="rw-cal-log",
+                    subcategory="odl", 
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_sdn_creds(self, account):
+        """
+        Validates the sdn account credentials for the specified account.
+        Performs an access to the resources using Keystone API. If creds
+        are not valid, returns an error code & reason string
+
+        @param account - a SDN account
+
+        Returns:
+            Validation Code and Details String
+        """
+        #logger.debug('Received validate SDN creds')
+        status = self.sdnodl.validate_account_creds(account)
+        #logger.debug('Done with validate SDN creds: %s', type(status))
+        return status
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network_list(self, account):
+        """
+        Returns the list of discovered networks
+
+        @param account - a SDN account
+
+        """
+        logger.debug('Received Get network list: ')
+        nwtop = self.sdnodl.get_network_list(account)
+        logger.debug('Done with get network list: %s', type(nwtop))
+        return nwtop
+
+    @rwstatus(ret_on_failure=[""])
+    def do_create_vnffg_chain(self, account,vnffg_chain):
+        """
+        Creates Service Function chain in ODL
+
+        @param account - a SDN account
+
+        """
+        logger.debug('Received Create VNFFG chain ')
+        vnffg_id = self.sdnodl.create_sfc(account, vnffg_chain)
+        logger.debug('Done with create VNFFG chain with name : %s', vnffg_id)
+        return vnffg_id
+
+    @rwstatus
+    def do_terminate_vnffg_chain(self, account,vnffg_id):
+        """
+        Terminate Service Function chain in ODL
+
+        @param account - a SDN account
+
+        """
+        logger.debug('Received terminate VNFFG chain for id %s ', vnffg_id)
+        # TODO: Currently all the RSP, SFPs , SFFs and SFs are deleted
+        # Need to handle deletion of specific RSP, SFFs, SFs etc
+        self.sdnodl.terminate_all_sfc(account)
+        logger.debug('Done with terminate VNFFG chain with name : %s', vnffg_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_vnffg_rendered_paths(self, account):
+        """
+           Get ODL Rendered Service Path List (SFC)
+
+           @param account - a SDN account
+        """
+        vnffg_list = self.sdnodl.get_rsp_list(account)
+        return vnffg_list 
+
+    @rwstatus(ret_on_failure=[None])
+    def do_create_vnffg_classifier(self, account, vnffg_classifier):
+        """
+           Add VNFFG Classifier 
+
+           @param account - a SDN account
+        """
+        classifier_name = self.sdnodl.create_sfc_classifier(account,vnffg_classifier)
+        return classifier_name 
+
+    @rwstatus(ret_on_failure=[None])
+    def do_terminate_vnffg_classifier(self, account, vnffg_classifier_name):
+        """
+           Add VNFFG Classifier 
+
+           @param account - a SDN account
+        """
+        self.sdnodl.terminate_sfc_classifier(account,vnffg_classifier_name)
+
+
+class Sff(object):
+    """
+    Create SFF object to hold SFF related details
+    """
+
+    def __init__(self,sff_name, mgmt_address, mgmt_port, dp_address, dp_port,sff_dp_name, sff_br_name=''):
+        self.name = sff_name
+        self.ip = mgmt_address
+        self.sff_rest_port = mgmt_port
+        self.sff_port = dp_port
+        self.dp_name = sff_dp_name
+        self.dp_ip = dp_address
+        self.br_name = sff_br_name
+        self.sf_dp_list = list()
+    
+    def add_sf_dp_to_sff(self,sf_dp):
+        self.sf_dp_list.append(sf_dp)
+
+    def __repr__(self):
+        return 'Name: {}, Bridge Name: {}, IP: {}, SF List: {}'.format(self.dp_name, self.br_name, self.ip, self.sf_dp_list)
+
+class SfDpLocator(object):
+    """
+    Create Service Function Data Plane Locator related Object to hold details related to each DP Locator endpoint
+    """
+    def __init__(self,name,sfdp_id,vnfr_name,vm_id):
+        self.name = name
+        self.port_id = sfdp_id
+        self.vnfr_name = vnfr_name
+        self.vm_id = vm_id
+        self.sff_name = None 
+        self.ovsdb_tp_name = None
+
+    def _update_sff_name(self,sff_name):
+        self.sff_name = sff_name
+
+    def _update_vnf_params(self,service_function_type,address, port,transport_type):
+        self.service_function_type = service_function_type
+        self.address = address
+        self.port = port
+        self.transport_type = "service-locator:{}".format(transport_type)
+
+    def __repr__(self):
+        return 'Name: {}, Port id: {}, VNFR ID: {}, VM ID: {}, SFF Name: {}'.format(self.name, self.port_id, self.vnfr_name, self.vm_id, self.sff_name)
+
+class SdnOdl(object):
+    """
+    SDN ODL Class to support REST based API calls
+    """
+
+    @property
+    def _network_topology_path(self):
+        return 'restconf/operational/network-topology:network-topology'  
+
+    @property
+    def _node_inventory_path(self):
+        return 'restconf/operational/opendaylight-inventory:nodes'  
+     
+    def _network_topology_rest_url(self,account):
+        return '{}/{}'.format(account.odl.url,self._network_topology_path)
+
+    def _node_inventory_rest_url(self,account):
+        return '{}/{}'.format(account.odl.url,self._node_inventory_path)
+
+    def _get_rest_url(self,account, rest_path):
+        return '{}/{}'.format(account.odl.url,rest_path)
+
+
+    def _get_peer_termination_point(self,node_inv,tp_id):
+        for node in node_inv['nodes']['node']:
+            if "node-connector" in node and len(node['node-connector']) > 0:
+                for nodec in node['node-connector']:
+                    if ("flow-node-inventory:name" in nodec and nodec["flow-node-inventory:name"] == tp_id):
+                        return(node['id'], nodec['id'])
+        return (None,None)
+
+    def _get_termination_point_mac_address(self,node_inv,tp_id):
+        for node in node_inv['nodes']['node']:
+            if "node-connector" in node and len(node['node-connector']) > 0:
+                for nodec in node['node-connector']:
+                    if ("flow-node-inventory:name" in nodec and nodec["flow-node-inventory:name"] == tp_id):
+                        return nodec.get("flow-node-inventory:hardware-address")
+
+    def _add_host(self,ntwk,node,term_point,vmid,node_inv):
+        for ntwk_node in ntwk.node:
+            if ntwk_node.node_id ==  vmid:
+                break
+        else:
+            ntwk_node = ntwk.node.add()
+            if "ovsdb:bridge-name" in node:
+                ntwk_node.rw_node_attributes.ovs_bridge_name = node["ovsdb:bridge-name"]
+            ntwk_node.node_id = vmid
+            intf_id = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'iface-id']
+            if intf_id:
+                ntwk_node_tp = ntwk_node.termination_point.add()
+                ntwk_node_tp.tp_id = intf_id[0]['external-id-value']
+                att_mac = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'attached-mac']
+                if att_mac:
+                    ntwk_node_tp.l2_termination_point_attributes.mac_address = att_mac[0]['external-id-value']
+                peer_node,peer_node_tp = self._get_peer_termination_point(node_inv,term_point['tp-id'])
+                if peer_node and peer_node_tp:
+                    nw_lnk = ntwk.link.add()
+                    nw_lnk.source.source_tp = ntwk_node_tp.tp_id
+                    nw_lnk.source.source_node = ntwk_node.node_id
+                    nw_lnk.destination.dest_tp = term_point['tp-id']
+                    nw_lnk.destination.dest_node = node['node-id']
+                    nw_lnk.link_id = peer_node_tp + '-' + 'source'
+
+                    nw_lnk = ntwk.link.add()
+                    nw_lnk.source.source_tp = term_point['tp-id']
+                    nw_lnk.source.source_node = node['node-id']
+                    nw_lnk.destination.dest_tp = ntwk_node_tp.tp_id
+                    nw_lnk.destination.dest_node = ntwk_node.node_id
+                    nw_lnk.link_id = peer_node_tp + '-' + 'dest'
+
+    def _get_address_from_node_inventory(self,node_inv,node_id):
+        for node in node_inv['nodes']['node']:
+            if node['id'] == node_id:
+                return node["flow-node-inventory:ip-address"]
+        return None
+
+    def _fill_network_list(self,nw_topo,node_inventory):
+        """
+        Fill Topology related information
+        """
+        nwtop = RwTl.YangData_IetfNetwork()
+
+        for topo in nw_topo['network-topology']['topology']:
+            if ('node' in topo and len(topo['node']) > 0):
+                ntwk = nwtop.network.add()
+                ntwk.network_id = topo['topology-id']
+                ntwk.server_provided = True
+                for node in topo['node']:
+                    if ('termination-point' in node and len(node['termination-point']) > 0):
+                        ntwk_node = ntwk.node.add()
+                        ntwk_node.node_id = node['node-id']
+                        addr = self._get_address_from_node_inventory(node_inventory,ntwk_node.node_id)
+                        if addr:
+                            ntwk_node.l2_node_attributes.management_address.append(addr)
+                        for term_point in node['termination-point']:
+                            ntwk_node_tp = ntwk_node.termination_point.add()
+                            ntwk_node_tp.tp_id = term_point['tp-id']
+                            mac_address = self._get_termination_point_mac_address(node_inventory,term_point['tp-id'])
+                            if mac_address:
+                                ntwk_node_tp.l2_termination_point_attributes.mac_address = mac_address
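+                            # If the ovsdb interface external-ids carry a vm-id,
+                            # add the owning VM as its own host node linked to
+                            # this termination point (see _add_host above)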
+                            if 'ovsdb:interface-external-ids' in term_point:
+                                vm_id = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'vm-id']
+                                if vm_id:
+                                    vmid = vm_id[0]['external-id-value']
+                                    self._add_host(ntwk,node,term_point,vmid,node_inventory)
+                if ('link' in topo and len(topo['link']) > 0):
+                    for link in topo['link']:
+                        nw_link = ntwk.link.add()
+                        if 'destination' in link:  
+                            nw_link.destination.dest_tp = link['destination'].get('dest-tp')
+                            nw_link.destination.dest_node = link['destination'].get('dest-node')
+                        if 'source' in link:
+                            nw_link.source.source_node = link['source'].get('source-node')
+                            nw_link.source.source_tp = link['source'].get('source-tp')
+                        nw_link.link_id = link.get('link-id')
+        return nwtop
+
+
+    def validate_account_creds(self, account):
+        """
+            Validate the SDN account credentials by accessing the rest API using the provided credentials
+        """
+        status = RwsdnYang.SdnConnectionStatus()
+        url = '{}/{}'.format(account.odl.url,"restconf")
+        try:
+            r=requests.get(url,auth=(account.odl.username,account.odl.password))
+            r.raise_for_status()
+        except requests.exceptions.HTTPError as e:
+            msg = "SdnOdlPlugin: SDN account credential validation failed. Exception: %s", str(e)
+            #logger.error(msg)
+            print(msg)
+            status.status = "failure"
+            status.details = "Invalid Credentials: %s" % str(e)
+        except Exception as e:
+            msg = "SdnPdlPlugin: SDN connection failed. Exception: %s", str(e)
+            #logger.error(msg)
+            print(msg)
+            status.status = "failure"
+            status.details = "Connection Failed (Invlaid URL): %s" % str(e)
+        else:
+            print("SDN Successfully connected")
+            status.status = "success"
+            status.details = "Connection was successful"
+
+        return status
+
+    def get_network_list(self, account):
+        """
+           Get the networks details from ODL
+        """
+        url = self._network_topology_rest_url(account)
+        r=requests.get(url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+        nw_topo = r.json()
+
+        url = self._node_inventory_rest_url(account)
+        r = requests.get(url,auth=(account.odl.username,account.odl.password)) 
+        r.raise_for_status()
+        node_inventory = r.json()
+        return self._fill_network_list(nw_topo,node_inventory)
+
+    @property
+    def _service_functions_path(self):
+        return 'restconf/config/service-function:service-functions'
+
+    @property
+    def _service_function_path(self):
+        return 'restconf/config/service-function:service-functions/service-function/{}'
+
+    @property
+    def _service_function_forwarders_path(self):
+        return 'restconf/config/service-function-forwarder:service-function-forwarders'
+
+    @property
+    def _service_function_forwarder_path(self):
+        return 'restconf/config/service-function-forwarder:service-function-forwarders/service-function-forwarder/{}'
+
+    @property
+    def _service_function_chains_path(self):
+        return 'restconf/config/service-function-chain:service-function-chains'
+
+    @property
+    def _service_function_chain_path(self):
+        return 'restconf/config/service-function-chain:service-function-chains/service-function-chain/{}'
+   
+    @property
+    def _sfp_metadata_path(self):
+        return 'restconf/config/service-function-path-metadata:service-function-metadata/context-metadata/{}'
+   
+    @property
+    def _sfps_metadata_path(self):
+        return 'restconf/config/service-function-path-metadata:service-function-metadata'
+   
+    @property
+    def _sfps_path(self):
+        return 'restconf/config/service-function-path:service-function-paths'
+
+    @property
+    def _sfp_path(self):
+        return 'restconf/config/service-function-path:service-function-paths/service-function-path/{}'
+
+
+    @property
+    def _create_rsp_path(self):
+        return 'restconf/operations/rendered-service-path:create-rendered-path'
+
+    @property
+    def _delete_rsp_path(self):
+        return 'restconf/operations/rendered-service-path:delete-rendered-path'
+
+
+    @property
+    def _get_rsp_paths(self):
+        return 'restconf/operational/rendered-service-path:rendered-service-paths'
+
+    @property
+    def _get_rsp_path(self):
+        return 'restconf/operational/rendered-service-path:rendered-service-paths/rendered-service-path/{}'
+
+    @property
+    def _access_list_path(self):
+        return 'restconf/config/ietf-access-control-list:access-lists/acl/{}'
+
+    @property
+    def _service_function_classifier_path(self):
+        return 'restconf/config/service-function-classifier:service-function-classifiers/service-function-classifier/{}'
+
+    @property
+    def _access_lists_path(self):
+        return 'restconf/config/ietf-access-control-list:access-lists'
+
+    @property
+    def _service_function_classifiers_path(self):
+        return 'restconf/config/service-function-classifier:service-function-classifiers'
+
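+    # These properties are RESTCONF sub-paths that get combined with the
+    # account URL via self._get_rest_url(account, path). An illustrative
+    # composition (host and port are placeholders):
+    #
+    #     self._get_rest_url(account, self._sfp_path.format("chain-1"))
+    #     # -> http://<odl-host>:8181/restconf/config/
+    #     #    service-function-path:service-function-paths/
+    #     #    service-function-path/chain-1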
+
+    def _create_sf(self,account,vnffg_chain,sf_dp_list):
+        "Create SF"
+        sf_json = {}
+
+        for vnf in vnffg_chain.vnf_chain_path:
+            for vnfr in vnf.vnfr_ids:
+                sf_url = self._get_rest_url(account,self._service_function_path.format(vnfr.vnfr_name))
+                print(sf_url)
+                r=requests.get(sf_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'})
+                # If the SF is not found, create a new SF
+                if r.status_code == 200:
+                    logger.info("SF with name %s is already present in ODL. Skipping update", vnfr.vnfr_name)
+                    continue
+                elif r.status_code != 404:
+                    r.raise_for_status()
+
+                sf_dict = {}
+                sf_dict['name'] = vnfr.vnfr_name
+                sf_dict['nsh-aware'] = vnf.nsh_aware
+                sf_dict['type'] = vnf.service_function_type
+                sf_dict['ip-mgmt-address'] = vnfr.mgmt_address
+                sf_dict['rest-uri'] = 'http://{}:{}'.format(vnfr.mgmt_address, vnfr.mgmt_port)
+
+                sf_dict['sf-data-plane-locator'] = list()
+                for vdu in vnfr.vdu_list:
+                    sf_dp = {}
+                    if vdu.port_id in sf_dp_list.keys():
+                        sf_dp_entry = sf_dp_list[vdu.port_id]
+                        sf_dp['name'] = sf_dp_entry.name
+                        sf_dp['ip'] = vdu.address
+                        sf_dp['port'] = vdu.port
+                        sf_dp['transport'] = "service-locator:{}".format(vnf.transport_type)
+                        if vnfr.sff_name:
+                            sf_dp['service-function-forwarder'] = vnfr.sff_name
+                        else:
+                            sff_name = sf_dp_entry.sff_name
+                            if sff_name is None:
+                                logger.error("SFF not found for port %s in SF %s", vdu.port_id, vnfr.vnfr_name)
+                            sf_dp['service-function-forwarder'] = sff_name
+                            sf_dp['service-function-ovs:ovs-port'] = dict()
+                            if sf_dp_entry.ovsdb_tp_name is not None:
+                                sf_dp['service-function-ovs:ovs-port']['port-id'] =  sf_dp_entry.ovsdb_tp_name
+                        sf_dict['sf-data-plane-locator'].append(sf_dp)
+                    else:
+                        logger.error("Port %s not found in SF DP list",vdu.port_id)
+
+                sf_json['service-function'] = sf_dict
+                sf_data = json.dumps(sf_json)
+                sf_url = self._get_rest_url(account,self._service_function_path.format(vnfr.vnfr_name))
+                print(sf_url)
+                print(sf_data)
+                r=requests.put(sf_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sf_data)
+                r.raise_for_status()
+
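+    # The PUT body assembled above has roughly this shape (all values
+    # illustrative):
+    #
+    #     {"service-function": {
+    #         "name": "vnfr-1", "nsh-aware": true, "type": "firewall",
+    #         "ip-mgmt-address": "10.0.0.5",
+    #         "rest-uri": "http://10.0.0.5:8000",
+    #         "sf-data-plane-locator": [{
+    #             "name": "dp0", "ip": "10.0.1.5", "port": 4790,
+    #             "transport": "service-locator:vxlan-gpe",
+    #             "service-function-forwarder": "sff-1"}]}}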
+
+    def _create_sff(self,account,vnffg_chain,sff):
+        "Create SFF"
+        sff_json = {}
+        sff_dict = {}
+        #sff_dp_name = "SFF1" + '-' + 'DP1'
+        sff_dp_name = sff.dp_name 
+                
+        sff_url = self._get_rest_url(account,self._service_function_forwarder_path.format(sff.name))
+        print(sff_url)
+        r=requests.get(sff_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'})
+        # If the SFF is not found, create a new SFF
+        if r.status_code == 200:
+            logger.info("SFF with name %s is already present in ODL. Skipping full update", sff.name)
+            sff_dict = r.json()
+            sff_updated = False
+            for sf_dp in sff.sf_dp_list:
+                for sff_sf in sff_dict['service-function-forwarder'][0]['service-function-dictionary']:
+                    if sf_dp.vnfr_name == sff_sf['name']:
+                        logger.info("SF with name %s is already found in SFF %s SF Dictionay. Skipping update",sf_dp.vnfr_name,sff.name) 
+                        break
+                else:
+                    logger.info("SF with name %s is not found in SFF %s SF Dictionay",sf_dp.vnfr_name, sff.name)
+                    sff_updated = True
+                    sff_sf_dict = {}
+                    sff_sf_dp_loc = {}
+                    sff_sf_dict['name'] = sf_dp.vnfr_name
+
+                    # The following two lines apply only to ODL Beryllium
+                    sff_sf_dp_loc['sff-dpl-name'] = sff_dp_name
+                    sff_sf_dp_loc['sf-dpl-name'] = sf_dp.name
+
+                    sff_sf_dict['sff-sf-data-plane-locator'] = sff_sf_dp_loc
+                    sff_dict['service-function-forwarder'][0]['service-function-dictionary'].append(sff_sf_dict)
+            if sff_updated is True:
+                sff_data = json.dumps(sff_dict)
+                print(sff_data)
+                r=requests.put(sff_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sff_data)
+                r.raise_for_status()
+            return        
+        elif r.status_code != 404:
+            r.raise_for_status()
+        
+        sff_name = sff.name
+        sff_ip = sff.ip
+        sff_dp_ip = sff.dp_ip
+        sff_port = sff.sff_port
+        sff_bridge_name = ''
+        sff_rest_port = sff.sff_rest_port
+        sff_ovs_op = {}
+        if sff_rest_based is False:
+            sff_bridge_name = sff.br_name
+            sff_ovs_op  = {"key": "flow",
+                       "nshc1": "flow",
+                       "nsp": "flow",
+                       "remote-ip": "flow",
+                       "dst-port": sff_port,
+                       "nshc3": "flow",
+                       "nshc2": "flow",
+                       "nshc4": "flow",
+                       "nsi": "flow"}
+
+
+        sff_dict['name'] = sff_name
+        sff_dict['service-node'] = ''
+        sff_dict['ip-mgmt-address'] = sff_ip
+        if sff_rest_based:
+            sff_dict['rest-uri'] = 'http://{}:{}'.format(sff_ip, sff_rest_port)
+        else:
+            sff_dict['service-function-forwarder-ovs:ovs-bridge'] = {"bridge-name": sff_bridge_name}
+        sff_dict['service-function-dictionary'] = list()
+        for sf_dp in sff.sf_dp_list:
+            sff_sf_dict = {}
+            sff_sf_dp_loc = {}
+            sff_sf_dict['name'] = sf_dp.vnfr_name
+
+            # The following commented lines are required only for ODL Lithium
+            #sff_sf_dict['type'] = sf_dp.service_function_type
+            #sff_sf_dp_loc['ip'] = sf_dp.address
+            #sff_sf_dp_loc['port'] = sf_dp.port
+            #sff_sf_dp_loc['transport'] = sf_dp.transport_type
+            #sff_sf_dp_loc['service-function-forwarder-ovs:ovs-bridge'] = {}
+
+            # The following two lines apply only to ODL Beryllium
+            sff_sf_dp_loc['sff-dpl-name'] = sff_dp_name
+            sff_sf_dp_loc['sf-dpl-name'] = sf_dp.name
+
+            sff_sf_dict['sff-sf-data-plane-locator'] = sff_sf_dp_loc
+            sff_dict['service-function-dictionary'].append(sff_sf_dict)
+
+        sff_dict['sff-data-plane-locator'] = list()
+        sff_dp = {}
+        dp_loc = {} 
+        sff_dp['name'] = sff_dp_name 
+        dp_loc['ip'] = sff_dp_ip
+        dp_loc['port'] = sff_port
+        dp_loc['transport'] = 'service-locator:vxlan-gpe'
+        sff_dp['data-plane-locator'] = dp_loc
+        if sff_rest_based is False:
+            sff_dp['service-function-forwarder-ovs:ovs-options'] = sff_ovs_op
+            #sff_dp["service-function-forwarder-ovs:ovs-bridge"] = {'bridge-name':sff_bridge_name}
+            sff_dp["service-function-forwarder-ovs:ovs-bridge"] = {}
+        sff_dict['sff-data-plane-locator'].append(sff_dp)
+
+        sff_json['service-function-forwarder'] = sff_dict
+        sff_data = json.dumps(sff_json)
+        print(sff_data)
+        r=requests.put(sff_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sff_data)
+        r.raise_for_status()
+
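+    # For OVS-based SFFs (sff_rest_based is False) the ovs-options above
+    # leave the VXLAN-GPE/NSH tunnel fields OpenFlow-controlled ("flow").
+    # A resulting data-plane locator looks roughly like (illustrative):
+    #
+    #     {"name": "SFF1-DP1",
+    #      "data-plane-locator": {"ip": "10.0.2.5", "port": 4790,
+    #                             "transport": "service-locator:vxlan-gpe"},
+    #      "service-function-forwarder-ovs:ovs-options": {
+    #          "dst-port": 4790, "key": "flow", "nsp": "flow", "nsi": "flow",
+    #          "remote-ip": "flow", ...}}
+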
+    def _create_sfc(self,account,vnffg_chain):
+        "Create SFC"
+        sfc_json = {}
+        sfc_dict = {}
+        sfc_dict['name'] = vnffg_chain.name
+        sfc_dict['sfc-service-function'] = list()
+        vnf_chain_list = sorted(vnffg_chain.vnf_chain_path, key = lambda x: x.order)
+        for vnf in vnf_chain_list:
+            sfc_sf_dict = {}
+            sfc_sf_dict['name'] = vnf.service_function_type
+            sfc_sf_dict['type'] = vnf.service_function_type
+            sfc_sf_dict['order'] = vnf.order 
+            sfc_dict['sfc-service-function'].append(sfc_sf_dict)
+        sfc_json['service-function-chain'] = sfc_dict
+        sfc_data = json.dumps(sfc_json)
+        sfc_url = self._get_rest_url(account,self._service_function_chain_path.format(vnffg_chain.name))
+        print(sfc_url)
+        print(sfc_data)
+        r=requests.put(sfc_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sfc_data)
+        r.raise_for_status()
+       
+    def _create_sfp_metadata(self,account,sfc_classifier):
+        " Create SFP metadata"
+        sfp_meta_json = {}
+        sfp_meta_dict = {}
+        sfp_meta_dict['name'] = sfc_classifier.name
+        if sfc_classifier.vnffg_metadata.ctx1:
+            sfp_meta_dict['context-header1'] = sfc_classifier.vnffg_metadata.ctx1
+        if sfc_classifier.vnffg_metadata.ctx2:
+            sfp_meta_dict['context-header2'] = sfc_classifier.vnffg_metadata.ctx2
+        if sfc_classifier.vnffg_metadata.ctx3:
+            sfp_meta_dict['context-header3'] = sfc_classifier.vnffg_metadata.ctx3
+        if sfc_classifier.vnffg_metadata.ctx4:
+            sfp_meta_dict['context-header4'] = sfc_classifier.vnffg_metadata.ctx4
+
+        sfp_meta_json['context-metadata'] = sfp_meta_dict
+        sfp_meta_data = json.dumps(sfp_meta_json)
+        sfp_meta_url = self._get_rest_url(account,self._sfp_metadata_path.format(sfc_classifier.name))
+        print(sfp_meta_url)
+        print(sfp_meta_data)
+        r=requests.put(sfp_meta_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sfp_meta_data)
+        r.raise_for_status()
+
+    def _create_sfp(self,account,vnffg_chain, sym_chain=False,classifier_name=None,vnffg_metadata_name=None):
+        "Create SFP"
+        sfp_json = {}
+        sfp_dict = {}
+        sfp_dict['name'] = vnffg_chain.name
+        sfp_dict['service-chain-name'] = vnffg_chain.name
+        sfp_dict['symmetric'] = sym_chain
+        sfp_dict['transport-type'] = 'service-locator:vxlan-gpe'
+        if vnffg_metadata_name:
+            sfp_dict['context-metadata'] = vnffg_metadata_name 
+        if classifier_name: 
+            sfp_dict['classifier'] = classifier_name 
+
+        sfp_json['service-function-path'] = sfp_dict
+        sfp_data = json.dumps(sfp_json)
+        sfp_url = self._get_rest_url(account,self._sfp_path.format(vnffg_chain.name))
+        print(sfp_url)
+        print(sfp_data)
+        r=requests.put(sfp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sfp_data)
+        r.raise_for_status()
+
+    def _create_rsp(self,account,vnffg_chain_name, sym_chain=True):
+        "Create RSP"
+        rsp_json = {}
+        rsp_input = {}
+        rsp_json['input'] = {}
+        rsp_input['name'] = vnffg_chain_name
+        rsp_input['parent-service-function-path'] = vnffg_chain_name
+        rsp_input['symmetric'] = sym_chain
+
+        rsp_json['input'] = rsp_input
+        rsp_data = json.dumps(rsp_json)
+        self._rsp_data = rsp_json
+        rsp_url = self._get_rest_url(account,self._create_rsp_path)
+        print(rsp_url)
+        print(rsp_data)
+        r=requests.post(rsp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=rsp_data)
+        r.raise_for_status()
+        print(r.json())
+        output_json = r.json()
+        return output_json['output']['name']
+        
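+    # The create-rendered-path RPC exchanged above looks roughly like this
+    # (names illustrative; ODL derives the output name from the SFP):
+    #
+    #     POST .../restconf/operations/rendered-service-path:create-rendered-path
+    #     {"input": {"name": "chain-1",
+    #                "parent-service-function-path": "chain-1",
+    #                "symmetric": true}}
+    #     -> {"output": {"name": "chain-1-Path-1"}}
+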
+    def _get_sff_list_for_chain(self, account,sf_dp_list):
+        """
+        Get List of all SFF that needs to be created based on VNFs included in VNFFG chain.
+        """
+
+        sff_list = {}
+        if sf_dp_list is None:
+            logger.error("VM List for vnffg chain is empty while trying to get SFF list")
+        url = self._network_topology_rest_url(account)
+        r=requests.get(url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+        nw_topo = r.json()
+
+        for topo in nw_topo['network-topology']['topology']:
+            if ('node' in topo and len(topo['node']) > 0):
+                for node in topo['node']:
+                    if ('termination-point' in node and len(node['termination-point']) > 0):
+                        for term_point in node['termination-point']:
+                            if 'ovsdb:interface-external-ids' in term_point:
+                                vm_id = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'vm-id']
+                                if len(vm_id) == 0:
+                                    continue
+                                vmid = vm_id[0]['external-id-value']
+                                intf_id = [res for res in term_point['ovsdb:interface-external-ids'] if res['external-id-key'] == 'iface-id']
+                                if len(intf_id) == 0:
+                                    continue 
+                                intfid = intf_id[0]['external-id-value'] 
+                                if intfid not in sf_dp_list.keys():
+                                    continue
+                                if sf_dp_list[intfid].vm_id != vmid:
+                                    logger.error("Intf ID %s is not present in VM %s", intfid, vmid)  
+                                    continue 
+                                sf_dp_list[intfid].ovsdb_tp_name = term_point['ovsdb:name']
+                           
+                                if 'ovsdb:managed-by' in node:
+                                    rr=re.search('network-topology:node-id=\'([-\w\:\/]*)\'',node['ovsdb:managed-by'])
+                                    node_id = rr.group(1)
+                                    ovsdb_node = [node  for node in topo['node'] if node['node-id'] == node_id]
+                                    if ovsdb_node:
+                                        if 'ovsdb:connection-info' in ovsdb_node[0]:
+                                            sff_ip = ovsdb_node[0]['ovsdb:connection-info']['local-ip']
+                                            sff_br_name = node['ovsdb:bridge-name']
+                                            sff_br_uuid = node['ovsdb:bridge-uuid']
+                                            sff_dp_ip = sff_ip
+
+                                            if 'ovsdb:openvswitch-other-configs' in  ovsdb_node[0]: 
+                                                for other_key in ovsdb_node[0]['ovsdb:openvswitch-other-configs']:
+                                                    if other_key['other-config-key'] == 'local_ip':
+                                                        local_ip_str = other_key['other-config-value']
+                                                        sff_dp_ip = local_ip_str.split(',')[0]
+                                                        break
+
+                                            sff_name = socket.getfqdn(sff_ip)
+                                            if sff_br_uuid in sff_list:
+                                                sff_list[sff_name].add_sf_dp_to_sff(sf_dp_list[intfid])
+                                                sf_dp_list[intfid]._update_sff_name(sff_name)
+                                            else:
+                                                sff_dp_ip = sff_ip   #overwrite sff_dp_ip to SFF ip for now
+                                                sff_list[sff_name] = Sff(sff_name,sff_ip,6000, sff_dp_ip, 4790,sff_br_uuid,sff_br_name)
+                                                sf_dp_list[intfid]._update_sff_name(sff_name)
+                                                sff_list[sff_name].add_sf_dp_to_sff(sf_dp_list[intfid])
+        return sff_list
+                                         
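+    # The topology walk above keys off Neutron-style OVSDB external-ids on
+    # each termination point, e.g. (illustrative):
+    #
+    #     {"external-id-key": "iface-id", "external-id-value": "<neutron-port-uuid>"}
+    #     {"external-id-key": "vm-id",    "external-id-value": "<nova-vm-uuid>"}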
+
+    def _get_sf_dp_list_for_chain(self,account,vnffg_chain):
+        """
+        Get list of all Service Function Data Plane Locators present in VNFFG 
+        useful for easy reference while creating SF and SFF
+        """
+        sfdp_list = {}
+        for vnf in vnffg_chain.vnf_chain_path:
+            for vnfr in vnf.vnfr_ids:
+                for vdu in vnfr.vdu_list:
+                    sfdp = SfDpLocator(vdu.name,vdu.port_id,vnfr.vnfr_name, vdu.vm_id)
+                    sfdp._update_vnf_params(vnf.service_function_type, vdu.address, vdu.port, vnf.transport_type)
+                    if vnfr.sff_name:
+                        sfdp._update_sff_name(vnfr.sff_name)
+                    sfdp_list[vdu.port_id] = sfdp 
+        return sfdp_list
+
+    def create_sfc(self, account, vnffg_chain):
+        "Create SFC chain"
+
+        sff_list = {}
+        sf_dp_list = {}
+
+        sf_dp_list = self._get_sf_dp_list_for_chain(account,vnffg_chain)
+
+        if sff_rest_based is False and len(vnffg_chain.sff) == 0:
+            # Get the list of all SFFs required for vnffg chain
+            sff_list = self._get_sff_list_for_chain(account,sf_dp_list)
+
+        for sff in vnffg_chain.sff:
+            sff_list[sff.name] = Sff(sff.name, sff.mgmt_address,sff.mgmt_port,sff.dp_endpoints[0].address, sff.dp_endpoints[0].port, sff.name)
+            for _,sf_dp in sf_dp_list.items():
+                if sf_dp.sff_name and sf_dp.sff_name == sff.name:
+                    sff_list[sff.name].add_sf_dp_to_sff(sf_dp)
+
+        # Create all the SFs in the VNFFG chain
+        self._create_sf(account,vnffg_chain,sf_dp_list)
+
+        for _,sff in sff_list.items():
+            self._create_sff(account,vnffg_chain,sff)
+
+
+        self._create_sfc(account,vnffg_chain)
+
+        self._create_sfp(account,vnffg_chain,classifier_name=vnffg_chain.classifier_name,
+                                   vnffg_metadata_name=vnffg_chain.classifier_name)
+
+        ## An update to an SFF could have deleted some RSPs; get the list of
+        ## SFPs, check that an RSP exists for each, and create any missing ones
+        #rsp_name = self._create_rsp(account,vnffg_chain)
+        #return rsp_name
+        self._create_all_rsps(account)
+        self._recreate_all_sf_classifiers(account)
+        return vnffg_chain.name
+
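+    # End to end, one create_sfc() call thus PUTs the SFs, the SFFs, the
+    # SFC and the SFP, then re-creates RSPs and classifiers, and returns
+    # the chain name. Minimal usage sketch (illustrative names):
+    #
+    #     chain_name = plugin.create_sfc(account, vnffg_chain)
+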
+    def _recreate_all_sf_classifiers(self,account):
+        """
+        Re create all SF classifiers
+        """
+        sfcl_url = self._get_rest_url(account,self._service_function_classifiers_path)
+        print(sfcl_url)
+        # Get the classifiers
+        r=requests.get(sfcl_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'})
+        if r.status_code == 200:
+            print(r)
+            sfcl_json = r.json()
+        elif r.status_code == 404:
+            return         
+        else: 
+            r.raise_for_status()
+
+        # Delete the classifiers, then re-add the same set
+        r=requests.delete(sfcl_url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+        # Re-add them after a short delay
+        time.sleep(3)
+        print(sfcl_json)
+        sfcl_data = json.dumps(sfcl_json)
+        r=requests.put(sfcl_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sfcl_data)
+        r.raise_for_status()
+
+    def _create_all_rsps(self,account):
+        """
+        Create all the RSPs for SFP found
+        """
+        sfps_url = self._get_rest_url(account,self._sfps_path)
+        r=requests.get(sfps_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'})
+        r.raise_for_status()
+        sfps_json = r.json()
+        if 'service-function-path' in sfps_json['service-function-paths']:
+            for sfp in sfps_json['service-function-paths']['service-function-path']:
+                rsp_url = self._get_rest_url(account,self._get_rsp_path.format(sfp['name']))
+                r = requests.get(rsp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'})
+                if r.status_code == 404:
+                    # Create the RSP
+                    logger.info("Creating RSP for Service Path with name %s",sfp['name'])
+                    self._create_rsp(account,sfp['name'])
+
+    def delete_all_sf(self, account):
+        "Delete all the SFs"
+        sf_url = self._get_rest_url(account,self._service_functions_path)
+        print(sf_url)
+        r=requests.delete(sf_url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+
+
+    def delete_all_sff(self, account):
+        "Delete all the SFFs"
+        sff_url = self._get_rest_url(account,self._service_function_forwarders_path)
+        print(sff_url)
+        r=requests.delete(sff_url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+
+    def delete_all_sfc(self, account):
+        "Delete all the SFCs"
+        sfc_url = self._get_rest_url(account,self._service_function_chains_path)
+        print(sfc_url)
+        r=requests.delete(sfc_url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+
+    def delete_all_sfp_metadata(self, account):
+        "Delete all the SFPs metadata"
+        sfp_metadata_url = self._get_rest_url(account,self._sfps_metadata_path)
+        print(sfp_metadata_url)
+        r=requests.delete(sfp_metadata_url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+
+    def delete_all_sfp(self, account):
+        "Delete all the SFPs"
+        sfp_url = self._get_rest_url(account,self._sfps_path)
+        print(sfp_url)
+        r=requests.delete(sfp_url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+
+    def delete_all_rsp(self, account):
+        "Delete all the RSP"
+        #rsp_list = self.get_rsp_list(account)
+        url = self._get_rest_url(account,self._get_rsp_paths)
+        print(url)
+        r = requests.get(url,auth=(account.odl.username,account.odl.password)) 
+        r.raise_for_status()
+        print(r.json())
+        rsp_list = r.json()
+
+        #for vnffg in rsp_list.vnffg_rendered_path: 
+        for sfc_rsp in rsp_list['rendered-service-paths']['rendered-service-path']:
+            rsp_json = {}
+            rsp_input = {}
+            rsp_json['input'] = {}
+            rsp_input['name'] = sfc_rsp['name']
+
+            rsp_json['input'] = rsp_input
+            rsp_data = json.dumps(rsp_json)
+            self._rsp_data = rsp_json
+            rsp_url = self._get_rest_url(account,self._delete_rsp_path)
+            print(rsp_url)
+            print(rsp_data)
+
+            r=requests.post(rsp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=rsp_data)
+            r.raise_for_status()
+            print(r.json())
+            #output_json = r.json()
+            #return output_json['output']['name']
+            
+    def terminate_all_sfc(self, account):
+        "Terminate SFC chain"
+        self.delete_all_rsp(account)
+        self.delete_all_sfp(account)
+        self.delete_all_sfc(account)
+        self.delete_all_sff(account)
+        self.delete_all_sf(account)
+
+    def _fill_rsp_list(self,sfc_rsp_list,sff_list):
+        vnffg_rsps = RwsdnYang.VNFFGRenderedPaths()
+        for sfc_rsp in sfc_rsp_list['rendered-service-paths']['rendered-service-path']:
+            rsp = vnffg_rsps.vnffg_rendered_path.add()
+            rsp.name = sfc_rsp['name']
+            rsp.path_id = sfc_rsp['path-id']
+            for sfc_rsp_hop in sfc_rsp['rendered-service-path-hop']:
+                rsp_hop = rsp.rendered_path_hop.add()
+                rsp_hop.hop_number =  sfc_rsp_hop['hop-number']
+                rsp_hop.service_index = sfc_rsp_hop['service-index']
+                rsp_hop.vnfr_name =  sfc_rsp_hop['service-function-name']
+                rsp_hop.service_function_forwarder.name = sfc_rsp_hop['service-function-forwarder']
+                for sff in sff_list['service-function-forwarders']['service-function-forwarder']:
+                    if sff['name'] == rsp_hop.service_function_forwarder.name:
+                        rsp_hop.service_function_forwarder.ip_address = sff['sff-data-plane-locator'][0]['data-plane-locator']['ip']
+                        rsp_hop.service_function_forwarder.port = sff['sff-data-plane-locator'][0]['data-plane-locator']['port']
+                        break
+        return vnffg_rsps
+             
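+    # The operational rendered-service-path entries parsed above look
+    # roughly like (illustrative):
+    #
+    #     {"name": "chain-1-Path-1", "path-id": 1,
+    #      "rendered-service-path-hop": [{
+    #          "hop-number": 0, "service-index": 255,
+    #          "service-function-name": "vnfr-1",
+    #          "service-function-forwarder": "sff-1"}]}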
+
+    def get_rsp_list(self,account):
+        "Get RSP list"
+
+        sff_url = self._get_rest_url(account,self._service_function_forwarders_path)
+        print(sff_url)
+        r=requests.get(sff_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'})
+        r.raise_for_status()
+        sff_list = r.json()
+
+        url = self._get_rest_url(account,self._get_rsp_paths)
+        print(url)
+        r = requests.get(url,auth=(account.odl.username,account.odl.password)) 
+        r.raise_for_status()
+        print(r.json())
+        return self._fill_rsp_list(r.json(),sff_list)
+
+    def create_sfc_classifier(self, account, sfc_classifiers):
+        "Create SFC Classifiers"
+        self._create_sfp_metadata(account,sfc_classifiers)
+        self._add_acl_rules(account, sfc_classifiers)
+        self._create_sf_classifier(account, sfc_classifiers)
+        return sfc_classifiers.name
+
+    def terminate_sfc_classifier(self, account, sfc_classifier_name):
+        "Create SFC Classifiers"
+        self.delete_all_sfp_metadata(account)
+        self._terminate_sf_classifier(account, sfc_classifier_name)
+        self._del_acl_rules(account, sfc_classifier_name)
+
+    def _del_acl_rules(self,account,sfc_classifier_name):
+        " Terminate SF classifiers"
+        acl_url = self._get_rest_url(account,self._access_lists_path)
+        print(acl_url)
+        r=requests.delete(acl_url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+
+    def _terminate_sf_classifier(self,account,sfc_classifier_name):
+        " Terminate SF classifiers"
+        sfcl_url = self._get_rest_url(account,self._service_function_classifiers_path)
+        print(sfcl_url)
+        r=requests.delete(sfcl_url,auth=(account.odl.username,account.odl.password))
+        r.raise_for_status()
+
+    def _create_sf_classifier(self,account,sfc_classifiers):
+        " Create SF classifiers"
+        sf_classifier_json = {}
+        sf_classifier_dict = {}
+        sf_classifier_dict['name'] = sfc_classifiers.name
+        sf_classifier_dict['access-list'] = sfc_classifiers.name
+        sf_classifier_dict['scl-service-function-forwarder'] = list()
+        scl_sff = {}
+        scl_sff_name = ''
+
+        if sfc_classifiers.has_field('sff_name') and sfc_classifiers.sff_name is not None:
+            scl_sff_name = sfc_classifiers.sff_name
+        elif  sfc_classifiers.has_field('port_id') and sfc_classifiers.has_field('vm_id'):
+            sf_dp = SfDpLocator(sfc_classifiers.port_id, sfc_classifiers.port_id,'', sfc_classifiers.vm_id)
+            sf_dp_list= {}
+            sf_dp_list[sfc_classifiers.port_id] = sf_dp
+            self._get_sff_list_for_chain(account,sf_dp_list)
+
+            if sf_dp.sff_name is None:
+                logger.error("SFF not found for port %s, VM: %s",sfc_classifiers.port_id,sfc_classifiers.vm_id) 
+            else:
+                logger.info("SFF with name %s  found for port %s, VM: %s",sf_dp.sff_name, sfc_classifiers.port_id,sfc_classifiers.vm_id) 
+                scl_sff_name = sf_dp.sff_name
+        else:
+            rsp_url = self._get_rest_url(account,self._get_rsp_path.format(sfc_classifiers.rsp_name))
+            r = requests.get(rsp_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'})
+            if r.status_code == 200:
+                rsp_data = r.json()
+                if 'rendered-service-path' in rsp_data and len(rsp_data['rendered-service-path'][0]['rendered-service-path-hop']) > 0:
+                    scl_sff_name = rsp_data['rendered-service-path'][0]['rendered-service-path-hop'][0]['service-function-forwarder']
+        
+        logger.debug("SFF for classifer %s found is %s",sfc_classifiers.name, scl_sff_name)        
+        scl_sff['name'] = scl_sff_name
+        #scl_sff['interface'] = sff_intf_name
+        sf_classifier_dict['scl-service-function-forwarder'].append(scl_sff)
+
+        sf_classifier_json['service-function-classifier'] = sf_classifier_dict
+
+        sfcl_data = json.dumps(sf_classifier_json)
+        sfcl_url = self._get_rest_url(account,self._service_function_classifier_path.format(sfc_classifiers.name))
+        print(sfcl_url)
+        print(sfcl_data)
+        r=requests.put(sfcl_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=sfcl_data)
+        r.raise_for_status()
+
+    def _add_acl_rules(self, account,sfc_classifiers):
+        "Create ACL rules"
+        access_list_json = {}
+        access_list_dict = {}
+        acl_entry_list = list()
+        acl_list_dict = {}
+        for acl_rule in sfc_classifiers.match_attributes:
+            acl_entry = {} 
+            acl_entry['rule-name']  = acl_rule.name
+            acl_entry['actions'] = {}
+            #acl_entry['actions']['netvirt-sfc-acl:rsp-name'] = sfc_classifiers.rsp_name
+            acl_entry['actions']['service-function-acl:rendered-service-path'] = sfc_classifiers.rsp_name
+
+            matches = {}
+            for field, value in acl_rule.as_dict().items():
+                if field == 'ip_proto':
+                    matches['protocol'] = value
+                elif field == 'source_ip_address':
+                    matches['source-ipv4-network'] = value
+                elif field == 'destination_ip_address':
+                    matches['destination-ipv4-network'] = value
+                elif field == 'source_port':
+                    matches['source-port-range'] = {'lower-port':value, 'upper-port':value}
+                elif field == 'destination_port':
+                    matches['destination-port-range'] = {'lower-port':value, 'upper-port':value}
+            acl_entry['matches'] = matches
+            acl_entry_list.append(acl_entry)    
+        acl_list_dict['ace'] = acl_entry_list 
+        access_list_dict['acl-name'] = sfc_classifiers.name
+        access_list_dict['access-list-entries'] = acl_list_dict
+        access_list_json['acl'] = access_list_dict
+
+        acl_data = json.dumps(access_list_json)
+        acl_url = self._get_rest_url(account,self._access_list_path.format(sfc_classifiers.name))
+        print(acl_url)
+        print(acl_data)
+        r=requests.put(acl_url,auth=(account.odl.username,account.odl.password),headers={'content-type': 'application/json'}, data=acl_data)
+        r.raise_for_status()
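+
+    # The resulting body follows the ietf-access-control-list model,
+    # roughly (illustrative values):
+    #
+    #     {"acl": {"acl-name": "classifier-1",
+    #              "access-list-entries": {"ace": [{
+    #                  "rule-name": "rule-1",
+    #                  "matches": {"protocol": 6,
+    #                              "destination-port-range":
+    #                                  {"lower-port": 80, "upper-port": 80}},
+    #                  "actions": {"service-function-acl:rendered-service-path":
+    #                                  "chain-1-Path-1"}}]}}}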
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/CMakeLists.txt b/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/CMakeLists.txt
new file mode 100644 (file)
index 0000000..90e06b4
--- /dev/null
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwsdn_sim rwsdn_sim.py)
diff --git a/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/rwsdn_sim.py b/rwlaunchpad/plugins/rwvns/vala/rwsdn_sim/rwsdn_sim.py
new file mode 100644 (file)
index 0000000..3061265
--- /dev/null
@@ -0,0 +1,113 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import itertools
+import logging
+import os
+import uuid
+import time
+
+import ipaddress
+
+import gi
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwSdn', '1.0')
+from gi.repository import (
+    GObject,
+    RwSdn, # Vala package
+    RwTypes,
+    RwsdnYang,
+    #IetfL2TopologyYang as l2Tl,
+    RwTopologyYang as RwTl,
+    )
+
+import rw_status
+import rwlogger
+
+from rift.topmgr.sdnsim import SdnSim
+
+
+logger = logging.getLogger('rwsdn.sdnsim')
+
+
+class UnknownAccountError(Exception):
+    pass
+
+
+class MissingFileError(Exception):
+    pass
+
+
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    UnknownAccountError: RwTypes.RwStatus.NOTFOUND,
+    MissingFileError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+
+class SdnSimPlugin(GObject.Object, RwSdn.Topology):
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self.sdnsim = SdnSim()
+        
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    subcategory="sdnsim",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
+    @rwstatus(ret_on_failure=[None])
+    def do_validate_sdn_creds(self, account):
+        """
+        Validates the sdn account credentials for the specified account.
+        Performs an access to the resources using Keystone API. If creds
+        are not valid, returns an error code & reason string
+
+        @param account - a SDN account
+
+        Returns:
+            Validation Code and Details String
+        """
+        status = RwsdnYang.SdnConnectionStatus()
+        print("SDN Successfully connected")
+        status.status = "success"
+        status.details = "Connection was successful"
+        #logger.debug('Done with validate SDN creds: %s', type(status))
+        return status
+
+
+    @rwstatus(ret_on_failure=[None])
+    def do_get_network_list(self, account):
+        """
+        Returns the list of discovered networks
+
+        @param account - an SDN account
+
+        """
+        logger.debug('Get network list: ')
+        nwtop = self.sdnsim.get_network_list( account)
+        logger.debug('Done with get network list: %s', type(nwtop))
+        return nwtop
diff --git a/rwlaunchpad/plugins/rwvns/yang/CMakeLists.txt b/rwlaunchpad/plugins/rwvns/yang/CMakeLists.txt
new file mode 100644 (file)
index 0000000..00cde0b
--- /dev/null
@@ -0,0 +1,37 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# 
+
+##
+# Parse the yang files
+##
+
+include(rift_yang)
+
+set(source_yang_files rwsdn.yang)
+
+rift_add_yang_target(
+  TARGET rwsdn_yang
+  YANG_FILES ${source_yang_files}
+  COMPONENT ${PKG_LONG_NAME}
+  LIBRARIES
+    rwschema_yang_gen
+    rwyang
+    rwlog
+    rwlog-mgmt_yang_gen
+    mano-types_yang_gen
+)
+
diff --git a/rwlaunchpad/plugins/rwvns/yang/Makefile b/rwlaunchpad/plugins/rwvns/yang/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/rwvns/yang/rwsdn.yang b/rwlaunchpad/plugins/rwvns/yang/rwsdn.yang
new file mode 100644 (file)
index 0000000..5ea2eb0
--- /dev/null
@@ -0,0 +1,402 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rwsdn
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rwsdn";
+  prefix "rwsdn";
+
+  import rw-base {
+    prefix rwbase;
+  }
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rw-log {
+    prefix "rwlog";
+  }
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  import ietf-inet-types {
+    prefix "inet";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+
+  revision 2014-12-30 {
+    description
+        "Initial revision.";
+    reference
+        "RIFT RWSDN cloud data";
+  }
+
+  typedef sdn-connection-status-enum {
+    description "Connection status for the sdn account";
+    type enumeration {
+      enum unknown;
+      enum validating;
+      enum success;
+      enum failure;
+    }
+  }
+
+  grouping connection-status {
+    container connection-status {
+      config false;
+      rwpb:msg-new SdnConnectionStatus;
+      leaf status {
+        type sdn-connection-status-enum;
+      }
+      leaf details {
+        type string;
+      }
+    }
+  }
+
+  uses connection-status;
+
+  typedef sdn-account-type {
+    description "SDN account type";
+    type enumeration {
+      enum odl;
+      enum mock;
+      enum sdnsim;
+    }
+  }
+
+  grouping sdn-provider-auth {
+    leaf account-type {
+      type sdn-account-type;
+    }
+
+    choice provider-specific-info {
+      container odl {
+        leaf username {
+          type string {
+            length "1..255";
+          }
+        }
+
+        leaf password {
+          type string {
+            length "1..32";
+          }
+        }
+
+        leaf url {
+          type string {
+            length "1..255";
+          }
+        }
+        leaf plugin-name {
+          type string;
+          default "rwsdn_odl";
+        }
+      }
+      container mock {
+        leaf username {
+          type string;
+        }
+        leaf plugin-name {
+          type string;
+          default "rwsdn_mock";
+        }
+      }
+
+      container sdnsim {
+        leaf username {
+          type string;
+        }
+        leaf topology-source {
+          type string;
+        }
+        leaf plugin-name {
+          type string;
+          default "rwsdn_sim";
+        }
+      }
+    }
+  }
+
+  container sdn-accounts {
+    list sdn-account-list {
+      rwpb:msg-new SDNAccount;
+      key "name";
+
+      leaf name {
+        type string;
+      }
+
+      uses sdn-provider-auth;
+      uses connection-status;
+    }
+  }
+
+  container vnffgs {
+    list vnffg-chain {
+      key "name";
+      rwpb:msg-new VNFFGChain;
+
+      leaf name {
+        type string;
+      }
+
+      list vnf-chain-path {
+        key "order";
+        leaf order {
+          type uint32;
+          description " Order of the VNF in VNFFG chain";
+        }
+        leaf service-function-type {
+          type string;
+        }
+        leaf nsh-aware {
+          type boolean;
+        }
+        leaf transport-type {
+          type string;
+        }
+        list vnfr-ids {
+          key "vnfr-id";
+          leaf vnfr-id {
+            type yang:uuid;
+          }
+          leaf vnfr-name {
+            type string;
+          }
+          leaf mgmt-address {
+            type inet:ip-address;
+          }
+          leaf mgmt-port {
+              type inet:port-number;
+          }
+          list vdu-list {
+            key "vm-id port-id";
+            leaf port-id {
+              rwpb:field-inline "true";
+              rwpb:field-string-max 64;
+              type string;
+            }
+            leaf vm-id {
+              rwpb:field-inline "true";
+              rwpb:field-string-max 64;
+              type string;
+            }
+            leaf name {
+              type string;
+            }
+            leaf address {
+              type inet:ip-address;
+            }
+            leaf port {
+              type inet:port-number;
+            }
+          }
+          leaf sff-name {
+            description "SFF name useful for non OVS based SFF";
+            type string;
+          } 
+        }
+      }
+      list sff {
+        rwpb:msg-new VNFFGSff;
+        key "name"; 
+        leaf name {
+          type string;
+        }
+        leaf function-type {
+          type string;
+        }
+        leaf mgmt-address {
+          type inet:ip-address;
+        }
+        leaf mgmt-port {
+          type inet:port-number;
+        }
+        list dp-endpoints {
+          key "name";
+          leaf name {
+           type string;
+          } 
+          leaf address {
+            type inet:ip-address;
+          }
+          leaf port {
+            type inet:port-number;
+          }
+        }
+        list vnfr-list {
+          key "vnfr-name";
+          leaf vnfr-name {
+            type string;
+          }
+        }
+      }
+      leaf classifier-name {
+        type string;
+      }
+    }
+  }
+
+  container vnffg-rendered-paths {
+    rwpb:msg-new VNFFGRenderedPaths;
+    list vnffg-rendered-path {
+      key "name";
+      rwpb:msg-new VNFFGRenderedPath;
+      config false;
+      leaf name {
+        type string;
+      }
+      leaf path-id {
+          description
+              "Unique Identifier for the service path";
+        type uint32;
+      }
+      list rendered-path-hop {
+        key "hop-number";
+        leaf hop-number {
+          type uint8;
+        }
+        leaf service-index {
+            description
+                "Location within the service path";
+          type uint8;
+        }
+        leaf vnfr-name {
+          type string;
+        }
+        container service-function-forwarder {
+          leaf name { 
+            description
+                "Service Function Forwarder name";
+            type string;
+          }
+          leaf ip-address {
+            description
+                "Service Function Forwarder Data Plane IP address";
+            type inet:ip-address;
+          }  
+          leaf port {
+            description
+                "Service Function Forwarder Data Plane port";
+            type inet:port-number;
+          }  
+        }
+      }
+    }
+  }
+
+
+  container vnffg-classifiers {
+    list vnffg-classifier {
+      key "name";
+      rwpb:msg-new VNFFGClassifier;
+
+      leaf name {
+        type string;
+      }
+      leaf rsp-name {
+        type string;
+      }
+      leaf port-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+      leaf vm-id {
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+      }
+      leaf sff-name {
+        type string;
+      }
+      container vnffg-metadata {
+        leaf ctx1 {
+          type string;
+        }
+        leaf ctx2 {
+          type string;
+        }
+        leaf ctx3 {
+          type string;
+        }
+        leaf ctx4 {
+          type string;
+        }
+      }
+      list match-attributes {
+        description
+            "List of match attributes.";
+        key "name";
+        leaf name {
+          description
+              "Name for the Access list";
+          type string;  
+        }
+
+        leaf ip-proto {
+          description
+              "IP Protocol.";
+          type uint8;
+        }
+
+        leaf source-ip-address {
+          description
+              "Source IP address.";
+          type inet:ip-prefix;
+        }
+
+        leaf destination-ip-address {
+          description
+              "Destination IP address.";
+          type inet:ip-prefix;
+        }
+
+        leaf source-port {
+          description
+              "Source port number.";
+          type inet:port-number;
+        }
+
+        leaf destination-port {
+          description
+              "Destination port number.";
+          type inet:port-number;
+        }
+      } //match-attributes
+    }
+  }
+
+}
+
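+/*
+ * Illustrative example (not part of the model): an ODL account configured
+ * under /sdn-accounts, with placeholder values:
+ *
+ *   <sdn-accounts>
+ *     <sdn-account-list>
+ *       <name>odl-lab</name>
+ *       <account-type>odl</account-type>
+ *       <odl>
+ *         <username>admin</username>
+ *         <password>admin</password>
+ *         <url>http://10.66.4.27:8181</url>
+ *       </odl>
+ *     </sdn-account-list>
+ *   </sdn-accounts>
+ */
+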
+/* vim: set ts=2:sw=2: */
diff --git a/rwlaunchpad/plugins/vala/CMakeLists.txt b/rwlaunchpad/plugins/vala/CMakeLists.txt
new file mode 100644 (file)
index 0000000..c9c20d7
--- /dev/null
@@ -0,0 +1,25 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 10/31/2015
+# 
+
+set(subdirs
+  rwve_vnfm_em
+  rwve_vnfm_vnf
+  rwos_ma_nfvo
+  )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwlaunchpad/plugins/vala/Makefile b/rwlaunchpad/plugins/vala/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/vala/rwos_ma_nfvo/CMakeLists.txt b/rwlaunchpad/plugins/vala/rwos_ma_nfvo/CMakeLists.txt
new file mode 100644 (file)
index 0000000..b0919bd
--- /dev/null
@@ -0,0 +1,64 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 10/31/2015
+# 
+
+##
+# Allow specific compiler warnings
+##
+rift_allow_compiler_warning(unused-but-set-variable)
+
+set(VALA_NAME rwos_ma_nfvo)
+set(VALA_FILES ${VALA_NAME}.vala)
+set(VALA_VERSION 1.0)
+set(VALA_RELEASE 1)
+set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION})
+set(VALA_TYPELIB_PREFIX RwOsMaNfvo-${VALA_VERSION})
+
+rift_add_vala(
+  ${VALA_LONG_NAME}
+  VALA_FILES ${VALA_FILES}
+  VALA_PACKAGES 
+    rw_types-1.0 rw_log_yang-1.0 rw_schema_proto-1.0 rw_yang_pb-1.0
+    rw_yang-1.0 protobuf_c-1.0 rw_keyspec-1.0 rw_log-1.0
+
+  #VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+  #GIR_PATHS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+  GENERATE_HEADER_FILE ${VALA_NAME}.h
+
+  GENERATE_SO_FILE lib${VALA_LONG_NAME}.so
+  GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
+  GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
+  GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
+  #DEPENDS rwcal_yang rwlog_gi rwschema_yang
+  )
+
+rift_install_vala_artifacts(
+  HEADER_FILES ${VALA_NAME}.h
+  SO_FILES lib${VALA_LONG_NAME}.so
+  VAPI_FILES ${VALA_LONG_NAME}.vapi
+  GIR_FILES ${VALA_TYPELIB_PREFIX}.gir
+  TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib
+  COMPONENT ${PKG_LONG_NAME}
+  DEST_PREFIX .
+  )
+
+
+set(subdirs
+  rwos_ma_nfvo_rest
+  )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo.vala b/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo.vala
new file mode 100644 (file)
index 0000000..63e4601
--- /dev/null
@@ -0,0 +1,16 @@
+namespace RwOsMaNfvo {
+
+  public interface Orchestrator: GLib.Object {
+    /*
+     * Init routine
+     */
+    public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx);
+
+    /*
+     * Notify the NFVO of an NS lifecycle event
+     */
+    public abstract RwTypes.RwStatus ns_lifecycle_event();
+  }
+}
+
+
diff --git a/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/CMakeLists.txt b/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/CMakeLists.txt
new file mode 100644 (file)
index 0000000..f9ec32f
--- /dev/null
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwos_ma_nfvo_rest rwos_ma_nfvo_rest.py)
diff --git a/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/Makefile b/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/rwos_ma_nfvo_rest.py b/rwlaunchpad/plugins/vala/rwos_ma_nfvo/rwos_ma_nfvo_rest/rwos_ma_nfvo_rest.py
new file mode 100644 (file)
index 0000000..3ac9429
--- /dev/null
@@ -0,0 +1,51 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import re
+import logging
+import rw_status
+import rwlogger
+import subprocess
+import os
+
+import gi
+gi.require_version('RwOsMaNfvo', '1.0')
+gi.require_version('RwTypes', '1.0')
+from gi.repository import (
+    GObject,
+    RwOsMaNfvo,
+    RwTypes)
+
+logger = logging.getLogger('rwos-ma-nfvo-rest')
+
+
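+# Map common Python exceptions to RwStatus codes; the resulting @rwstatus
+# decorator converts exceptions raised in plugin methods into these statuses.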
+rwstatus = rw_status.rwstatus_from_exc_map({ IndexError: RwTypes.RwStatus.NOTFOUND,
+                                             KeyError: RwTypes.RwStatus.NOTFOUND,
+                                             NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,})
+
+class RwOsMaNfvoRestPlugin(GObject.Object, RwOsMaNfvo.Orchestrator):
+    """This class implements the Ve-Vnfm VALA methods."""
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
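+        # Attach an RwLogger handler exactly once so this plugin's log
+        # records are forwarded to the RIFT logging subsystem.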
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(rwlogger.RwLogger(subcategory="rwos-ma-nfvo-rest",
+                                                log_hdl=rwlog_ctx,))
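+
+    @rwstatus
+    def do_ns_lifecycle_event(self):
+        # Assumed no-op stub for the Orchestrator interface's
+        # ns_lifecycle_event hook, mirroring the sibling REST plugins.
+        pass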
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_em/CMakeLists.txt b/rwlaunchpad/plugins/vala/rwve_vnfm_em/CMakeLists.txt
new file mode 100644 (file)
index 0000000..12ff14c
--- /dev/null
@@ -0,0 +1,66 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 10/31/2015
+# 
+
+##
+# Allow specific compiler warnings
+##
+rift_allow_compiler_warning(unused-but-set-variable)
+
+set(VALA_NAME rwve_vnfm_em)
+set(VALA_FILES ${VALA_NAME}.vala)
+set(VALA_VERSION 1.0)
+set(VALA_RELEASE 1)
+set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION})
+set(VALA_TYPELIB_PREFIX RwVeVnfmEm-${VALA_VERSION})
+
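+# Compile the Vala interface and generate GObject-introspection artifacts
+# (.vapi/.gir/.typelib) so that Python plugins can implement it via PyGObject.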
+rift_add_vala(
+  ${VALA_LONG_NAME}
+  VALA_FILES ${VALA_FILES}
+  VALA_PACKAGES 
+    rw_types-1.0 rw_log_yang-1.0 rw_schema_proto-1.0 rw_yang_pb-1.0
+    rw_yang-1.0 protobuf_c-1.0 rw_keyspec-1.0 rw_log-1.0
+
+  #VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+  #GIR_PATHS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+  GENERATE_HEADER_FILE ${VALA_NAME}.h
+
+  GENERATE_SO_FILE lib${VALA_LONG_NAME}.so
+  GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
+  GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
+  GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
+  #DEPENDS rwcal_yang rwlog_gi rwschema_yang
+  )
+
+rift_install_vala_artifacts(
+  HEADER_FILES ${VALA_NAME}.h
+  SO_FILES lib${VALA_LONG_NAME}.so
+  VAPI_FILES ${VALA_LONG_NAME}.vapi
+  GIR_FILES ${VALA_TYPELIB_PREFIX}.gir
+  TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib
+  COMPONENT ${PKG_LONG_NAME}
+  DEST_PREFIX .
+  )
+
+
+set(subdirs
+  rwve_vnfm_em_rest
+  )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em.vala b/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em.vala
new file mode 100644 (file)
index 0000000..3da25f9
--- /dev/null
@@ -0,0 +1,16 @@
+namespace RwVeVnfmEm {
+
+  public interface ElementManager: GLib.Object {
+    /*
+     * Init routine
+     */
+    public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx);
+
+    /*
+     * Notify the EM of a VNF lifecycle event
+     */
+    public abstract RwTypes.RwStatus vnf_lifecycle_event();
+  }
+}
+
+
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/CMakeLists.txt b/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/CMakeLists.txt
new file mode 100644 (file)
index 0000000..6efbd40
--- /dev/null
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwve_vnfm_em_rest rwve_vnfm_em_rest.py)
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/Makefile b/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest Makefile.top in this directory or any ancestor
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/rwve_vnfm_em_rest.py b/rwlaunchpad/plugins/vala/rwve_vnfm_em/rwve_vnfm_em_rest/rwve_vnfm_em_rest.py
new file mode 100644 (file)
index 0000000..c7147a4
--- /dev/null
@@ -0,0 +1,56 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import re
+import logging
+import rw_status
+import rwlogger
+import subprocess
+import os
+
+import gi
+gi.require_version('RwVeVnfmEm', '1.0')
+gi.require_version('RwTypes', '1.0')
+from gi.repository import (
+    GObject,
+    RwVeVnfmEm,
+    RwTypes)
+
+logger = logging.getLogger('rw_ve_vnfm_em.rest')
+
+
+rwstatus = rw_status.rwstatus_from_exc_map({ IndexError: RwTypes.RwStatus.NOTFOUND,
+                                             KeyError: RwTypes.RwStatus.NOTFOUND,
+                                             NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,})
+
+class RwVeVnfmEmRestPlugin(GObject.Object, RwVeVnfmEm.ElementManager):
+    """This class implements the Ve-Vnfm VALA methods."""
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(rwlogger.RwLogger(subcategory="rwve-vnfm-em-rest",
+                                                log_hdl=rwlog_ctx,))
+
+    @rwstatus
+    def do_vnf_lifecycle_event(self):
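+        # Assumed no-op stub: VNF lifecycle events are currently ignored.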
+        pass
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/CMakeLists.txt b/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/CMakeLists.txt
new file mode 100644 (file)
index 0000000..190763d
--- /dev/null
@@ -0,0 +1,64 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Anil Gunturu
+# Creation Date: 10/31/2015
+# 
+
+##
+# Allow specific compiler warnings
+##
+rift_allow_compiler_warning(unused-but-set-variable)
+
+set(VALA_NAME rwve_vnfm_vnf)
+set(VALA_FILES ${VALA_NAME}.vala)
+set(VALA_VERSION 1.0)
+set(VALA_RELEASE 1)
+set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION})
+set(VALA_TYPELIB_PREFIX RwVeVnfmVnf-${VALA_VERSION})
+
+rift_add_vala(
+  ${VALA_LONG_NAME}
+  VALA_FILES ${VALA_FILES}
+  VALA_PACKAGES 
+    rw_types-1.0 rw_log_yang-1.0 rw_schema_proto-1.0 rw_yang_pb-1.0
+    rw_yang-1.0 protobuf_c-1.0 rw_keyspec-1.0 rw_log-1.0
+
+  #VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+  #GIR_PATHS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+  GENERATE_HEADER_FILE ${VALA_NAME}.h
+
+  GENERATE_SO_FILE lib${VALA_LONG_NAME}.so
+  GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
+  GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
+  GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
+  #DEPENDS rwcal_yang rwlog_gi rwschema_yang
+  )
+
+rift_install_vala_artifacts(
+  HEADER_FILES ${VALA_NAME}.h
+  SO_FILES lib${VALA_LONG_NAME}.so
+  VAPI_FILES ${VALA_LONG_NAME}.vapi
+  GIR_FILES ${VALA_TYPELIB_PREFIX}.gir
+  TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib
+  COMPONENT ${PKG_LONG_NAME}
+  DEST_PREFIX .
+  )
+
+
+set(subdirs
+  rwve_vnfm_vnf_rest
+  )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf.vala b/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf.vala
new file mode 100644 (file)
index 0000000..6b5e84e
--- /dev/null
@@ -0,0 +1,16 @@
+namespace RwVeVnfmVnf {
+
+  public interface Vnf: GLib.Object {
+    /*
+     * Init routine
+     */
+    public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx);
+
+    /*
+     * Get monitoring parameters from the VNF
+     */
+    public abstract RwTypes.RwStatus get_monitoring_param();
+  }
+}
+
+
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/CMakeLists.txt b/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/CMakeLists.txt
new file mode 100644 (file)
index 0000000..e890eaa
--- /dev/null
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwve_vnfm_vnf_rest rwve_vnfm_vnf_rest.py)
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/Makefile b/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest Makefile.top in this directory or any ancestor
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/rwve_vnfm_vnf_rest.py b/rwlaunchpad/plugins/vala/rwve_vnfm_vnf/rwve_vnfm_vnf_rest/rwve_vnfm_vnf_rest.py
new file mode 100644 (file)
index 0000000..dad3321
--- /dev/null
@@ -0,0 +1,55 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import re
+import logging
+import rw_status
+import rwlogger
+import subprocess
+import os
+
+import gi
+gi.require_version('RwVeVnfmVnf', '1.0')
+gi.require_version('RwTypes', '1.0')
+from gi.repository import (
+    GObject,
+    RwVeVnfmVnf,
+    RwTypes)
+
+logger = logging.getLogger('rwve-vnfm-vnf-rest')
+
+
+rwstatus = rw_status.rwstatus_from_exc_map({ IndexError: RwTypes.RwStatus.NOTFOUND,
+                                             KeyError: RwTypes.RwStatus.NOTFOUND,
+                                             NotImplementedError: RwTypes.RwStatus.NOT_IMPLEMENTED,})
+
+class RwVeVnfmVnfRestPlugin(GObject.Object, RwVeVnfmVnf.Vnf):
+    """This class implements the Ve-Vnfm VALA methods."""
+
+    def __init__(self):
+        GObject.Object.__init__(self)
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(rwlogger.RwLogger(subcategory="rwve-vnfm-vnf-rest",
+                                                log_hdl=rwlog_ctx,))
+
+    @rwstatus
+    def do_get_monitoring_param(self):
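+        # Assumed no-op stub: monitoring-parameter retrieval is not
+        # implemented yet for this REST plugin.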
+        pass
diff --git a/rwlaunchpad/plugins/yang/CMakeLists.txt b/rwlaunchpad/plugins/yang/CMakeLists.txt
new file mode 100644 (file)
index 0000000..aa5846a
--- /dev/null
@@ -0,0 +1,79 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tom Seidenberg
+# Creation Date: 2014/04/08
+# 
+
+set(source_yang_files
+  rw-launchpad.yang
+  rw-monitor.yang
+  rw-nsm.yang
+  rw-resource-mgr.yang
+  rw-vnfm.yang
+  rw-vns.yang
+  rw-image-mgmt.yang
+  )
+
+##
+# YANG targets
+##
+rift_generate_python_log_yang(
+    LOG_CATEGORY_NAME rw-mano-log
+    START_EVENT_ID 65000
+    OUT_YANG_FILE_VAR rw_mano_log_file
+    )
+
+rift_generate_python_log_yang(
+    LOG_CATEGORY_NAME rw-monitor-log
+    START_EVENT_ID 64000
+    OUT_YANG_FILE_VAR rw_monitor_log_file
+    )
+
+rift_generate_python_log_yang(
+    LOG_CATEGORY_NAME rw-mon-params-log
+    START_EVENT_ID 67000
+    OUT_YANG_FILE_VAR rw_mon_params_log_file
+    )
+
+rift_generate_python_log_yang(
+    LOG_CATEGORY_NAME rw-resource-mgr-log
+    START_EVENT_ID 69000
+    OUT_YANG_FILE_VAR rw_resource_mgr_log_file
+    )
+
+rift_add_yang_target(
+  TARGET rwlaunchpad_yang
+  YANG_FILES
+    ${source_yang_files}
+    ${rw_mano_log_file}
+    ${rw_monitor_log_file}
+    ${rw_mon_params_log_file}
+    ${rw_resource_mgr_log_file}
+  COMPONENT ${PKG_LONG_NAME}
+  LIBRARIES
+    mano_yang_gen
+    rwcloud_yang_gen
+    rw_conman_yang_gen
+    rwconfig_agent_yang_gen
+    mano-types_yang_gen
+  DEPENDS
+    mano_yang
+    rwcloud_yang
+    rw_conman_yang
+    rwconfig_agent_yang
+    mano-types_yang
+)
+
diff --git a/rwlaunchpad/plugins/yang/Makefile b/rwlaunchpad/plugins/yang/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest Makefile.top in this directory or any ancestor
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwlaunchpad/plugins/yang/rw-image-mgmt.tailf.yang b/rwlaunchpad/plugins/yang/rw-image-mgmt.tailf.yang
new file mode 100644 (file)
index 0000000..0184a9a
--- /dev/null
@@ -0,0 +1,45 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-image-mgmt-annotation
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-image-mgmt-annotation";
+  prefix "rw-image-mgmt-ann";
+
+  import rw-image-mgmt {
+    prefix rw-image-mgmt;
+  }
+
+  import tailf-common {
+    prefix tailf;
+  }
+
+  tailf:annotate "/rw-image-mgmt:upload-jobs" {
+    tailf:callpoint rw_callpoint;
+  }
+
+  tailf:annotate "/rw-image-mgmt:create-upload-job" {
+    tailf:actionpoint rw_actionpoint;
+  }
+
+  tailf:annotate "/rw-image-mgmt:cancel-upload-job" {
+    tailf:actionpoint rw_actionpoint;
+  }
+}
diff --git a/rwlaunchpad/plugins/yang/rw-image-mgmt.yang b/rwlaunchpad/plugins/yang/rw-image-mgmt.yang
new file mode 100644 (file)
index 0000000..833931f
--- /dev/null
@@ -0,0 +1,260 @@
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+/**
+ * @file rw-image-mgmt.yang
+ * @author Austin Cormier
+ * @date 2016/06/01
+ * @brief Image Management Yang
+ */
+
+module rw-image-mgmt
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-image-mgmt";
+  prefix "rw-image-mgmt";
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-cli-ext {
+    prefix "rwcli";
+  }
+
+  import rw-cloud {
+    prefix "rwcloud";
+  }
+
+  import rwcal {
+    prefix "rwcal";
+  }
+
+  revision 2016-06-01 {
+    description
+      "Initial revision.";
+  }
+
+  typedef job-status {
+    type enumeration {
+      enum QUEUED;
+      enum IN_PROGRESS;
+      enum CANCELLING;
+      enum CANCELLED;
+      enum COMPLETED;
+      enum FAILED;
+    }
+  }
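+
+  /*
+   * Informative: the typical job-status transitions implied above are
+   *   QUEUED -> IN_PROGRESS -> COMPLETED or FAILED
+   *   QUEUED / IN_PROGRESS -> CANCELLING -> CANCELLED
+   */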
+
+  typedef upload-task-status {
+    type enumeration {
+      enum QUEUED;
+      enum CHECK_IMAGE_EXISTS;
+      enum UPLOADING;
+      enum CANCELLING;
+      enum CANCELLED;
+      enum COMPLETED;
+      enum FAILED;
+    }
+  }
+
+  grouping image-upload-info {
+    leaf image-id {
+      description "The image id that exists in the image catalog";
+      type string;
+    }
+
+    leaf image-name {
+      description "The image name that exists in the image catalog";
+      type string;
+    }
+
+    leaf image-checksum {
+      description "The image md5 checksum";
+      type string;
+    }
+  }
+
+  grouping upload-task-status {
+    leaf status {
+      description "The status of the upload task";
+      type upload-task-status;
+      default QUEUED;
+    }
+
+    leaf detail {
+      description "Detailed upload status message";
+      type string;
+    }
+
+    leaf progress-percent {
+      description "The image upload progress percentage (0-100)";
+      type uint8;
+      default 0;
+    }
+
+    leaf bytes_written {
+      description "The number of bytes written";
+      type uint64;
+      default 0;
+    }
+
+    leaf bytes_total {
+      description "The total number of bytes to write";
+      type uint64;
+      default 0;
+    }
+
+    leaf bytes_per_second {
+      description "The total number of bytes written per second";
+      type uint32;
+      default 0;
+    }
+
+    leaf start-time {
+      description "The image upload start time (unix epoch)";
+      type uint32;
+    }
+
+    leaf stop-time {
+      description "The image upload stop time (unix epoch)";
+      type uint32;
+    }
+  }
+
+  grouping upload-task {
+    leaf cloud-account {
+      description "The cloud account to upload the image to";
+      type leafref {
+        path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+      }
+    }
+
+    uses image-upload-info;
+    uses upload-task-status;
+  }
+
+  container upload-jobs {
+    rwpb:msg-new UploadJobs;
+    description "Image upload jobs";
+    config false;
+
+    list job {
+      rwpb:msg-new UploadJob;
+      key "id";
+
+      leaf id {
+        description "Unique image upload job-id";
+        type uint32;
+      }
+
+      leaf status {
+        description "Current job status";
+        type job-status;
+      }
+
+      leaf start-time {
+        description "The job start time (unix epoch)";
+        type uint32;
+      }
+
+      leaf stop-time {
+        description "The job stop time (unix epoch)";
+        type uint32;
+      }
+
+      list upload-tasks {
+        rwpb:msg-new UploadTask;
+        description "The upload tasks that are part of this job";
+        uses upload-task;
+      }
+    }
+  }
+
+  rpc create-upload-job {
+    input {
+      rwpb:msg-new CreateUploadJob;
+
+      choice image-selection {
+        case onboarded-image {
+          description "Use an image previously onboarded in the image catalog";
+          container onboarded-image {
+            uses image-upload-info;
+          }
+        }
+
+        case external-url {
+          description "Use an HTTP URL to pull the image from";
+
+          container external-url {
+            leaf image-url {
+              description "The image HTTP URL to pull the image from";
+              type string;
+            }
+
+            uses image-upload-info;
+
+            leaf disk-format {
+              description "Format of the Disk";
+              type rwcal:disk-format;
+            }
+
+            leaf container-format {
+              description "Format of the container";
+              type rwcal:container-format;
+              default "bare";
+            }
+          }
+        }
+      }
+
+      leaf-list cloud-account {
+        description "List of cloud accounts to upload the image to";
+        type leafref {
+          path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+        }
+      }
+    }
+
+    output {
+      rwpb:msg-new CreateUploadJobOutput;
+      leaf job-id {
+        description "The upload job-id to cancel";
+        type uint32;
+      }
+    }
+  }
+
+  rpc cancel-upload-job {
+    input {
+      rwpb:msg-new CancelUploadJob;
+      leaf job-id {
+        type uint32;
+      }
+    }
+  }
+}
diff --git a/rwlaunchpad/plugins/yang/rw-launchpad.tailf.yang b/rwlaunchpad/plugins/yang/rw-launchpad.tailf.yang
new file mode 100644 (file)
index 0000000..1fab791
--- /dev/null
@@ -0,0 +1,37 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-launchpad-annotation
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-launchpad-annotation";
+  prefix "rw-launchpad-ann";
+
+  import rw-launchpad {
+    prefix rw-launchpad;
+  }
+
+  import tailf-common {
+    prefix tailf;
+  }
+
+  tailf:annotate "/rw-launchpad:datacenters" {
+    tailf:callpoint rw_callpoint;
+  }
+}
diff --git a/rwlaunchpad/plugins/yang/rw-launchpad.yang b/rwlaunchpad/plugins/yang/rw-launchpad.yang
new file mode 100644 (file)
index 0000000..37a9c85
--- /dev/null
@@ -0,0 +1,217 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+
+
+/**
+ * @file rw-launchpad.yang
+ * @author Joshua Downer
+ * @date 2015/09/14
+ * @brief Launchpad Yang
+ */
+
+module rw-launchpad
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-launchpad";
+  prefix "rw-launchpad";
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import ietf-inet-types {
+    prefix "inet";
+  }
+
+
+  import rw-cli-ext {
+    prefix "rwcli";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rwcal {
+    prefix "rwcal";
+  }
+
+  import rw-vnfd {
+    prefix "rw-vnfd";
+  }
+
+  import vld {
+    prefix "vld";
+  }
+
+  import rw-nsd {
+    prefix "rw-nsd";
+  }
+
+  import rw-cloud {
+    prefix "rw-cloud";
+  }
+
+  import rw-nsr {
+    prefix "rw-nsr";
+  }
+
+  import rw-conman {
+    prefix "rw-conman";
+  }
+
+  import rw-config-agent {
+    prefix "rw-config-agent";
+  }
+
+  import rw-monitor {
+    prefix "rw-monitor";
+  }
+
+  import rw-image-mgmt {
+    prefix "rw-image-mgmt";
+  }
+
+  revision 2015-09-14 {
+    description
+      "Initial revision.";
+  }
+
+  container datacenters {
+    description "OpenMano data centers";
+
+    rwpb:msg-new DataCenters;
+    config false;
+
+    list cloud-accounts {
+      description
+          "A list of OpenMano cloud accounts that have data centers associated
+          with them";
+
+      rwpb:msg-new CloudAccount;
+      key "name";
+
+      leaf name {
+        description "The name of the cloud account";
+        type leafref {
+          path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+        }
+      }
+
+      list datacenters {
+        rwpb:msg-new DataCenter;
+        leaf uuid {
+          description "The UUID of the data center";
+          type yang:uuid;
+        }
+
+        leaf name {
+          description "The name of the data center";
+          type string;
+        }
+      }
+    }
+  }
+
+  typedef resource-orchestrator-account-type {
+    description "RO account type";
+    type enumeration {
+      enum rift-ro;
+      enum openmano;
+    }
+  }
+
+  container resource-orchestrator {
+    rwpb:msg-new ResourceOrchestrator;
+
+    leaf name {
+       type string;
+    }
+
+    leaf account-type {
+      type resource-orchestrator-account-type;
+    }
+
+    choice resource-orchestrator {
+      description
+        "The resource orchestrator to use by the Launchpad";
+      default rift-ro;
+
+      case rift-ro {
+        description
+          "Use the RIFT.io resource orchestrator";
+
+        container rift-ro {
+          leaf rift-ro {
+            type empty;
+          }
+        }
+      }
+
+      case openmano {
+        description
+          "Use OpenMano as RO";
+
+        container openmano {
+          leaf host {
+            type string;
+            default "localhost";
+          }
+
+          leaf port {
+            type uint16;
+            default 9090;
+          }
+
+          leaf tenant-id {
+            type string {
+              length "36";
+            }
+            mandatory true;
+          }
+        }
+      }
+    }
+  }
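+
+  /*
+   * Illustrative example (placeholder values) of selecting OpenMano as
+   * the resource orchestrator:
+   *
+   *   resource-orchestrator {
+   *     name "openmano-ro";
+   *     account-type openmano;
+   *     openmano { host "10.66.4.32"; port 9090; tenant-id "<36-char-id>"; }
+   *   }
+   */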
+
+  container launchpad-config {
+    leaf public-ip {
+      description
+          "An IP address that can, at least, be reached by the host that the
+          launchpad is running on. This is not a mandatory but is required for
+          alarms to function correctly.";
+      type string;
+    }
+  }
+}
diff --git a/rwlaunchpad/plugins/yang/rw-monitor.yang b/rwlaunchpad/plugins/yang/rw-monitor.yang
new file mode 100644 (file)
index 0000000..559880d
--- /dev/null
@@ -0,0 +1,70 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ */
+
+
+
+/**
+ * @file rw-monitor.yang
+ * @author Joshua Downer
+ * @date 2015/10/30
+ * @brief NFVI Monitor
+ */
+
+module rw-monitor
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-monitor";
+  prefix "rw-monitor";
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-cli-ext {
+    prefix "rwcli";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rw-cloud {
+    prefix "rw-cloud";
+  }
+
+  import rw-nsr {
+    prefix "rw-nsr";
+  }
+
+  import rw-vnfr {
+    prefix "rw-vnfr";
+  }
+
+  import rwcal {
+    prefix "rwcal";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  revision 2015-10-30 {
+    description
+      "Initial revision.";
+  }
+}
diff --git a/rwlaunchpad/plugins/yang/rw-nsm.yang b/rwlaunchpad/plugins/yang/rw-nsm.yang
new file mode 100644 (file)
index 0000000..4e6d9aa
--- /dev/null
@@ -0,0 +1,133 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ */
+
+
+
+/**
+ * @file rw-nsm.yang
+ * @author Rajesh Velandy
+ * @date 2015/10/07
+ * @brief NSM  yang
+ */
+
+module rw-nsm
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-nsm";
+  prefix "rw-nsm";
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-cli-ext {
+    prefix "rwcli";
+  }
+
+  import ietf-inet-types {
+    prefix "inet";
+  }
+
+  import rw-nsd {
+    prefix "rw-nsd";
+  }
+  import nsd {
+    prefix "nsd";
+  }
+  import rw-nsr {
+    prefix "rw-nsr";
+  }
+  import vld {
+    prefix "vld";
+  }
+  import rw-vlr {
+    prefix "rw-vlr";
+  }
+  import rw-vns {
+    prefix "rw-vns";
+  }
+  import rw-vnfd {
+    prefix "rw-vnfd";
+  }
+  import vnfd {
+    prefix "vnfd";
+  }
+  import rw-vnfr {
+    prefix "rw-vnfr";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rw-launchpad {
+    prefix "rw-launchpad";
+  }
+
+  import rw-cloud {
+    prefix "rw-cloud";
+  }
+
+  import rw-sdn {
+    prefix "rw-sdn";
+  }
+
+  import rw-config-agent {
+    prefix "rw-config-agent";
+  }
+
+  revision 2015-10-07 {
+    description
+      "Initial revision.";
+  }
+
+  grouping cm-endpoint {
+    leaf cm-ip-address {
+      type inet:ip-address;
+      description "IP Address";
+      default "127.0.0.1";
+    }
+    leaf cm-port {
+      type inet:port-number;
+      description "Port Number";
+      default 2022;
+    }
+    leaf cm-username {
+      description "RO endpoint username";
+      type string;
+      default "admin";
+    }
+    leaf cm-password {
+      description "RO endpoint password";
+      type string;
+      default "admin";
+    }
+  }
+
+  container ro-config {
+    description "Resource Orchestrator endpoint ip address";
+    rwpb:msg-new "roConfig";
+    rwcli:new-mode "ro-config";
+
+    container cm-endpoint {
+      description "Service Orchestrator endpoint ip address";
+      rwpb:msg-new "SoEndpoint";
+      uses cm-endpoint;
+    }
+  }
+}
diff --git a/rwlaunchpad/plugins/yang/rw-resource-mgr.tailf.yang b/rwlaunchpad/plugins/yang/rw-resource-mgr.tailf.yang
new file mode 100644 (file)
index 0000000..6b6e8b1
--- /dev/null
@@ -0,0 +1,42 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rw-resource-mgr-annotation
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-resource-mgr-annotation";
+  prefix "rw-resource-mgr-ann";
+
+  import rw-resource-mgr
+  {
+    prefix rw-resource-mgr;
+  }
+
+  import tailf-common {
+    prefix tailf;
+  }
+
+  tailf:annotate "/rw-resource-mgr:resource-pool-records" {
+    tailf:callpoint rw_callpoint;
+  }
+
+  tailf:annotate "/rw-resource-mgr:resource-mgmt" {
+    tailf:callpoint rw_callpoint;
+  }
+}
diff --git a/rwlaunchpad/plugins/yang/rw-resource-mgr.yang b/rwlaunchpad/plugins/yang/rw-resource-mgr.yang
new file mode 100644 (file)
index 0000000..9bf914a
--- /dev/null
@@ -0,0 +1,309 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ */
+
+
+
+module rw-resource-mgr
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-resource-mgr";
+  prefix "rw-resource-mgr";
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-cli-ext {
+    prefix "rwcli";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rw-cloud {
+    prefix "rwcloud";
+  }
+
+  import rwcal {
+    prefix "rwcal";
+  }
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  revision 2015-10-16 {
+    description
+      "Initial revision.";
+  }
+
+  grouping resource-pool-info {
+    leaf name {
+      description "Name of the resource pool";
+      rwpb:field-inline "true";
+      rwpb:field-string-max 64;
+      type string;
+      //mandatory true;
+    }
+
+    leaf resource-type {
+      description "Type of resource";
+      type enumeration {
+        enum compute;
+        enum network;
+      }
+    }
+
+    leaf pool-type {
+      description "Type of pool";
+      type enumeration {
+        enum static;
+        enum dynamic;
+      }
+      default "static";
+    }
+
+    leaf max-size {
+      description "Maximum size to which a dynamic resource pool can grow";
+      type uint32;
+    }
+
+  }
+
+  container resource-mgr-config {
+    description "Data model for configuration of resource-mgr";
+    rwpb:msg-new ResourceManagerConfig;
+    config true;
+
+    container management-domain {
+      leaf name {
+        description "The management domain name this launchpad is associated with.";
+        rwpb:field-inline "true";
+        rwpb:field-string-max 64;
+        type string;
+        //mandatory true;
+      }
+    }
+
+    container resource-pools {
+      description "Resource Pool configuration";
+      rwpb:msg-new ResourcePools;
+      list cloud-account {
+        key "name";
+        leaf name {
+          description
+            "Resource pool for the configured cloud account";
+          type leafref {
+            path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+          }
+        }
+      }
+    }
+  }
+
+  grouping resource-state {
+    leaf resource-state {
+      type enumeration {
+        enum inactive;
+        enum active;
+        enum pending;
+        enum failed;
+      }
+    }
+    leaf resource-errors {
+      description "Error message details in case of failed resource state";
+      type string;
+    }
+  }
+
+  container resource-mgmt {
+    description "Resource management ";
+    config false;
+
+    container vdu-event {
+      description "Events for VDU Management";
+      rwpb:msg-new VDUEvent;
+
+      list vdu-event-data {
+        rwpb:msg-new VDUEventData;
+        key "event-id";
+
+        leaf event-id {
+          description "Identifier associated with the VDU transaction";
+          type yang:uuid;
+        }
+
+        leaf cloud-account {
+          description "The cloud account to use for this resource request";
+          type leafref {
+            path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+          }
+        }
+
+        container request-info {
+          description "Information about required resource";
+
+          uses rwcal:vdu-create-params;
+        }
+
+        container resource-info {
+          description "Information about allocated resource";
+          leaf pool-name {
+            type string;
+          }
+          uses resource-state;
+          uses rwcal:vdu-info-params;
+        }
+      }
+    }
+
+    container vlink-event {
+      description "Events for Virtual Link management";
+      rwpb:msg-new VirtualLinkEvent;
+
+      list vlink-event-data {
+        rwpb:msg-new VirtualLinkEventData;
+
+        key "event-id";
+
+        leaf event-id {
+          description "Identifier associated with the Virtual Link transaction";
+          type yang:uuid;
+        }
+
+        leaf cloud-account {
+          description "The cloud account to use for this resource request";
+          type leafref {
+            path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+          }
+        }
+
+        container request-info {
+          description "Information about required resource";
+
+          uses rwcal:virtual-link-create-params;
+        }
+
+        container resource-info {
+          leaf pool-name {
+            type string;
+          }
+          uses resource-state;
+          uses rwcal:virtual-link-info-params;
+        }
+      }
+    }
+  }
+
+
+  container resource-pool-records {
+    description "Resource Pool Records";
+    rwpb:msg-new ResourcePoolRecords;
+    config false;
+
+    list cloud-account {
+      key "name";
+      leaf name {
+        description
+          "The configured cloud account's pool records.";
+        type leafref {
+          path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+        }
+      }
+
+      list records {
+        rwpb:msg-new ResourceRecordInfo;
+        key "name";
+        uses resource-pool-info;
+
+        leaf pool-status {
+          type enumeration {
+            enum unknown;
+            enum locked;
+            enum unlocked;
+          }
+        }
+
+        leaf total-resources {
+          type uint32;
+        }
+
+        leaf free-resources {
+          type uint32;
+        }
+
+        leaf allocated-resources {
+          type uint32;
+        }
+      }
+    }
+  }
+
+
+  container resource-mgr-data {
+    description "Resource Manager operational data";
+    config false;
+
+    container pool-record {
+      description "Resource Pool record";
+
+      list cloud {
+        key "name";
+        max-elements 16;
+        rwpb:msg-new "ResmgrCloudPoolRecords";
+        leaf name {
+          description
+            "The configured cloud account's pool records.";
+          type leafref {
+            path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+          }
+        }
+
+        list records {
+          key "name";
+          uses resource-pool-info;
+
+          list free-vdu-list {
+            key vdu-id;
+            uses rwcal:vdu-info-params;
+          }
+
+          list in-use-vdu-list {
+            key vdu-id;
+            uses rwcal:vdu-info-params;
+          }
+
+          list free-vlink-list {
+            key virtual-link-id;
+            uses rwcal:virtual-link-info-params;
+          }
+
+          list in-use-vlink-list {
+            key virtual-link-id;
+            uses rwcal:virtual-link-info-params;
+          }
+        }
+      }
+    }
+  }
+}
diff --git a/rwlaunchpad/plugins/yang/rw-vnfm.yang b/rwlaunchpad/plugins/yang/rw-vnfm.yang
new file mode 100644 (file)
index 0000000..25e1abb
--- /dev/null
@@ -0,0 +1,78 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ */
+
+
+
+/**
+ * @file rw-vnfm.yang
+ * @author Rajesh Velandy
+ * @date 2015/10/07
+ * @brief VNFM  yang
+ */
+
+module rw-vnfm
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-vnfm";
+  prefix "rw-vnfm";
+
+  import vld {
+    prefix "vld";
+  }
+
+  import vlr {
+    prefix "vlr";
+  }
+
+  import rw-vlr {
+    prefix "rw-vlr";
+  }
+
+  import rw-vns {
+    prefix "rw-vns";
+  }
+
+  import rw-vnfd {
+    prefix "rw-vnfd";
+  }
+
+  import rw-vnfr {
+    prefix "rw-vnfr";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rw-manifest {
+    prefix "rw-manifest";
+  }
+
+  import rw-resource-mgr {
+    prefix "rw-resource-mgr";
+  }
+
+  import rw-launchpad {
+    prefix "rw-launchpad";
+  }
+
+  revision 2015-10-07 {
+    description
+      "Initial revision.";
+  }
+}
diff --git a/rwlaunchpad/plugins/yang/rw-vns.yang b/rwlaunchpad/plugins/yang/rw-vns.yang
new file mode 100644 (file)
index 0000000..0036e16
--- /dev/null
@@ -0,0 +1,96 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ */
+
+
+
+/**
+ * @file rw-vns.yang
+ * @author Austin Cormier
+ * @date 2015/10/06
+ * @brief Virtual Network Service Yang
+ */
+
+module rw-vns
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-vns";
+  prefix "rw-vns";
+
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-cli-ext {
+    prefix "rwcli";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rwcal {
+    prefix "rwcal";
+  }
+
+  import rwsdn {
+    prefix "rwsdn";
+  }
+
+
+  import ietf-yang-types {
+    prefix "yang";
+  }
+
+  import rw-vlr {
+    prefix "rwvlr";
+  }
+
+  import vld {
+    prefix "vld";
+  }
+
+  import ietf-network {
+    prefix "nw";
+  }
+
+  import ietf-network-topology {
+    prefix "nt";
+  }
+
+  import ietf-l2-topology {
+    prefix "l2t";
+  }
+
+  import rw-topology {
+    prefix "rw-topology";
+  }
+
+  import rw-resource-mgr {
+    prefix "rw-resource-mgr";
+  }
+
+  import rw-sdn {
+    prefix "rw-sdn";
+  }
+
+  revision 2015-10-05 {
+    description
+      "Initial revision.";
+  }
+}
diff --git a/rwlaunchpad/ra/CMakeLists.txt b/rwlaunchpad/ra/CMakeLists.txt
new file mode 100644 (file)
index 0000000..cd07b92
--- /dev/null
@@ -0,0 +1,117 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 09/16/2015
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+install(
+  PROGRAMS
+    pingpong_longevity_systest
+    pingpong_vnf_systest
+    pingpong_records_systest
+    pingpong_vnf_reload_systest
+    pingpong_lp_ha_systest
+    pingpong_recovery_systest
+    pingpong_scaling_systest
+    scaling_systest
+  DESTINATION usr/rift/systemtest/pingpong_vnf
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  PROGRAMS
+    multi_vm_vnf_slb_systest.sh
+    multi_vm_vnf_trafgen_systest.sh
+  DESTINATION usr/rift/systemtest/multi_vm_vnf
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    pytest/multivm_vnf/conftest.py
+    pytest/multivm_vnf/test_multi_vm_vnf_slb.py
+    pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py
+    pytest/multivm_vnf/test_trafgen_data.py
+  DESTINATION usr/rift/systemtest/pytest/multi_vm_vnf
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  PROGRAMS
+    launchpad_longevity_systest
+    launchpad_systest
+  DESTINATION usr/rift/systemtest/launchpad
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    racfg/multi_tenant_systest_openstack.racfg
+  DESTINATION usr/rift/systemtest/launchpad
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    pytest/conftest.py
+    pytest/test_launchpad.py
+    pytest/test_launchpad_longevity.py
+    pytest/test_start_standby.py
+    pytest/test_failover.py
+  DESTINATION usr/rift/systemtest/pytest/system
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    pytest/ns/conftest.py
+    pytest/ns/test_onboard.py
+  DESTINATION usr/rift/systemtest/pytest/system/ns
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    pytest/ns/pingpong/test_pingpong.py
+    pytest/ns/pingpong/test_pingpong_longevity.py
+    pytest/ns/pingpong/test_records.py
+    pytest/ns/pingpong/test_scaling.py
+  DESTINATION usr/rift/systemtest/pytest/system/ns/pingpong
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    pytest/ns/haproxy/test_scaling.py
+  DESTINATION usr/rift/systemtest/pytest/system/ns/haproxy
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    racfg/pingpong_vnf_systest_cloudsim.racfg
+    racfg/pingpong_vnf_systest_openstack.racfg
+    racfg/pingpong_scaling_systest_openstack.racfg
+    racfg/pingpong_records_systest_cloudsim.racfg
+    racfg/pingpong_records_systest_openstack.racfg
+    racfg/pingpong_records_systest_openstack_xml.racfg
+    racfg/pingpong_vnf_reload_systest_openstack.racfg
+    racfg/pingpong_vnf_reload_systest_openstack_xml.racfg
+    racfg/scaling_systest.racfg
+    racfg/recovery_systest.racfg
+    racfg/pingpong_lp_ha_systest_openstack.racfg
+  DESTINATION usr/rift/systemtest/pingpong_vnf
+  COMPONENT ${PKG_LONG_NAME})
+
+install(
+  FILES
+    racfg/multivm_vnf_slb_systest.racfg
+    racfg/multivm_vnf_trafgen_systest.racfg
+  DESTINATION usr/rift/systemtest/multi_vm_vnf
+  COMPONENT ${PKG_LONG_NAME})
diff --git a/rwlaunchpad/ra/launchpad_longevity_systest b/rwlaunchpad/ra/launchpad_longevity_systest
new file mode 100755 (executable)
index 0000000..f4370aa
--- /dev/null
@@ -0,0 +1,58 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2016/01/04
+#
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+SCRIPT_TEST="py.test -x -v \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/system/test_launchpad_longevity.py"
+
+test_cmd=""
+repeat=10
+repeat_keyword="longevity"
+repeat_system=1
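+
+# Repetition knobs: 'repeat' and 'repeat_keyword' are assumed to be consumed
+# by the common test helpers; 'repeat_system' drives the outer loop below.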
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+test_rc=0
+for i in $(seq ${repeat_system});
+do
+    echo "CYCLE: $i"
+    eval ${test_cmd}
+    test_rc=$?
+    echo "DEBUG: Got test command rc: $test_rc"
+    if [[ ${test_rc} -ne 0 ]]; then
+        echo "Exiting with test_rc: $test_rc"
+        break
+    fi
+done
+
+# Convert the unit test JUnit XML result files to pretty-printed format
+pretty_print_junit_xml
+
+exit ${test_rc}
diff --git a/rwlaunchpad/ra/launchpad_systest b/rwlaunchpad/ra/launchpad_systest
new file mode 100755 (executable)
index 0000000..54dfd6e
--- /dev/null
@@ -0,0 +1,37 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2016/07/12
+#
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+SCRIPT_TEST="py.test -x -v \
+            ${PYTEST_DIR}/system/test_launchpad.py"
+
+test_cmd=""
+
+test_cmd=""
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
+
diff --git a/rwlaunchpad/ra/multi_vm_vnf_slb_systest.sh b/rwlaunchpad/ra/multi_vm_vnf_slb_systest.sh
new file mode 100755 (executable)
index 0000000..a2a1059
--- /dev/null
@@ -0,0 +1,41 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Karun Ganesharatnam
+# Creation Date: 02/26/2016
+# 
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the multi-VM VNF (SLB) system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -vvv \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/multi_vm_vnf/test_multi_vm_vnf_slb.py \
+            ${PYTEST_DIR}/multi_vm_vnf/test_trafgen_data.py"
+
+test_cmd=""
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables and create the mvv image
+mvv=true
+create_mvv_image_file
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
diff --git a/rwlaunchpad/ra/multi_vm_vnf_trafgen_systest.sh b/rwlaunchpad/ra/multi_vm_vnf_trafgen_systest.sh
new file mode 100755 (executable)
index 0000000..c88b95a
--- /dev/null
@@ -0,0 +1,42 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Karun Ganesharatnam
+# Creation Date: 02/26/2016
+# 
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the multi-VM VNF trafgen system test using the systest_wrapper
+
+SCRIPT_TEST="py.test -x -vvv \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/multi_vm_vnf/test_multi_vm_vnf_trafgen.py \
+            ${PYTEST_DIR}/multi_vm_vnf/test_trafgen_data.py"
+
+test_cmd=""
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables and create the mvv image
+mvv=true
+create_mvv_image_file
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
diff --git a/rwlaunchpad/ra/pingpong_longevity_systest b/rwlaunchpad/ra/pingpong_longevity_systest
new file mode 100755 (executable)
index 0000000..7728f7f
--- /dev/null
@@ -0,0 +1,43 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2016/01/04
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the ping pong longevity system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/system/ns/test_onboard.py \
+            ${PYTEST_DIR}/system/ns/pingpong/test_records.py \
+            ${PYTEST_DIR}/system/ns/pingpong/test_pingpong_longevity.py"
+
+test_cmd=""
+repeat_keyword="longevity"
+repeat=10
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
diff --git a/rwlaunchpad/ra/pingpong_lp_ha_systest b/rwlaunchpad/ra/pingpong_lp_ha_systest
new file mode 100755 (executable)
index 0000000..5647168
--- /dev/null
@@ -0,0 +1,44 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Varun Prasad
+# Creation Date: 19-Feb-2016
+# 
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the ping pong launchpad HA system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+  ${PYTEST_DIR}/system/test_launchpad.py \
+  ${PYTEST_DIR}/system/ns/test_onboard.py \
+  ${PYTEST_DIR}/system/ns/pingpong/test_records.py"
+
+test_cmd=""
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# Force standalone launchpad
+lp_standalone=true
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
+
diff --git a/rwlaunchpad/ra/pingpong_records_systest b/rwlaunchpad/ra/pingpong_records_systest
new file mode 100755 (executable)
index 0000000..5897714
--- /dev/null
@@ -0,0 +1,41 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2015/09/15
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+restconf=true
+
+# Helper script for invoking the ping pong records system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/system/ns/test_onboard.py \
+            ${PYTEST_DIR}/system/ns/pingpong/test_records.py"
+
+test_cmd=""
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
diff --git a/rwlaunchpad/ra/pingpong_recovery_systest b/rwlaunchpad/ra/pingpong_recovery_systest
new file mode 100755 (executable)
index 0000000..b4cd426
--- /dev/null
@@ -0,0 +1,44 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Varun Prasad
+# Creation Date: 29-Mar-2016
+# 
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the ping pong recovery system test using the systest_wrapper
+SCRIPT_TEST="py.test -v -p no:cacheprovider --recovery --no-update \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/system/ns/test_onboard.py \
+            ${PYTEST_DIR}/system/ns/pingpong/test_records.py"
+
+test_cmd=""
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# We want to run the test in expanded mode
+collapsed_mode=false
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
+
diff --git a/rwlaunchpad/ra/pingpong_scaling_systest b/rwlaunchpad/ra/pingpong_scaling_systest
new file mode 100755 (executable)
index 0000000..eca3ee6
--- /dev/null
@@ -0,0 +1,50 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2015/07/13
+#
+
+if [ -z $AUTO_TASK_ID ]; then
+    AUTO_TASK_ID=1
+    export AUTO_TASK_ID
+fi
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the ping pong scaling system test using the systest_wrapper
+
+SCRIPT_TEST="py.test -x -s -p no:cacheprovider \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/system/ns/pingpong/test_scaling.py"
+
+test_prefix="pingpong_scaling_systest"
+test_cmd=""
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
+
+# display scaling log
+scaling_log="${RIFT_ARTIFACTS}/scaling_${AUTO_TASK_ID}.log"
+cat ${scaling_log}
diff --git a/rwlaunchpad/ra/pingpong_vnf_reload_systest b/rwlaunchpad/ra/pingpong_vnf_reload_systest
new file mode 100755 (executable)
index 0000000..609b1d4
--- /dev/null
@@ -0,0 +1,45 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Varun Prasad
+# Creation Date: 2016/01/04
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the ping pong VNF reload system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider -k 'not Teardown or test_stop_launchpad' \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/system/ns/test_onboard.py \
+            ${PYTEST_DIR}/system/ns/pingpong/test_records.py"
+
+REBOOT_SCRIPT_TEST="py.test -x -v -p no:cacheprovider -k 'test_wait_for_launchpad_started or test_wait_for_pingpong_configured or Teardown' \
+                    ${PYTEST_DIR}/system/test_launchpad.py \
+                    ${PYTEST_DIR}/system/ns/test_onboard.py \
+                    ${PYTEST_DIR}/system/ns/pingpong/test_records.py"
+
+test_cmd=""
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
diff --git a/rwlaunchpad/ra/pingpong_vnf_systest b/rwlaunchpad/ra/pingpong_vnf_systest
new file mode 100755 (executable)
index 0000000..24cd303
--- /dev/null
@@ -0,0 +1,40 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2015/09/15
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the ping pong VNF system test using the systest_wrapper
+
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/system/ns/test_onboard.py"
+
+test_cmd=""
+
+# Parse command-line arguments and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
diff --git a/rwlaunchpad/ra/pytest/conftest.py b/rwlaunchpad/ra/pytest/conftest.py
new file mode 100644 (file)
index 0000000..fc094fa
--- /dev/null
@@ -0,0 +1,131 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import pytest
+import os
+import subprocess
+import sys
+
+import rift.auto.log
+import rift.auto.session
+import rift.vcs.vcs
+import logging
+
+import gi
+gi.require_version('RwCloudYang', '1.0')
+
+from gi.repository import RwCloudYang
+
+@pytest.fixture(scope='session')
+def cloud_name_prefix():
+    '''fixture which returns the prefix used in cloud account names'''
+    return 'cloud'
+
+@pytest.fixture(scope='session')
+def cloud_account_name(cloud_name_prefix):
+    '''fixture which returns the name used to identify the cloud account'''
+    return '{prefix}-0'.format(prefix=cloud_name_prefix)
+
+@pytest.fixture(scope='session')
+def sdn_account_name():
+    '''fixture which returns the name used to identify the sdn account'''
+    return 'sdn-0'
+
+@pytest.fixture(scope='session')
+def sdn_account_type():
+    '''fixture which returns the account type used by the sdn account'''
+    return 'odl'
+
+@pytest.fixture(scope='session')
+def cloud_module():
+    '''Fixture containing the module which defines the cloud account
+    Returns:
+        module to be used when configuring a cloud account
+    '''
+    return RwCloudYang
+
+@pytest.fixture(scope='session')
+def cloud_xpath():
+    '''Fixture containing the xpath that should be used to configure a cloud account
+    Returns:
+        xpath to be used when configuring a cloud account
+    '''
+    return '/cloud/account'
+
+@pytest.fixture(scope='session')
+def cloud_accounts(cloud_module, cloud_name_prefix, cloud_host, cloud_user, cloud_tenants, cloud_type):
+    '''fixture which returns a list of CloudAccounts. One per tenant provided
+
+    Arguments:
+        cloud_module        - fixture: module defining cloud account
+        cloud_name_prefix   - fixture: name prefix used for cloud account
+        cloud_host          - fixture: cloud host address
+        cloud_user          - fixture: cloud account user key
+        cloud_tenants       - fixture: list of tenants to create cloud accounts on
+        cloud_type          - fixture: cloud account type
+
+    Returns:
+        A list of CloudAccounts
+    '''
+    accounts = []
+    for idx, cloud_tenant in enumerate(cloud_tenants):
+        cloud_account_name = "{prefix}-{idx}".format(prefix=cloud_name_prefix, idx=idx)
+
+        if cloud_type == 'lxc':
+            accounts.append(
+                    cloud_module.CloudAccount.from_dict({
+                        "name": cloud_account_name,
+                        "account_type": "cloudsim_proxy"})
+            )
+        elif cloud_type == 'openstack':
+            password = 'mypasswd'
+            auth_url = 'http://{cloud_host}:5000/v3/'.format(cloud_host=cloud_host)
+            mgmt_network = os.getenv('MGMT_NETWORK', 'private')
+            accounts.append(
+                    cloud_module.CloudAccount.from_dict({
+                        'name':  cloud_account_name,
+                        'account_type': 'openstack',
+                        'openstack': {
+                            'admin': True,
+                            'key': cloud_user,
+                            'secret': password,
+                            'auth_url': auth_url,
+                            'tenant': cloud_tenant,
+                            'mgmt_network': mgmt_network}})
+            )
+        elif cloud_type == 'mock':
+            accounts.append(
+                    cloud_module.CloudAccount.from_dict({
+                        "name": cloud_account_name,
+                        "account_type": "mock"})
+            )
+
+    return accounts
+
+
+@pytest.fixture(scope='session', autouse=True)
+def cloud_account(cloud_accounts):
+    '''fixture which returns an instance of CloudAccount
+
+    Arguments:
+        cloud_accounts - fixture: list of generated cloud accounts
+
+    Returns:
+        An instance of CloudAccount
+    '''
+    return cloud_accounts[0]
+
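+
+# --- Editorial sketch (not part of the original test suite) -----------------
+# The cloud_module, cloud_xpath and cloud_account fixtures above are meant to
+# be consumed together.  A minimal sketch of that flow, assuming a
+# mgmt_session fixture whose proxy exposes create_config() with the same
+# signature used elsewhere in this suite:
+def configure_cloud_account(mgmt_session, cloud_module, cloud_xpath, cloud_account):
+    '''Apply a single cloud account to the running configuration (sketch).'''
+    proxy = mgmt_session.proxy(cloud_module)
+    proxy.create_config(cloud_xpath, cloud_account)
+    return proxy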
diff --git a/rwlaunchpad/ra/pytest/multivm_vnf/conftest.py b/rwlaunchpad/ra/pytest/multivm_vnf/conftest.py
new file mode 100644 (file)
index 0000000..a3c565b
--- /dev/null
@@ -0,0 +1,139 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import gi
+import shlex
+import pytest
+import os
+import subprocess
+import tempfile
+
+from gi.repository import (
+    NsdYang,
+    NsrYang,
+    RwNsrYang,
+    RwVnfrYang,
+    VnfrYang,
+    VldYang,
+    RwVnfdYang,
+    RwLaunchpadYang,
+    RwBaseYang
+)
+
+@pytest.fixture(scope='session', autouse=True)
+def cloud_account_name(request):
+    '''fixture which returns the name used to identify the cloud account'''
+    return 'cloud-0'
+
+@pytest.fixture(scope='session')
+def launchpad_host(request, confd_host):
+    return confd_host
+
+@pytest.fixture(scope='session')
+def vnfd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfdYang)
+
+@pytest.fixture(scope='session')
+def vnfr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VnfrYang)
+
+@pytest.fixture(scope='session')
+def rwvnfr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfrYang)
+
+@pytest.fixture(scope='session')
+def vld_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VldYang)
+
+@pytest.fixture(scope='session')
+def nsd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsdYang)
+
+@pytest.fixture(scope='session')
+def nsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsrYang)
+
+@pytest.fixture(scope='session')
+def rwnsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+@pytest.fixture(scope='session')
+def base_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwBaseYang)
+
+@pytest.fixture(scope='session')
+def mvv_descr_dir(request):
+    """root-directory of descriptors files used for Multi-VM VNF"""
+    return os.path.join(
+        os.environ["RIFT_INSTALL"],
+        "demos/tests/multivm_vnf"
+        )
+
+@pytest.fixture(scope='session')
+def package_dir(request):
+    return tempfile.mkdtemp(prefix="mvv_")
+
+@pytest.fixture(scope='session')
+def trafgen_vnfd_package_file(request, package_gen_script, mvv_descr_dir, package_dir):
+    pkg_cmd = "{pkg_scr} --descriptor-type='vnfd' --format='xml' --infile='{infile}' --outdir='{outdir}'".format(
+            pkg_scr=package_gen_script,
+            outdir=package_dir,
+            infile=os.path.join(mvv_descr_dir, 'vnfd/xml/multivm_trafgen_vnfd.xml'))
+    pkg_file = os.path.join(package_dir, 'multivm_trafgen_vnfd.tar.gz')
+    command = shlex.split(pkg_cmd)
+    print("Running the command arguments: %s" % command)
+    command = [package_gen_script,
+               "--descriptor-type", "vnfd",
+               "--format", "xml",
+               "--infile", "%s" % os.path.join(mvv_descr_dir, 'vnfd/xml/multivm_trafgen_vnfd.xml'),
+               "--outdir", "%s" % package_dir]
+    print("Running new command arguments: %s" % command)
+    subprocess.check_call(command)
+    return pkg_file
+
+@pytest.fixture(scope='session')
+def trafsink_vnfd_package_file(request, package_gen_script, mvv_descr_dir, package_dir):
+    pkg_cmd = "{pkg_scr} --descriptor-type='vnfd' --format='xml' --infile='{infile}' --outdir='{outdir}'".format(
+            pkg_scr=package_gen_script,
+            outdir=package_dir,
+            infile=os.path.join(mvv_descr_dir, 'vnfd/xml/multivm_trafsink_vnfd.xml'))
+    pkg_file = os.path.join(package_dir, 'multivm_trafsink_vnfd.tar.gz')
+    command = shlex.split(pkg_cmd)
+    print("Running the command arguments: %s" % command)
+    command = [package_gen_script,
+               "--descriptor-type", "vnfd",
+               "--format", "xml",
+               "--infile", "%s" % os.path.join(mvv_descr_dir, 'vnfd/xml/multivm_trafsink_vnfd.xml'),
+               "--outdir", "%s" % package_dir]
+    print("Running new command arguments: %s" % command)
+    subprocess.check_call(command)
+    return pkg_file
+
+@pytest.fixture(scope='session')
+def slb_vnfd_package_file(request, package_gen_script, mvv_descr_dir, package_dir):
+    pkg_cmd = "{pkg_scr} --outdir {outdir} --infile {infile} --descriptor-type vnfd --format xml".format(
+            pkg_scr=package_gen_script,
+            outdir=package_dir,
+            infile=os.path.join(mvv_descr_dir, 'vnfd/xml/multivm_slb_vnfd.xml'),
+            )
+    pkg_file = os.path.join(package_dir, 'multivm_slb_vnfd.tar.gz')
+    subprocess.check_call(shlex.split(pkg_cmd))
+    return pkg_file
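+
+# --- Editorial sketch (not part of the original test suite) -----------------
+# The three package fixtures above differ only in their input descriptor; a
+# hypothetical shared helper capturing the common package-generation call:
+def _generate_package(package_gen_script, infile, outdir, descriptor_type="vnfd"):
+    '''Run the package generation script and return the expected archive path.'''
+    command = [package_gen_script,
+               "--descriptor-type", descriptor_type,
+               "--format", "xml",
+               "--infile", infile,
+               "--outdir", outdir]
+    subprocess.check_call(command)
+    archive_name = os.path.basename(infile).replace(".xml", ".tar.gz")
+    return os.path.join(outdir, archive_name)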
diff --git a/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_slb.py b/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_slb.py
new file mode 100755 (executable)
index 0000000..557518b
--- /dev/null
@@ -0,0 +1,286 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_multi_vm_vnf_slb.py
+@author Karun Ganesharatnam (karun.ganesharatnam@riftio.com)
+@date 03/16/2016
+@brief Scriptable load-balancer test with multi-vm VNFs
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import shutil
+import subprocess
+import time
+import uuid
+
+from gi.repository import (
+    NsdYang,
+    NsrYang,
+    RwNsrYang,
+    VnfrYang,
+    VldYang,
+    RwVnfdYang,
+    RwLaunchpadYang,
+    RwBaseYang
+)
+
+import rift.auto.mano
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
+@pytest.fixture(scope='module')
+def multi_vm_vnf_nsd_package_file(request, package_gen_script, mvv_descr_dir, package_dir):
+    pkg_cmd = "{pkg_scr} --outdir {outdir} --infile {infile} --descriptor-type nsd --format xml".format(
+            pkg_scr=package_gen_script,
+            outdir=package_dir,
+            infile=os.path.join(mvv_descr_dir, 'nsd/xml/multivm_tg_slb_ts_config_nsd.xml'),
+            )
+    pkg_file = os.path.join(package_dir, 'multivm_tg_slb_ts_config_nsd.tar.gz')
+    logger.debug("Generating NSD package: %s", pkg_file)
+    subprocess.check_call(shlex.split(pkg_cmd))
+    return pkg_file
+
+def create_nsr(nsd_id, input_param_list, cloud_account_name):
+    """
+    Create the NSR record object
+
+    Arguments:
+         nsd_id             - NSD id
+         input_param_list   - list of input-parameter objects
+         cloud_account_name - name of the cloud account to instantiate on
+
+    Return:
+         NSR object
+    """
+    nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+
+    nsr.id = str(uuid.uuid4())
+    nsr.name = rift.auto.mano.resource_name(nsr.id)
+    nsr.short_name = "nsr_short_name"
+    nsr.description = "This is a description"
+    nsr.nsd_ref = nsd_id
+    nsr.admin_status = "ENABLED"
+    nsr.input_parameter.extend(input_param_list)
+    nsr.cloud_account = cloud_account_name
+
+    return nsr
+
+
+def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
+    curl_cmd = 'curl --insecure -F "descriptor=@{file}" http://{host}:4567/api/upload'.format(
+            file=descriptor_file,
+            host=host,
+            )
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    pass
+
+
+def wait_onboard_transaction_finished(logger, transaction_id, timeout=10, host="127.0.0.1"):
+    logger.info("Waiting for onboard trans_id %s to complete", transaction_id)
+    def check_status_onboard_status():
+        uri = 'http://%s:4567/api/upload/%s/state' % (host, transaction_id)
+        curl_cmd = 'curl --insecure {uri}'.format(
+                uri=uri
+                )
+        return subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    elapsed = 0
+    start = time.time()
+    while elapsed < timeout:
+        reply = check_status_onboard_status()
+        state = json.loads(reply)
+        if state["status"] == "success":
+            break
+
+        if state["status"] != "pending":
+            raise DescriptorOnboardError(state)
+
+        time.sleep(1)
+        elapsed = time.time() - start
+
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+
+    logger.info("Descriptor onboard was successful")
+
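+# --- Editorial sketch (not part of the original test suite) -----------------
+# upload_descriptor() and wait_onboard_transaction_finished() implement a
+# two-step REST flow (POST to /api/upload, then poll /api/upload/<id>/state
+# until "success").  A hypothetical convenience wrapper combining them:
+def onboard_descriptor(logger, descriptor_file, host="127.0.0.1", timeout=10):
+    '''Upload a descriptor package and block until onboarding completes (sketch).'''
+    transaction_id = upload_descriptor(logger, descriptor_file, host=host)
+    wait_onboard_transaction_finished(logger, transaction_id, timeout=timeout, host=host)
+    return transaction_id
+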
+
+@pytest.mark.setup('multivmvnf')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestMultiVmVnfSlb(object):
+    pkg_dir = None
+    @classmethod
+    def teardown_class(cls):
+        """ remove the temporary directory contains the descriptor packages
+        """
+        logger.debug("Removing the temporary package directory: %s", cls.pkg_dir)
+#         if not cls.pkg_dir is None:
+#            shutil.rmtree(cls.pkg_dir)
+
+    def test_onboard_trafgen_vnfd(self, logger, launchpad_host, vnfd_proxy, trafgen_vnfd_package_file):
+        TestMultiVmVnfSlb.pkg_dir = os.path.dirname(trafgen_vnfd_package_file)
+        logger.info("Onboarding trafgen vnfd package: %s", trafgen_vnfd_package_file)
+        trans_id = upload_descriptor(logger, trafgen_vnfd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 1, "There should only be a single vnfd"
+        vnfd = vnfds[0]
+        assert vnfd.name == "multivm_trafgen_vnfd"
+
+    def test_onboard_trafsink_vnfd(self, logger, launchpad_host, vnfd_proxy, trafsink_vnfd_package_file):
+        TestMultiVmVnfSlb.pkg_dir = os.path.dirname(trafsink_vnfd_package_file)
+        logger.info("Onboarding trafsink vnfd package: %s", trafsink_vnfd_package_file)
+        trans_id = upload_descriptor(logger, trafsink_vnfd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 2, "There should be two vnfds"
+        assert "multivm_trafsink_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+    def test_onboard_slb_vnfd(self, logger, launchpad_host, vnfd_proxy, slb_vnfd_package_file):
+        TestMultiVmVnfSlb.pkg_dir = os.path.dirname(slb_vnfd_package_file)
+        logger.info("Onboarding slb vnfd package: %s", slb_vnfd_package_file)
+        trans_id = upload_descriptor(logger, slb_vnfd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 3, "There should be three vnfds"
+        assert "multivm_slb_vnfd" in [vnfd.name for vnfd in vnfds]
+
+    def test_onboard_multi_vm_vnf_nsd(self, logger, launchpad_host, nsd_proxy, multi_vm_vnf_nsd_package_file):
+        logger.info("Onboarding tg_slb_ts nsd package: %s", multi_vm_vnf_nsd_package_file)
+        trans_id = upload_descriptor(logger, multi_vm_vnf_nsd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        nsd = nsds[0]
+        assert nsd.name == "multivm_tg_slb_ts_config_nsd"
+
+    def test_instantiate_multi_vm_vnf_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy, cloud_account_name):
+
+        def verify_input_parameters(running_config, config_param):
+            """
+            Verify the configured parameter set against the running configuration
+            """
+            for run_input_param in running_config.input_parameter:
+                if (run_input_param.xpath == config_param.xpath and
+                        run_input_param.value == config_param.value):
+                    return True
+
+            assert False, ("Verification of configured input parameter {xpath: %s, value: %s} "
+                           "was unsuccessful.\nRunning configuration: %s" % (config_param.xpath,
+                                                                             config_param.value,
+                                                                             running_config.input_parameter))
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        input_parameters = []
+        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
+        descr_value = "New NSD Description"
+        in_param_id = str(uuid.uuid4())
+
+        input_param_1= NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                                                                xpath=descr_xpath,
+                                                                value=descr_value)
+
+        input_parameters.append(input_param_1)
+
+        nsr = create_nsr(nsd.id, input_parameters, cloud_account_name)
+
+        logger.info("Instantiating the Network Service")
+        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        # Verify the input parameter configuration
+        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+        for input_param in input_parameters:
+            verify_input_parameters(running_config, input_param)
+
+        assert len(nsrs) == 1
+        assert nsrs[0].ns_instance_config_ref == nsr.id
+
+        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.id)
+        rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=360)
+
+
+@pytest.mark.teardown('multivmvnf')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestMultiVmVnfSlbTeardown(object):
+    def test_terminate_nsr(self, nsr_proxy, vnfr_proxy, rwnsr_proxy, logger):
+        """
+        Terminate the instance and check if the record is deleted.
+
+        Asserts:
+        1. NSR record is deleted from instance-config.
+
+        """
+        logger.debug("Terminating Multi VM VNF's NSR")
+
+        nsr_path = "/ns-instance-config"
+        nsr = rwnsr_proxy.get_config(nsr_path)
+
+        ping_pong = nsr.nsr[0]
+        rwnsr_proxy.delete_config("/ns-instance-config/nsr[id='{}']".format(ping_pong.id))
+        time.sleep(30)
+
+
+    def test_delete_records(self, nsd_proxy, vnfd_proxy):
+        """Delete the NSD & VNFD records
+
+        Asserts:
+            The records are deleted.
+        """
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        for nsd in nsds.nsd:
+            xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
+            nsd_proxy.delete_config(xpath)
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        for vnfd_record in vnfds.vnfd:
+            xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+            vnfd_proxy.delete_config(xpath)
+
+        time.sleep(5)
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        assert nsds is None or len(nsds.nsd) == 0
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        assert vnfds is None or len(vnfds.vnfd) == 0
diff --git a/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py b/rwlaunchpad/ra/pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py
new file mode 100755 (executable)
index 0000000..ca6e9b5
--- /dev/null
@@ -0,0 +1,282 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_multi_vm_vnf_trafgen.py
+@author Karun Ganesharatnam (karun.ganesharatnam@riftio.com)
+@date 03/16/2016
+@brief Multi-VM VNF trafgen application system test
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import shutil
+import subprocess
+import time
+import uuid
+
+from gi.repository import (
+    NsdYang,
+    NsrYang,
+    RwNsrYang,
+    VnfrYang,
+    VldYang,
+    RwVnfdYang,
+    RwLaunchpadYang,
+    RwBaseYang
+)
+
+import rift.auto.mano
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
+@pytest.fixture(scope='module')
+def multi_vm_vnf_nsd_package_file(request, package_gen_script, mvv_descr_dir, package_dir):
+    pkg_cmd = "{pkg_scr} --descriptor-type='nsd' --format='xml' --infile='{infile}' --outdir='{outdir}'".format(
+            pkg_scr=package_gen_script,
+            infile=os.path.join(mvv_descr_dir, 'nsd/xml/multivm_tg_ts_config_nsd.xml'),
+            outdir=package_dir)
+    pkg_file = os.path.join(package_dir, 'multivm_tg_ts_config_nsd.tar.gz')
+    logger.debug("Generating NSD package: %s", pkg_file)
+    command = shlex.split(pkg_cmd)
+    print("Running the command arguments: %s" % command)
+    command = [package_gen_script,
+               "--descriptor-type", "nsd",
+               "--format", "xml",
+               "--infile", "%s" % os.path.join(mvv_descr_dir, 'vnfd/xml/multivm_tg_ts_config_nsd.xml'),
+               "--outdir", "%s" % package_dir]
+    print("Running new command arguments: %s" % command)
+    subprocess.check_call(shlex.split(pkg_cmd))
+    return pkg_file
+
+def create_nsr(nsd_id, input_param_list, cloud_account_name):
+    """
+    Create the NSR record object
+
+    Arguments:
+         nsd_id             - NSD id
+         input_param_list   - list of input-parameter objects
+         cloud_account_name - name of the cloud account to instantiate on
+
+    Return:
+         NSR object
+    """
+    nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+
+    nsr.id = str(uuid.uuid4())
+    nsr.name = rift.auto.mano.resource_name(nsr.id)
+    nsr.short_name = "nsr_short_name"
+    nsr.description = "This is a description"
+    nsr.nsd_ref = nsd_id
+    nsr.admin_status = "ENABLED"
+    nsr.input_parameter.extend(input_param_list)
+    nsr.cloud_account = cloud_account_name
+
+    return nsr
+
+
+def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
+    curl_cmd = 'curl --insecure -F "descriptor=@{file}" http://{host}:4567/api/upload'.format(
+            file=descriptor_file,
+            host=host,
+            )
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    pass
+
+
+def wait_onboard_transaction_finished(logger, transaction_id, timeout=10, host="127.0.0.1"):
+    logger.info("Waiting for onboard trans_id %s to complete", transaction_id)
+    def check_status_onboard_status():
+        uri = 'http://%s:4567/api/upload/%s/state' % (host, transaction_id)
+        curl_cmd = 'curl --insecure {uri}'.format(
+                uri=uri
+                )
+        return subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    elapsed = 0
+    start = time.time()
+    while elapsed < timeout:
+        reply = check_status_onboard_status()
+        state = json.loads(reply)
+        if state["status"] == "success":
+            break
+
+        if state["status"] != "pending":
+            raise DescriptorOnboardError(state)
+
+        time.sleep(1)
+        elapsed = time.time() - start
+
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+
+    logger.info("Descriptor onboard was successful")
+
+
+@pytest.mark.setup('multivmvnf')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestMultiVmVnfTrafgenApp(object):
+    pkg_dir = None
+    @classmethod
+    def teardown_class(cls):
+        """ remove the temporary directory contains the descriptor packages
+        """
+        logger.debug("Removing the temporary package directory: %s", cls.pkg_dir)
+        if cls.pkg_dir is not None:
+            shutil.rmtree(cls.pkg_dir)
+
+    def test_onboard_trafgen_vnfd(self, logger, launchpad_host, vnfd_proxy, trafgen_vnfd_package_file):
+        TestMultiVmVnfTrafgenApp.pkg_dir = os.path.dirname(trafgen_vnfd_package_file)
+        logger.info("Onboarding trafgen vnfd package: %s", trafgen_vnfd_package_file)
+        trans_id = upload_descriptor(logger, trafgen_vnfd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 1, "There should only be a single vnfd"
+        vnfd = vnfds[0]
+        assert vnfd.name == "multivm_trafgen_vnfd"
+
+    def test_onboard_trafsink_vnfd(self, logger, launchpad_host, vnfd_proxy, trafsink_vnfd_package_file):
+        TestMultiVmVnfTrafgenApp.pkg_dir = os.path.dirname(trafsink_vnfd_package_file)
+        logger.info("Onboarding trafsink vnfd package: %s", trafsink_vnfd_package_file)
+        trans_id = upload_descriptor(logger, trafsink_vnfd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 2, "There should be two vnfds"
+        assert "multivm_trafsink_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+    def test_onboard_multi_vm_vnf_nsd(self, logger, launchpad_host, nsd_proxy, multi_vm_vnf_nsd_package_file):
+        logger.info("Onboarding tg_ts nsd package: %s", multi_vm_vnf_nsd_package_file)
+        trans_id = upload_descriptor(logger, multi_vm_vnf_nsd_package_file, launchpad_host)
+        wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        nsd = nsds[0]
+        assert nsd.name == "multivm_tg_ts_config_nsd"
+
+    def test_instantiate_multi_vm_vnf_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy, cloud_account_name):
+
+        def verify_input_parameters(running_config, config_param):
+            """
+            Verify the configured parameter set against the running configuration
+            """
+            for run_input_param in running_config.input_parameter:
+                if (run_input_param.xpath == config_param.xpath and
+                        run_input_param.value == config_param.value):
+                    return True
+
+            assert False, ("Verification of configured input parameter {xpath: %s, value: %s} "
+                           "was unsuccessful.\nRunning configuration: %s" % (config_param.xpath,
+                                                                             config_param.value,
+                                                                             running_config.input_parameter))
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        input_parameters = []
+        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
+        descr_value = "New NSD Description"
+        in_param_id = str(uuid.uuid4())
+
+        input_param_1= NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                                                                xpath=descr_xpath,
+                                                                value=descr_value)
+
+        input_parameters.append(input_param_1)
+
+        nsr = create_nsr(nsd.id, input_parameters, cloud_account_name)
+
+        logger.info("Instantiating the Network Service")
+        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        # Verify the input parameter configuration
+        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+        for input_param in input_parameters:
+            verify_input_parameters(running_config, input_param)
+
+        assert len(nsrs) == 1
+        assert nsrs[0].ns_instance_config_ref == nsr.id
+
+        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.id)
+        rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=360)
+
+
+@pytest.mark.teardown('multivmvnf')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestMultiVmVnfTrafgenAppTeardown(object):
+    def test_terminate_nsr(self, nsr_proxy, vnfr_proxy, rwnsr_proxy, logger):
+        """
+        Terminate the instance and check if the record is deleted.
+
+        Asserts:
+        1. NSR record is deleted from instance-config.
+
+        """
+        logger.debug("Terminating Multi VM VNF's NSR")
+
+        nsr_path = "/ns-instance-config"
+        nsr = rwnsr_proxy.get_config(nsr_path)
+
+        ping_pong = nsr.nsr[0]
+        rwnsr_proxy.delete_config("/ns-instance-config/nsr[id='{}']".format(ping_pong.id))
+        time.sleep(30)
+
+
+    def test_delete_records(self, nsd_proxy, vnfd_proxy):
+        """Delete the NSD & VNFD records
+
+        Asserts:
+            The records are deleted.
+        """
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        for nsd in nsds.nsd:
+            xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
+            nsd_proxy.delete_config(xpath)
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        for vnfd_record in vnfds.vnfd:
+            xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+            vnfd_proxy.delete_config(xpath)
+
+        time.sleep(5)
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        assert nsds is None or len(nsds.nsd) == 0
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        assert vnfds is None or len(vnfds.vnfd) == 0
diff --git a/rwlaunchpad/ra/pytest/multivm_vnf/test_trafgen_data.py b/rwlaunchpad/ra/pytest/multivm_vnf/test_trafgen_data.py
new file mode 100644 (file)
index 0000000..197e95c
--- /dev/null
@@ -0,0 +1,218 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_trafgen_data.py
+@author Karun Ganesharatnam (karun.ganesharatnam@riftio.com)
+@date 03/16/2016
+@brief Trafgen data traffic tests for multi-VM VNFs
+"""
+
+import ipaddress
+import pytest
+import re
+import subprocess
+import time
+
+import rift.auto.session
+import rift.vcs.vcs
+
+from gi.repository import (
+    RwTrafgenYang,
+    RwTrafgenDataYang,
+    RwVnfBaseOpdataYang,
+    RwVnfBaseConfigYang
+)
+
+
+@pytest.fixture(scope='session')
+def trafgen_vnfr(request, rwvnfr_proxy, session_type):
+    vnfr = "/vnfr-catalog/vnfr"
+    vnfrs = rwvnfr_proxy.get(vnfr, list_obj=True)
+    for vnfr in vnfrs.vnfr:
+        if 'trafgen' in vnfr.short_name:
+            return vnfr
+    assert False, "Not found the VNFR with name 'trafgen'"
+
+@pytest.fixture(scope='session')
+def trafgen_session(request, trafgen_vnfr, session_type):
+    trafgen_host = trafgen_vnfr.vnf_configuration.config_access.mgmt_ip_address
+    if session_type == 'netconf':
+        tg_session = rift.auto.session.NetconfSession(host=trafgen_host)
+    elif session_type == 'restconf':
+        tg_session = rift.auto.session.RestconfSession(host=trafgen_host)
+
+    tg_session.connect()
+    rift.vcs.vcs.wait_until_system_started(tg_session, 900)
+    return tg_session
+
+@pytest.fixture(scope='session')
+def trafgen_ports(request, trafgen_vnfr, session_type):
+    return [cp.name for cp in trafgen_vnfr.connection_point]
+
+@pytest.fixture(scope='module')
+def tgdata_proxy(trafgen_session):
+    '''fixture that returns a proxy to RwTrafgenDataYang'''
+    return trafgen_session.proxy(RwTrafgenDataYang)
+
+
+@pytest.fixture(scope='module')
+def tgcfg_proxy(trafgen_session):
+    '''fixture that returns a proxy to RwTrafgenYang'''
+    return trafgen_session.proxy(RwTrafgenYang)
+
+
+@pytest.fixture(scope='module')
+def vnfdata_proxy(trafgen_session):
+    '''fixture that returns a proxy to RwVnfBaseOpdataYang'''
+    return trafgen_session.proxy(RwVnfBaseOpdataYang)
+
+
+@pytest.fixture(scope='module')
+def vnfcfg_proxy(trafgen_session):
+    '''fixture that returns a proxy to RwVnfBaseConfigYang'''
+    return trafgen_session.proxy(RwVnfBaseConfigYang)
+
+
+def confirm_config(tgcfg_proxy, vnf_name):
+    '''Wait until the configuration is present for the given VNF
+
+    Arguments:
+        vnf_name - name of the VNF whose configuration is checked
+    '''
+    xpath = "/vnf-config/vnf[name='%s'][instance='0']" % vnf_name
+    for _ in range(24):
+        tg_config = tgcfg_proxy.get_config(xpath)
+        if tg_config is not None:
+            break
+        time.sleep(10)
+    else:
+        assert False, "Configuration check timeout"
+
+
+def start_traffic(tgdata_proxy, tgcfg_proxy, port_name):
+    '''Start traffic on the port with the specified name.
+
+    Arguments:
+        port_name - name of port on which to start traffic
+    '''
+    confirm_config(tgcfg_proxy, 'trafgen')
+    rpc_input = RwTrafgenDataYang.RwStartTrafgenTraffic.from_dict({
+        'vnf_name':'trafgen',
+        'vnf_instance':0,
+        'port_name':port_name
+    })
+    rpc_output = RwVnfBaseOpdataYang.YangOutput_RwVnfBaseOpdata_Start_VnfOutput()
+    tgdata_proxy.rpc(rpc_input, rpc_name='start', output_obj=rpc_output)
+
+
+def stop_traffic(tgdata_proxy, port_name):
+    '''Stop traffic on the port with the specified name.
+
+    Arguments:
+        port_name - name of port on which to stop traffic
+    '''
+    rpc_input = RwTrafgenDataYang.RwStopTrafgenTraffic.from_dict({
+        'vnf_name':'trafgen',
+        'vnf_instance':0,
+        'port_name':port_name
+    })
+    rpc_output = RwVnfBaseOpdataYang.YangOutput_RwVnfBaseOpdata_Stop_VnfOutput()
+    tgdata_proxy.rpc(rpc_input, rpc_name='stop', output_obj=rpc_output)
+
+
+def wait_for_traffic_started(vnfdata_proxy, vnf_name, port_name, timeout=120, interval=2, threshold=60):
+    '''Wait for traffic to be started on the specified port
+
+    Traffic is determined to be started if the input/output packets on the port
+    increment during the specified interval
+
+    Arguments:
+        port_name - name of the port being monitored
+        timeout - time allowed for traffic to start
+        interval - interval at which the counters should be checked
+        threshold - values under the threshold are treated as 0
+    '''
+    def value_incremented(previous_sample, current_sample):
+        '''Comparison that returns True if the sampled counter increased
+        beyond the specified threshold during the sampling interval,
+        otherwise returns False
+        '''
+        return (int(current_sample) - int(previous_sample)) > threshold
+
+    xpath = "/vnf-opdata/vnf[name='{}'][instance='0']/port-state[portname='{}']/counters/{}"
+    vnfdata_proxy.wait_for_interval(xpath.format(vnf_name, port_name, 'input-packets'),
+                                    value_incremented, timeout=timeout, interval=interval)
+
+
+def wait_for_traffic_stopped(vnfdata_proxy, vnf_name, port_name, timeout=60, interval=2, threshold=60):
+    '''Wait for traffic to be stopped on the specified port
+
+    Traffic is determined to be stopped if the input/output packets on the port
+    remain unchanged during the specified interval
+
+    Arguments:
+        port_name - name of the port being monitored
+        timeout - time allowed for traffic to stop
+        interval - interval at which the counters should be checked
+        threshold - values under the threshold are treated as 0
+    '''
+    def value_unchanged(previous_sample, current_sample):
+        '''Comparison that returns True if the sampled counter increased
+        less than the specified threshold during the sampling interval,
+        otherwise returns False
+        '''
+        return (int(current_sample) - int(previous_sample)) < threshold
+
+    xpath = "/vnf-opdata/vnf[name='{}'][instance='0']/port-state[portname='{}']/counters/{}"
+    vnfdata_proxy.wait_for_interval(xpath.format(vnf_name, port_name, 'input-packets'), value_unchanged, timeout=timeout, interval=interval)
+
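+# --- Editorial sketch (not part of the original test suite) -----------------
+# Both waiters above delegate to proxy.wait_for_interval, which is assumed to
+# sample the counter xpath every `interval` seconds and feed consecutive
+# samples to the comparison callback.  A standalone sketch of that loop:
+def _poll_counter(sample_fn, compare, timeout=60, interval=2):
+    '''Sample sample_fn() every `interval` seconds until compare(prev, cur)
+    returns True; raise TimeoutError after `timeout` seconds (sketch).'''
+    previous = sample_fn()
+    elapsed = 0
+    while elapsed < timeout:
+        time.sleep(interval)
+        elapsed += interval
+        current = sample_fn()
+        if compare(previous, current):
+            return current
+        previous = current
+    raise TimeoutError("counter comparison did not succeed within %ss" % timeout)
+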
+@pytest.mark.depends('multivmvnf')
+@pytest.mark.incremental
+class TestMVVSlbDataFlow:
+
+    def test_start_stop_traffic(self, vnfdata_proxy, tgdata_proxy, tgcfg_proxy, trafgen_ports):
+        ''' This test verifies that traffic can be started and stopped on
+        all trafgen ports.
+
+        Arguments:
+            vnfdata_proxy - proxy to retrieve vnf operational data
+            tgdata_proxy - proxy to retrieve trafgen operational data
+            tgcfg_proxy - proxy to retrieve trafgen configuration
+            trafgen_ports - list of port names on which traffic can be started
+        '''
+        time.sleep(300)
+        for port in trafgen_ports:
+            start_traffic(tgdata_proxy, tgcfg_proxy, port)
+            wait_for_traffic_started(vnfdata_proxy, 'trafgen', port)
+            stop_traffic(tgdata_proxy, port)
+            wait_for_traffic_stopped(vnfdata_proxy, 'trafgen', port)
+
+
+    def test_start_traffic(self, vnfdata_proxy, tgdata_proxy, tgcfg_proxy, trafgen_ports):
+        ''' This test starts traffic on all trafgen ports in preparation for
+        subsequent tests
+
+        Arguments:
+            vnfdata_proxy - proxy to retrieve vnf operational data
+            tgdata_proxy - proxy to retrieve trafgen operational data
+            tgcfg_proxy - proxy to retrieve trafgen configuration
+            trafgen_ports - list of port names on which traffic can be started
+        '''
+        for port in trafgen_ports:
+            start_traffic(tgdata_proxy, tgcfg_proxy, port)
+            wait_for_traffic_started(vnfdata_proxy, 'trafgen', port)
diff --git a/rwlaunchpad/ra/pytest/ns/conftest.py b/rwlaunchpad/ra/pytest/ns/conftest.py
new file mode 100644 (file)
index 0000000..a1fa446
--- /dev/null
@@ -0,0 +1,292 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import functools
+import hashlib
+import pytest
+import os
+import tempfile
+import shutil
+import subprocess
+
+import gi
+import rift.auto.session
+import rift.mano.examples.ping_pong_nsd as ping_pong
+import rift.vcs.vcs
+
+class PackageError(Exception):
+    pass
+
+@pytest.fixture(scope='session', autouse=True)
+def cloud_account_name(request):
+    '''fixture which returns the name used to identify the cloud account'''
+    return 'cloud-0'
+
+@pytest.fixture(scope='session')
+def ping_pong_install_dir():
+    '''Fixture containing the location of ping_pong installation
+    '''
+    install_dir = os.path.join(
+        os.environ["RIFT_ROOT"],
+        "images"
+        )
+    return install_dir
+
+@pytest.fixture(scope='session')
+def ping_vnfd_package_file(ping_pong_install_dir):
+    '''Fixture containing the location of the ping vnfd package
+
+    Arguments:
+        ping_pong_install_dir - location of ping_pong installation
+    '''
+    ping_pkg_file = os.path.join(
+            ping_pong_install_dir,
+            "ping_vnfd_with_image.tar.gz",
+            )
+    if not os.path.exists(ping_pkg_file):
+        raise PackageError("Ping VNFD package not found: %s" % ping_pkg_file)
+
+    return ping_pkg_file
+
+
+@pytest.fixture(scope='session')
+def pong_vnfd_package_file(ping_pong_install_dir):
+    '''Fixture containing the location of the pong vnfd package
+
+    Arguments:
+        ping_pong_install_dir - location of ping_pong installation
+    '''
+    pong_pkg_file = os.path.join(
+            ping_pong_install_dir,
+            "pong_vnfd_with_image.tar.gz",
+            )
+    if not os.path.exists(pong_pkg_file):
+        raise PackageError("Pong VNFD package not found: %s" % pong_pkg_file)
+
+    return pong_pkg_file
+
+
+@pytest.fixture(scope='session')
+def ping_pong_nsd_package_file(ping_pong_install_dir):
+    '''Fixture containing the location of the ping_pong_nsd package
+
+    Arguments:
+        ping_pong_install_dir - location of ping_pong installation
+    '''
+    ping_pong_pkg_file = os.path.join(
+            ping_pong_install_dir,
+            "ping_pong_nsd.tar.gz",
+            )
+    if not os.path.exists(ping_pong_pkg_file):
+        raise PackageError("ping_pong NSD package not found: %s" % ping_pong_pkg_file)
+
+    return ping_pong_pkg_file
+
+@pytest.fixture(scope='session')
+def image_dirs():
+    ''' Fixture containing a list of directories where images can be found
+    '''
+    rift_build = os.environ['RIFT_BUILD']
+    rift_root = os.environ['RIFT_ROOT']
+    image_dirs = [
+        os.path.join(
+            rift_build,
+            "modules/core/mano/src/core_mano-build/examples/",
+            "ping_pong_ns/ping_vnfd_with_image/images"
+        ),
+        os.path.join(
+            rift_root,
+            "images"
+        )
+    ]
+    return image_dirs
+
+@pytest.fixture(scope='session')
+def image_paths(image_dirs):
+    ''' Fixture containing a mapping of image names to their paths
+
+    Arguments:
+        image_dirs - a list of directories where images are located
+    '''
+    image_paths = {}
+    for image_dir in image_dirs:
+        if os.path.exists(image_dir):
+            names = os.listdir(image_dir)
+            image_paths.update({name:os.path.join(image_dir, name) for name in names})
+    return image_paths
+
+@pytest.fixture(scope='session')
+def path_ping_image(image_paths):
+    ''' Fixture containing the location of the ping image
+
+    Arguments:
+        image_paths - mapping of images to their paths
+    '''
+    return image_paths["Fedora-x86_64-20-20131211.1-sda-ping.qcow2"]
+
+@pytest.fixture(scope='session')
+def path_pong_image(image_paths):
+    ''' Fixture containing the location of the pong image
+
+    Arguments:
+        image_paths - mapping of images to their paths
+    '''
+    return image_paths["Fedora-x86_64-20-20131211.1-sda-pong.qcow2"]
+
+class PingPongFactory:
+    def __init__(self, path_ping_image, path_pong_image, rsyslog_host, rsyslog_port):
+        self.path_ping_image = path_ping_image
+        self.path_pong_image = path_pong_image
+        self.rsyslog_host = rsyslog_host
+        self.rsyslog_port = rsyslog_port
+
+    def generate_descriptors(self):
+        '''Return a new set of ping and pong descriptors
+        '''
+        def md5sum(path):
+            with open(path, mode='rb') as fd:
+                md5 = hashlib.md5()
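+                # Hash in 4 KiB chunks so multi-GB qcow2 images are never
+                # read into memory all at once.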
+                for buf in iter(functools.partial(fd.read, 4096), b''):
+                    md5.update(buf)
+            return md5.hexdigest()
+
+        ping_md5sum = md5sum(self.path_ping_image)
+        pong_md5sum = md5sum(self.path_pong_image)
+
+        ex_userdata = None
+        if self.rsyslog_host and self.rsyslog_port:
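+            # cloud-init userdata that forwards all syslog messages ("*.*")
+            # from the VM to the collector at {host}:{port} (a single "@"
+            # selects UDP in rsyslog).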
+            ex_userdata = '''
+rsyslog:
+  - "$ActionForwardDefaultTemplate RSYSLOG_ForwardFormat"
+  - "*.* @{host}:{port}"
+            '''.format(
+                host=self.rsyslog_host,
+                port=self.rsyslog_port,
+            )
+
+        descriptors = ping_pong.generate_ping_pong_descriptors(
+                pingcount=1,
+                ping_md5sum=ping_md5sum,
+                pong_md5sum=pong_md5sum,
+                ex_ping_userdata=ex_userdata,
+                ex_pong_userdata=ex_userdata,
+        )
+
+        return descriptors
+
+@pytest.fixture(scope='session')
+def ping_pong_factory(path_ping_image, path_pong_image, rsyslog_host, rsyslog_port):
+    '''Fixture returns a factory capable of generating ping and pong descriptors
+    '''
+    return PingPongFactory(path_ping_image, path_pong_image, rsyslog_host, rsyslog_port)
+
+@pytest.fixture(scope='session')
+def ping_pong_records(ping_pong_factory):
+    '''Fixture returns the default set of ping_pong descriptors
+    '''
+    return ping_pong_factory.generate_descriptors()
+
+
+@pytest.fixture(scope='session')
+def descriptors(request, ping_pong_records):
+    def pingpong_descriptors(with_images=True):
+        """Generated the VNFDs & NSD files for pingpong NS.
+
+        Returns:
+            Tuple: file path for ping vnfd, pong vnfd and ping_pong_nsd
+        """
+        ping_vnfd, pong_vnfd, ping_pong_nsd = ping_pong_records
+
+        tmpdir = tempfile.mkdtemp()
+        rift_build = os.environ['RIFT_BUILD']
+        MANO_DIR = os.path.join(
+                rift_build,
+                "modules/core/mano/src/core_mano-build/examples/ping_pong_ns")
+        ping_img = os.path.join(MANO_DIR, "ping_vnfd_with_image/images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2")
+        pong_img = os.path.join(MANO_DIR, "pong_vnfd_with_image/images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2")
+
+        """ grab cached copies of these files if not found. They may not exist 
+            because our git submodule dependency mgmt
+            will not populate these because they live in .build, not .install
+        """
+        if not os.path.exists(ping_img):
+            ping_img = os.path.join(
+                        os.environ['RIFT_ROOT'], 
+                        'images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2')
+            pong_img = os.path.join(
+                        os.environ['RIFT_ROOT'], 
+                        'images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2')
+
+        for descriptor in [ping_vnfd, pong_vnfd, ping_pong_nsd]:
+            descriptor.write_to_file(output_format='xml', outdir=tmpdir)
+
+        ping_img_path = os.path.join(tmpdir, "{}/images/".format(ping_vnfd.name))
+        pong_img_path = os.path.join(tmpdir, "{}/images/".format(pong_vnfd.name))
+
+        if with_images:
+            os.makedirs(ping_img_path)
+            os.makedirs(pong_img_path)
+            shutil.copy(ping_img, ping_img_path)
+            shutil.copy(pong_img, pong_img_path)
+
+        for dir_name in [ping_vnfd.name, pong_vnfd.name, ping_pong_nsd.name]:
+            subprocess.call([
+                    "sh",
+                    "{rift_install}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh".format(rift_install=os.environ['RIFT_INSTALL']),
+                    tmpdir,
+                    dir_name])
+
+        return (os.path.join(tmpdir, "{}.tar.gz".format(ping_vnfd.name)),
+                os.path.join(tmpdir, "{}.tar.gz".format(pong_vnfd.name)),
+                os.path.join(tmpdir, "{}.tar.gz".format(ping_pong_nsd.name)))
+
+    def haproxy_descriptors():
+        """HAProxy descriptors."""
+        files = [
+            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/http_client/http_client_vnfd.tar.gz"),
+            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/httpd/httpd_vnfd.tar.gz"),
+            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/haproxy/haproxy_vnfd.tar.gz"),
+            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/waf/waf_vnfd.tar.gz"),
+            os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/haproxy_waf_httpd_nsd/haproxy_waf_httpd_nsd.tar.gz")
+            ]
+
+        return files
+
+    if request.config.option.network_service == "pingpong":
+        return pingpong_descriptors()
+    elif request.config.option.network_service == "pingpong_noimg":
+        return pingpong_descriptors(with_images=False)
+    elif request.config.option.network_service == "haproxy":
+        return haproxy_descriptors()
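+    # NOTE: any other --network-service value falls through and returns None.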
+
+
+@pytest.fixture(scope='session')
+def descriptor_images(request):
+    def haproxy_images():
+        """HAProxy images."""
+        images = [
+            os.path.join(os.getenv('RIFT_ROOT'), "images/haproxy-v03.qcow2"),
+            os.path.join(os.getenv('RIFT_ROOT'), "images/web-app-firewall-v02.qcow2"),
+            os.path.join(os.getenv('RIFT_ROOT'), "images/web-server-v02.qcow2")
+            ]
+
+        return images
+
+    if request.config.option.network_service == "haproxy":
+        return haproxy_images()
+
+    return []
diff --git a/rwlaunchpad/ra/pytest/ns/haproxy/test_scaling.py b/rwlaunchpad/ra/pytest/ns/haproxy/test_scaling.py
new file mode 100644 (file)
index 0000000..846ef2e
--- /dev/null
@@ -0,0 +1,170 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import pytest
+
+from gi.repository import NsrYang, RwNsrYang, RwVnfrYang, NsdYang, RwNsdYang
+import rift.auto.session
+
+@pytest.fixture(scope='module')
+def proxy(request, mgmt_session):
+    return mgmt_session.proxy
+
+
+ScalingGroupInstance = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance
+ScalingGroup = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup
+
+INSTANCE_ID = 1
+
+
+@pytest.mark.depends('nsr')
+@pytest.mark.incremental
+class TestScaling:
+    def wait_for_nsr_state(self, proxy, state):
+        """Wait till the NSR reaches a desired state.
+
+        Args:
+            proxy (Callable): Proxy for launchpad session.
+            state (str): Expected state
+        """
+        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
+        nsr = nsr_opdata.nsr[0]
+        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.ns_instance_config_ref)
+        proxy(RwNsrYang).wait_for(xpath, state, timeout=240)
+
+    def verify_scaling_group(self, proxy, group_name, expected_records_count, scale_out=True):
+        """
+        Args:
+            proxy (Callable): LP session
+            group_name (str): Name of the group being scaled.
+            expected_records_count (int): Expected number of instance records
+                in the scaling group.
+            scale_out (bool, optional): Identifies scale-out vs. scale-in mode.
+
+        Asserts:
+            1. Additional records are added to the opdata
+            2. Status of the scaling group
+            3. New vnfr record has been created.
+        """
+        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
+        nsr_id = nsr_opdata.nsr[0].ns_instance_config_ref
+
+        xpath = ('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'
+                 '/scaling-group-record[scaling-group-name-ref="{}"]').format(
+                        nsr_id, group_name)
+
+        scaling_record = proxy(NsrYang).get(xpath)
+
+        assert len(scaling_record.instance) == expected_records_count
+
+        for instance in scaling_record.instance:
+            assert instance.op_status == 'running'
+
+            for vnfr in instance.vnfrs:
+                vnfr_record = proxy(RwVnfrYang).get(
+                        "/vnfr-catalog/vnfr[id='{}']".format(vnfr))
+                assert vnfr_record is not None
+
+    def verify_scale_up(self, proxy, group_name, expected):
+        """Verifies the scaling up steps for the group
+        NSR moves from running -> scaling-up -> running
+
+        Args:
+            proxy (callable): LP proxy
+            group_name (str): Name of the group to verify.
+        """
+        self.wait_for_nsr_state(proxy, "scaling-out")
+        self.wait_for_nsr_state(proxy, "running")
+        self.verify_scaling_group(proxy, group_name, expected)
+
+    def verify_scale_in(self, proxy, group_name, expected):
+        """Verifies the scaling in streps for the group.
+        NSR moves from running -> scaling-down -> running
+
+        Args:
+            proxy (callable): LP proxy
+            group_name (str): group name.
+        """
+        self.wait_for_nsr_state(proxy, "scaling-in")
+        self.wait_for_nsr_state(proxy, "running")
+        self.verify_scaling_group(proxy, group_name, expected, scale_out=False)
+
+    def test_wait_for_nsr_configured(self, proxy):
+        """Wait till the NSR state moves to configured before starting scaling
+        tests.
+        """
+        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        assert len(nsrs) == 1
+        current_nsr = nsrs[0]
+
+        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(current_nsr.ns_instance_config_ref)
+        proxy(RwNsrYang).wait_for(xpath, "configured", timeout=240)
+
+
+    def test_min_max_scaling(self, proxy):
+        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+        nsd_id = nsrs[0].nsd_ref
+        nsr_id = nsrs[0].ns_instance_config_ref
+
+        # group_name = "http_client_group"
+
+        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/scaling-group-record".format(nsr_id)
+        scaling_records = proxy(RwNsrYang).get(xpath, list_obj=True)
+
+        for scaling_record in scaling_records.scaling_group_record:
+            group_name = scaling_record.scaling_group_name_ref
+            xpath = "/nsd-catalog/nsd[id='{}']/scaling-group-descriptor[name='{}']".format(
+                    nsd_id, group_name)
+            scaling_group_desc = proxy(NsdYang).get(xpath)
+
+            # Request instances 1..max-instance-count; each successful add
+            # should leave instance_id + 1 records, and the final request,
+            # which goes one beyond max-instance-count, must be rejected.
+            for instance_id in range(1, scaling_group_desc.max_instance_count + 1):
+                xpath = '/ns-instance-config/nsr[id="{}"]/scaling-group[scaling-group-name-ref="{}"]'.format(
+                            nsr_id, 
+                            group_name)
+
+                instance = ScalingGroupInstance.from_dict({"id": instance_id})
+                scaling_group = proxy(NsrYang).get(xpath)
+
+                if scaling_group is None:
+                    scaling_group = ScalingGroup.from_dict({
+                        'scaling_group_name_ref': group_name,
+                        })
+
+                scaling_group.instance.append(instance)
+
+                try:
+                    proxy(NsrYang).merge_config(xpath, scaling_group)
+                    self.verify_scale_up(proxy, group_name, instance_id + 1)
+                except rift.auto.session.ProxyRequestError:
+                    assert instance_id == scaling_group_desc.max_instance_count
+
+            for instance_id in range(1, scaling_group_desc.max_instance_count):
+                xpath = ('/ns-instance-config/nsr[id="{}"]/scaling-group'
+                         '[scaling-group-name-ref="{}"]/'
+                         'instance[id="{}"]').format(
+                         nsr_id, group_name, instance_id)
+                proxy(NsrYang).delete_config(xpath)
+                self.verify_scale_in(proxy, group_name, instance_id)
+
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong.py
new file mode 100644 (file)
index 0000000..45a7832
--- /dev/null
@@ -0,0 +1,677 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_pingpong.py
+@author Austin Cormier (Austin.Cormier@riftio.com)
+@author Paul Laidler (Paul.Laidler@riftio.com)
+@date 11/03/2015
+@brief Launchpad System Test
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import requests
+import shutil
+import subprocess
+import tempfile
+import time
+import uuid
+
+import rift.auto.mano
+import rift.auto.session
+import rift.mano.examples.ping_pong_nsd as ping_pong
+
+import gi
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+
+from gi.repository import (
+    NsdYang,
+    RwNsrYang,
+    RwVnfrYang,
+    NsrYang,
+    VnfrYang,
+    VldYang,
+    RwVnfdYang,
+    RwLaunchpadYang,
+    RwBaseYang
+)
+
+logging.basicConfig(level=logging.DEBUG)
+
+@pytest.fixture(scope='module')
+def vnfd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfdYang)
+
+@pytest.fixture(scope='module')
+def rwvnfr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfrYang)
+
+@pytest.fixture(scope='module')
+def vld_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VldYang)
+
+@pytest.fixture(scope='module')
+def nsd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsdYang)
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+@pytest.fixture(scope='module')
+def base_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwBaseYang)
+
+class DescriptorOnboardError(Exception):
+    pass
+
+def create_nsr(nsd, input_param_list, cloud_account_name):
+    """
+    Create the NSR record object
+
+    Arguments:
+        nsd                 - NSD
+        input_param_list    - list of input-parameter objects
+        cloud_account_name  - name of cloud account
+
+    Return:
+         NSR object
+    """
+    nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+
+    nsr.id = str(uuid.uuid4())
+    nsr.name = rift.auto.mano.resource_name(nsr.id)
+    nsr.short_name = "nsr_short_name"
+    nsr.description = "This is a description"
+    nsr.nsd.from_dict(nsd.as_dict())
+    nsr.admin_status = "ENABLED"
+    nsr.input_parameter.extend(input_param_list)
+    nsr.cloud_account = cloud_account_name
+
+    return nsr
+
+def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
+    curl_cmd = 'curl --insecure -F "descriptor=@{file}" https://{host}:4567/api/upload'.format(
+            file=descriptor_file,
+            host=host,
+    )
+
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
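+
+# A minimal requests-based sketch equivalent to upload_descriptor, shown for
+# clarity only (the tests keep using the curl-based helper above). It assumes
+# the same /api/upload endpoint and a JSON reply carrying "transaction_id";
+# the function name is illustrative and is not used by the test suite.
+def upload_descriptor_requests(descriptor_file, host="127.0.0.1"):
+    url = 'https://{host}:4567/api/upload'.format(host=host)
+    with open(descriptor_file, 'rb') as pkg:
+        # verify=False mirrors curl's --insecure flag
+        response = requests.post(url, files={"descriptor": pkg}, verify=False)
+    return response.json()["transaction_id"]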
+
+def wait_onboard_transaction_finished(logger, transaction_id, timeout=30, host="127.0.0.1"):
+
+    def check_onboard_status():
+        uri = 'https://%s:4567/api/upload/%s/state' % (host, transaction_id)
+        curl_cmd = 'curl --insecure {uri}'.format(uri=uri)
+        return subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    logger.info("Waiting for onboard transaction [%s] to complete", transaction_id)
+
+    elapsed = 0
+    start = time.time()
+    while elapsed < timeout:
+
+        reply = check_onboard_status()
+        state = json.loads(reply)
+        if state["status"] == "success":
+            break
+
+        if state["status"] == "failure":
+            raise DescriptorOnboardError(state["errors"])
+
+        if state["status"] != "pending":
+            raise DescriptorOnboardError(state)
+
+        time.sleep(1)
+        elapsed = time.time() - start
+
+
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+    logger.info("Descriptor onboard was successful")
+
+def onboard_descriptor(host, file_name, logger, endpoint, scheme, cert):
+    """On-board/update the descriptor.
+
+    Args:
+        host (str): Launchpad IP
+        file_name (str): Full file path.
+        logger: Logger instance
+        endpoint (str): Endpoint to be used for the upload operation.
+        scheme (str): Transport scheme (currently unused here).
+        cert: TLS certificate (currently unused here).
+
+    """
+    logger.info("Onboarding package: %s", file_name)
+    trans_id = upload_descriptor(
+            logger,
+            file_name,
+            host=host)
+    wait_onboard_transaction_finished(
+        logger,
+        trans_id,
+        host=host)
+
+
+def terminate_nsrs(rwvnfr_proxy, rwnsr_proxy, logger):
+    """
+    Terminate the instance and check if the record is deleted.
+
+    Asserts:
+    1. NSR record is deleted from instance-config.
+
+    """
+    logger.debug("Terminating Ping Pong NSRs")
+
+    nsr_path = "/ns-instance-config"
+    nsr = rwnsr_proxy.get_config(nsr_path)
+    nsrs = nsr.nsr
+
+    xpaths = []
+    for ping_pong in nsrs:
+        xpath = "/ns-instance-config/nsr[id='{}']".format(ping_pong.id)
+        rwnsr_proxy.delete_config(xpath)
+        xpaths.append(xpath)
+
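+    # Give the NSM a minute to tear the instances down before checking that
+    # the configuration and the VNFR records are gone.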
+    time.sleep(60)
+    for xpath in xpaths:
+        nsr = rwnsr_proxy.get_config(xpath)
+        assert nsr is None
+
+    # Get the ns-instance-config
+    ns_instance_config = rwnsr_proxy.get_config("/ns-instance-config")
+
+    # Termination tests
+    vnfr = "/vnfr-catalog/vnfr"
+    vnfrs = rwvnfr_proxy.get(vnfr, list_obj=True)
+    assert vnfrs is None or len(vnfrs.vnfr) == 0
+
+    # nsr = "/ns-instance-opdata/nsr"
+    # nsrs = rwnsr_proxy.get(nsr, list_obj=True)
+    # assert len(nsrs.nsr) == 0
+
+
+def generate_tar_files(tmpdir, ping_vnfd, pong_vnfd, ping_pong_nsd):
+    """Converts the descriptor to files and package them into zip files
+    that can be uploaded to LP instance.
+
+    Args:
+        tmpdir (string): Full path where the zipped files should be
+        ping_vnfd (VirtualNetworkFunction): Ping VNFD data
+        pong_vnfd (VirtualNetworkFunction): Pong VNFD data
+        ping_pong_nsd (NetworkService): PingPong NSD data
+
+    Returns:
+        Tuple: file path for ping vnfd, pong vnfd and ping_pong_nsd
+    """
+    rift_build = os.environ['RIFT_BUILD']
+    MANO_DIR = os.path.join(
+            rift_build,
+            "modules/core/mano/src/core_mano-build/examples/ping_pong_ns")
+    ping_img = os.path.join(MANO_DIR, "ping_vnfd_with_image/images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2")
+    pong_img = os.path.join(MANO_DIR, "pong_vnfd_with_image/images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2")
+
+    """ grab cached copies of these files if not found. They may not exist
+        because our git submodule dependency mgmt
+        will not populate these because they live in .build, not .install
+    """
+    if not os.path.exists(ping_img):
+        ping_img = os.path.join(
+                    os.environ['RIFT_ROOT'],
+                    'images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2')
+        pong_img = os.path.join(
+                    os.environ['RIFT_ROOT'],
+                    'images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2')
+
+    for descriptor in [ping_vnfd, pong_vnfd, ping_pong_nsd]:
+        descriptor.write_to_file(output_format='xml', outdir=tmpdir.name)
+
+    ping_img_path = os.path.join(tmpdir.name, "{}/images/".format(ping_vnfd.name))
+    pong_img_path = os.path.join(tmpdir.name, "{}/images/".format(pong_vnfd.name))
+    os.makedirs(ping_img_path)
+    os.makedirs(pong_img_path)
+
+    shutil.copy(ping_img, ping_img_path)
+    shutil.copy(pong_img, pong_img_path)
+
+    for dir_name in [ping_vnfd.name, pong_vnfd.name, ping_pong_nsd.name]:
+        subprocess.call([
+                "sh",
+                "{rift_install}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh".format(rift_install=os.environ['RIFT_INSTALL']),
+                tmpdir.name,
+                dir_name])
+
+    return (os.path.join(tmpdir.name, "{}.tar.gz".format(ping_vnfd.name)),
+            os.path.join(tmpdir.name, "{}.tar.gz".format(pong_vnfd.name)),
+            os.path.join(tmpdir.name, "{}.tar.gz".format(ping_pong_nsd.name)))
+
+
+@pytest.mark.setup('pingpong')
+@pytest.mark.depends('launchpad')
+@pytest.mark.usefixtures('cloud_account')
+@pytest.mark.incremental
+class TestPingPongStart(object):
+    """A brief overview of the steps performed.
+    1. Generate & on-board new descriptors
+    2. Start & stop the ping pong NSR
+    3. Update the existing descriptor files.
+    4. Start the ping pong NSR.
+
+    """
+
+
+    def test_onboard_descriptors(
+            self,
+            logger,
+            vnfd_proxy,
+            nsd_proxy,
+            mgmt_session,
+            scheme,
+            cert,
+            ping_pong_records):
+        """Generates & On-boards the descriptors.
+        """
+        temp_dirs = []
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        endpoint = "upload"
+
+        """
+        This upload routine can get called multiples times for upload API,
+        depending on the combinations of 'cloud_account' & 'endpoint'
+        fixtures. Since the records are cached at module level, we might end up
+        uploading the same uuids multiple times, thus causing errors. So a
+        simple work-around will be to skip the records when they are uploaded
+        for the second time.
+        """
+        def onboard_ping_pong_vnfds(ping_vnfd_file, pong_vnfd_file):
+            # On-board VNFDs
+            for file_name in [ping_vnfd_file, pong_vnfd_file]:
+                onboard_descriptor(
+                        mgmt_session.host,
+                        file_name,
+                        logger,
+                        endpoint,
+                        scheme,
+                        cert)
+
+            catalog = vnfd_proxy.get_config('/vnfd-catalog')
+            vnfds = catalog.vnfd
+            assert len(vnfds) == 2, "There should two vnfds"
+            assert "ping_vnfd" in [vnfds[0].name, vnfds[1].name]
+            assert "pong_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+
+        def delete_vnfds():
+            vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+            for vnfd_record in vnfds.vnfd:
+                xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+                vnfd_proxy.delete_config(xpath)
+
+            time.sleep(5)
+            vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+            assert vnfds is None or len(vnfds.vnfd) == 0
+
+
+        if catalog is not None and len(catalog.vnfd) == 2 and endpoint == "upload":
+            return
+
+        # Unpack the records here so the update branch below can reference them.
+        ping_vnfd, pong_vnfd, ping_pong_nsd = ping_pong_records
+
+        if endpoint == "update":
+            for vnfd_record in [ping_vnfd, pong_vnfd]:
+                vnfd_record.descriptor.vnfd[0].description += "_update"
+            ping_pong_nsd.descriptor.nsd[0].description += "_update"
+
+        tmpdir2 = tempfile.TemporaryDirectory()
+        temp_dirs.append(tmpdir2)
+        ping_pong.generate_ping_pong_descriptors(pingcount=1,
+                                                  write_to_file=True,
+                                                  out_dir=tmpdir2.name,
+                                                  ping_fmt='json',
+                                                  pong_fmt='xml',
+                                                  )
+
+        # On-board VNFDs without image
+        ping_vnfd_file = os.path.join(tmpdir2.name, 'ping_vnfd/vnfd/ping_vnfd.json')
+        pong_vnfd_file = os.path.join(tmpdir2.name, 'pong_vnfd/vnfd/pong_vnfd.xml')
+        onboard_ping_pong_vnfds(ping_vnfd_file, pong_vnfd_file)
+
+        delete_vnfds()
+
+        tmpdir = tempfile.TemporaryDirectory()
+        temp_dirs.append(tmpdir)
+
+        ping_vnfd_file, pong_vnfd_file, pingpong_nsd_file = \
+            generate_tar_files(tmpdir, ping_vnfd, pong_vnfd, ping_pong_nsd)
+
+        # On-board VNFDs with image
+        onboard_ping_pong_vnfds(ping_vnfd_file, pong_vnfd_file)
+
+        # On-board NSD
+        onboard_descriptor(
+                mgmt_session.host,
+                pingpong_nsd_file,
+                logger,
+                endpoint,
+                scheme,
+                cert)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        assert nsds[0].name == "ping_pong_nsd"
+
+        # Temp directory cleanup
+#         for temp_dir in temp_dirs:
+#             temp_dir.cleanup()
+
+    def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account):
+
+        def verify_input_parameters(running_config, config_param):
+            """
+            Verify the configured parameter set against the running configuration
+            """
+            for run_input_param in running_config.input_parameter:
+                if (run_input_param.xpath == config_param.xpath and
+                    run_input_param.value == config_param.value):
+                    return True
+
+            assert False, ("Verification of configured input parameters: { xpath:%s, value:%s} "
+                          "is unsuccessful.\nRunning configuration: %s" % (config_param.xpath,
+                                                                           config_param.value,
+                                                                           running_config.input_parameter))
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        input_parameters = []
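+        # Input parameters let the instantiation request override fields in
+        # the on-boarded NSD; here the NSD's vendor field is set to
+        # "automation".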
+        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:vendor" % nsd.id
+        descr_value = "automation"
+        in_param_id = str(uuid.uuid4())
+
+        input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                                                                xpath=descr_xpath,
+                                                                value=descr_value)
+
+        input_parameters.append(input_param_1)
+
+        nsr = create_nsr(nsd, input_parameters, cloud_account.name)
+
+        logger.info("Instantiating the Network Service")
+        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'.format(nsr.id))
+        assert nsr_opdata is not None
+
+        # Verify the input parameter configuration
+        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+        for input_param in input_parameters:
+            verify_input_parameters(running_config, input_param)
+
+    def test_wait_for_pingpong_started(self, rwnsr_proxy):
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        for nsr in nsrs:
+            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(
+                    nsr.ns_instance_config_ref)
+            rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=180)
+
+    def test_wait_for_pingpong_configured(self, rwnsr_proxy):
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        for nsr in nsrs:
+            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(
+                    nsr.ns_instance_config_ref)
+            rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=450)
+
+
+@pytest.mark.feature("update-api")
+@pytest.mark.depends('pingpong')
+@pytest.mark.usefixtures('cloud_account')
+@pytest.mark.incremental
+class TestUpdateNsr(object):
+    def test_stop_nsr(self, rwvnfr_proxy, rwnsr_proxy, logger):
+        """Terminate the currently running NSR instance before updating the descriptor files"""
+        terminate_nsrs(rwvnfr_proxy, rwnsr_proxy, logger)
+
+    def test_onboard_descriptors(
+            self,
+            logger,
+            vnfd_proxy,
+            nsd_proxy,
+            mgmt_session,
+            scheme,
+            cert,
+            ping_pong_records):
+        """Generates & On-boards the descriptors.
+        """
+        temp_dirs = []
+        endpoint = "update"
+        ping_vnfd, pong_vnfd, ping_pong_nsd = ping_pong_records
+
+        """
+        This upload routine can get called multiples times for upload API,
+        depending on the combinations of 'cloud_account' & 'endpoint'
+        fixtures. Since the records are cached at module level, we might end up
+        uploading the same uuids multiple times, thus causing errors. So a
+        simple work-around will be to skip the records when they are uploaded
+        for the second time.
+        """
+        def onboard_ping_pong_vnfds(ping_vnfd_file, pong_vnfd_file):
+            # On-board VNFDs
+            for file_name in [ping_vnfd_file, pong_vnfd_file]:
+                onboard_descriptor(
+                        mgmt_session.host,
+                        file_name,
+                        logger,
+                        endpoint,
+                        scheme,
+                        cert)
+
+            catalog = vnfd_proxy.get_config('/vnfd-catalog')
+            vnfds = catalog.vnfd
+
+            assert len(vnfds) == 2, "There should two vnfds"
+            assert "ping_vnfd" in [vnfds[0].name, vnfds[1].name]
+            assert "pong_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+        def delete_nsds():
+            nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+            for nsd_record in nsds.nsd:
+                xpath = "/nsd-catalog/nsd[id='{}']".format(nsd_record.id)
+                nsd_proxy.delete_config(xpath)
+
+            time.sleep(5)
+            nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+            assert nsds is None or len(nsds.nsd) == 0
+
+        delete_nsds()
+
+        def delete_vnfds():
+            vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+            for vnfd_record in vnfds.vnfd:
+                xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+                vnfd_proxy.delete_config(xpath)
+
+            time.sleep(5)
+            vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+            assert vnfds is None or len(vnfds.vnfd) == 0
+
+        delete_vnfds()
+
+        if endpoint == "update":
+            for vnfd_record in [ping_vnfd, pong_vnfd]:
+                vnfd_record.descriptor.vnfd[0].description += "_update"
+            ping_pong_nsd.descriptor.nsd[0].description += "_update"
+
+        tmpdir2 = tempfile.TemporaryDirectory()
+        temp_dirs.append(tmpdir2)
+        ping_pong.generate_ping_pong_descriptors(pingcount=1,
+                                                  write_to_file=True,
+                                                  out_dir=tmpdir2.name,
+                                                  ping_fmt='json',
+                                                  pong_fmt='xml',
+                                                  )
+
+        # On-board VNFDs without image
+        ping_vnfd_file = os.path.join(tmpdir2.name, 'ping_vnfd/vnfd/ping_vnfd.json')
+        pong_vnfd_file = os.path.join(tmpdir2.name, 'pong_vnfd/vnfd/pong_vnfd.xml')
+        onboard_ping_pong_vnfds(ping_vnfd_file, pong_vnfd_file)
+        delete_vnfds()
+
+        tmpdir = tempfile.TemporaryDirectory()
+        temp_dirs.append(tmpdir)
+
+        ping_vnfd_file, pong_vnfd_file, pingpong_nsd_file = \
+            generate_tar_files(tmpdir, ping_vnfd, pong_vnfd, ping_pong_nsd)
+
+        # On-board VNFDs with image
+        onboard_ping_pong_vnfds(ping_vnfd_file, pong_vnfd_file)
+
+
+        # On-board NSD
+        onboard_descriptor(
+                mgmt_session.host,
+                pingpong_nsd_file,
+                logger,
+                endpoint,
+                scheme,
+                cert)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        assert nsds[0].name == "ping_pong_nsd"
+
+        # Temp directory cleanup
+#         for temp_dir in temp_dirs:
+#             temp_dir.cleanup()
+
+    def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account):
+        def verify_input_parameters(running_config, config_param):
+            """
+            Verify the configured parameter set against the running configuration
+            """
+            for run_input_param in running_config.input_parameter:
+                if (run_input_param.xpath == config_param.xpath and
+                    run_input_param.value == config_param.value):
+                    return True
+
+            assert False, ("Verification of configured input parameters: { xpath:%s, value:%s} "
+                          "is unsuccessful.\nRunning configuration: %s" % (config_param.xpath,
+                                                                           config_param.value,
+                                                                           running_config.input_parameter))
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        input_parameters = []
+        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:vendor" % nsd.id
+        descr_value = "automation"
+        in_param_id = str(uuid.uuid4())
+
+        input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                                                                xpath=descr_xpath,
+                                                                value=descr_value)
+
+        input_parameters.append(input_param_1)
+
+        nsr = create_nsr(nsd, input_parameters, cloud_account.name)
+
+        logger.info("Instantiating the Network Service")
+        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'.format(nsr.id))
+        assert nsr_opdata is not None
+
+        # Verify the input parameter configuration
+        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+        for input_param in input_parameters:
+            verify_input_parameters(running_config, input_param)
+
+    def test_wait_for_pingpong_started(self, rwnsr_proxy):
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        for nsr in nsrs:
+            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(
+                    nsr.ns_instance_config_ref)
+            rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=180)
+
+    def test_wait_for_pingpong_configured(self, rwnsr_proxy):
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        for nsr in nsrs:
+            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(
+                    nsr.ns_instance_config_ref)
+            rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=450)
+
+
+@pytest.mark.teardown('pingpong')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestPingPongTeardown(object):
+    def test_terminate_nsrs(self, rwvnfr_proxy, rwnsr_proxy, logger):
+        """
+        Terminate the instance and check if the record is deleted.
+
+        Asserts:
+        1. NSR record is deleted from instance-config.
+
+        """
+        logger.debug("Terminating Ping Pong NSR")
+        terminate_nsrs(rwvnfr_proxy, rwnsr_proxy, logger)
+
+    def test_delete_records(self, nsd_proxy, vnfd_proxy):
+        """Delete the NSD & VNFD records
+
+        Asserts:
+            The records are deleted.
+        """
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        for nsd in nsds.nsd:
+            xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
+            nsd_proxy.delete_config(xpath)
+
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        assert nsds is None or len(nsds.nsd) == 0
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        for vnfd_record in vnfds.vnfd:
+            xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+            vnfd_proxy.delete_config(xpath)
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        assert vnfds is None or len(vnfds.vnfd) == 0
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong_longevity.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_pingpong_longevity.py
new file mode 100644 (file)
index 0000000..ff8fa96
--- /dev/null
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2016/01/04
+#
+
+import pytest
+import rift.vcs.vcs
+import time
+
+import gi
+gi.require_version('RwNsrYang', '1.0')
+from gi.repository import RwNsrYang
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+def test_launchpad_longevity(mgmt_session, mgmt_domain_name, rwnsr_proxy):
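+    # Let the system run for a minute, then confirm it is still started and
+    # that every NSR remains operationally 'running'.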
+    time.sleep(60)
+    rift.vcs.vcs.wait_until_system_started(mgmt_session)
+
+    nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+    for nsr in nsr_opdata.nsr:
+        xpath = ("/ns-instance-opdata"
+                 "/nsr[ns-instance-config-ref='%s']"
+                 "/operational-status") % (nsr.ns_instance_config_ref)
+        operational_status = rwnsr_proxy.get(xpath)
+        assert operational_status == 'running'
+
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_records.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_records.py
new file mode 100644 (file)
index 0000000..920bd70
--- /dev/null
@@ -0,0 +1,487 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import socket
+import subprocess
+import time
+
+import pytest
+
+import gi
+import re
+gi.require_version('RwNsrYang', '1.0')
+from gi.repository import (
+        NsdYang,
+        RwBaseYang,
+        RwConmanYang,
+        RwNsrYang,
+        RwNsdYang,
+        RwVcsYang,
+        RwVlrYang,
+        RwVnfdYang,
+        RwVnfrYang,
+        VlrYang,
+        VnfrYang,
+        )
+import rift.auto.session
+import rift.mano.examples.ping_pong_nsd as ping_pong
+import rift.vcs.vcs
+
+
+@pytest.fixture(scope='module')
+def proxy(request, mgmt_session):
+    return mgmt_session.proxy
+
+@pytest.fixture(scope='session')
+def updated_ping_pong_records(ping_pong_factory):
+    '''Fixture returns a newly created set of ping and pong descriptors
+    for the create_update tests
+    '''
+    return ping_pong_factory.generate_descriptors()
+
+def yield_vnfd_vnfr_pairs(proxy, nsr=None):
+    """
+    Yields tuples of vnfd & vnfr entries.
+
+    Args:
+        proxy (callable): Launchpad proxy
+        nsr (optional): If specified, only the vnfr & vnfd records of the NSR
+                are returned
+
+    Yields:
+        Tuple: VNFD and its corresponding VNFR entry
+    """
+    def get_vnfd(vnfd_id):
+        xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_id)
+        return proxy(RwVnfdYang).get(xpath)
+
+    vnfr = "/vnfr-catalog/vnfr"
+    vnfrs = proxy(RwVnfrYang).get(vnfr, list_obj=True)
+    for vnfr in vnfrs.vnfr:
+
+        if nsr:
+            const_vnfr_ids = [const_vnfr.vnfr_id for const_vnfr in nsr.constituent_vnfr_ref]
+            if vnfr.id not in const_vnfr_ids:
+                continue
+
+        vnfd = get_vnfd(vnfr.vnfd_ref)
+        yield vnfd, vnfr
+
+
+def yield_nsd_nsr_pairs(proxy):
+    """Yields tuples of NSD & NSR
+
+    Args:
+        proxy (callable): Launchpad proxy
+
+    Yields:
+        Tuple: NSD and its corresponding NSR record
+    """
+
+    for nsr_cfg, nsr in yield_nsrc_nsro_pairs(proxy):
+        nsd_path = "/nsd-catalog/nsd[id='{}']".format(
+                nsr_cfg.nsd.id)
+        nsd = proxy(RwNsdYang).get_config(nsd_path)
+
+        yield nsd, nsr
+
+def yield_nsrc_nsro_pairs(proxy):
+    """Yields tuples of NSR Config & NSR Opdata pairs
+
+    Args:
+        proxy (callable): Launchpad proxy
+
+    Yields:
+        Tuple: NSR config and its corresponding NSR op record
+    """
+    nsr = "/ns-instance-opdata/nsr"
+    nsrs = proxy(RwNsrYang).get(nsr, list_obj=True)
+    for nsr in nsrs.nsr:
+        nsr_cfg_path = "/ns-instance-config/nsr[id='{}']".format(
+                nsr.ns_instance_config_ref)
+        nsr_cfg = proxy(RwNsrYang).get_config(nsr_cfg_path)
+
+        yield nsr_cfg, nsr
+
+
+def assert_records(proxy):
+    """Verifies if the NSR & VNFR records are created
+    """
+    ns_tuple = list(yield_nsd_nsr_pairs(proxy))
+    assert len(ns_tuple) == 1
+
+    vnf_tuple = list(yield_vnfd_vnfr_pairs(proxy))
+    assert len(vnf_tuple) == 2
+
+
+@pytest.mark.depends('nsr')
+@pytest.mark.setup('records')
+@pytest.mark.usefixtures('recover_tasklet')
+@pytest.mark.incremental
+class TestRecordsData(object):
+    def is_valid_ip(self, address):
+        """Verifies if it is a valid IP and if its accessible
+
+        Args:
+            address (str): IP address
+
+        Returns:
+            boolean
+        """
+        try:
+            socket.inet_aton(address)
+        except socket.error:
+            return False
+        else:
+            return True
+
+
+    @pytest.mark.feature("recovery")
+    def test_tasklets_recovery(self, mgmt_session, proxy, recover_tasklet):
+        """Test the recovery feature of tasklets
+
+        Triggers the vcrash and waits till the system is up
+        """
+        RECOVERY = "RESTART"
+
+        def vcrash(comp):
+            rpc_ip = RwVcsYang.VCrashInput.from_dict({"instance_name": comp})
+            proxy(RwVcsYang).rpc(rpc_ip)
+
+        tasklet_name = r'^{}-.*'.format(recover_tasklet)
+
+        vcs_info = proxy(RwBaseYang).get("/vcs/info/components")
+        for comp in vcs_info.component_info:
+            if comp.recovery_action == RECOVERY and \
+               re.match(tasklet_name, comp.instance_name):
+                vcrash(comp.instance_name)
+
+        time.sleep(60)
+
+        rift.vcs.vcs.wait_until_system_started(mgmt_session)
+        # NSM tasklet takes a couple of seconds to set up the python structure
+        # so sleep and then continue with the tests.
+        time.sleep(60)
+
+    def test_records_present(self, proxy):
+        assert_records(proxy)
+
+    def test_nsd_ref_count(self, proxy):
+        """
+        Asserts
+        1. The NSD ref-count data matches the actual number of NSRs
+        """
+        nsd_ref_xpath = "/ns-instance-opdata/nsd-ref-count"
+        nsd_refs = proxy(RwNsrYang).get(nsd_ref_xpath, list_obj=True)
+
+        expected_ref_count = collections.defaultdict(int)
+        for nsd_ref in nsd_refs.nsd_ref_count:
+            expected_ref_count[nsd_ref.nsd_id_ref] = nsd_ref.instance_ref_count
+
+        actual_ref_count = collections.defaultdict(int)
+        for nsd, nsr in yield_nsd_nsr_pairs(proxy):
+            actual_ref_count[nsd.id] += 1
+
+        assert expected_ref_count == actual_ref_count
+
+    def test_vnfd_ref_count(self, proxy):
+        """
+        Asserts
+        1. The VNFD ref-count data matches the actual number of VNFRs
+        """
+        vnfd_ref_xpath = "/vnfr-catalog/vnfd-ref-count"
+        vnfd_refs = proxy(RwVnfrYang).get(vnfd_ref_xpath, list_obj=True)
+
+        expected_ref_count = collections.defaultdict(int)
+        for vnfd_ref in vnfd_refs.vnfd_ref_count:
+            expected_ref_count[vnfd_ref.vnfd_id_ref] = vnfd_ref.instance_ref_count
+
+        actual_ref_count = collections.defaultdict(int)
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            actual_ref_count[vnfd.id] += 1
+
+        assert expected_ref_count == actual_ref_count
+
+    def test_nsr_nsd_records(self, proxy):
+        """
+        Verifies the correctness of the NSR record using its NSD counter-part
+
+        Asserts:
+        1. The count of vnfd and vnfr records
+        2. Count of connection point descriptor and records
+        """
+        for nsd, nsr in yield_nsd_nsr_pairs(proxy):
+            assert nsd.name == nsr.nsd_name_ref
+            assert len(nsd.constituent_vnfd) == len(nsr.constituent_vnfr_ref)
+
+            assert len(nsd.vld) == len(nsr.vlr)
+            for vnfd_conn_pts, vnfr_conn_pts in zip(nsd.vld, nsr.vlr):
+                assert len(vnfd_conn_pts.vnfd_connection_point_ref) == \
+                       len(vnfr_conn_pts.vnfr_connection_point_ref)
+
+    def test_vdu_record_params(self, proxy):
+        """
+        Asserts:
+        1. If a valid floating IP has been assigned to the VM
+        2. Count of VDUD and the VDUR
+        3. Check if the VM flavor has been copied over the VDUR
+        """
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            assert vnfd.mgmt_interface.port == vnfr.mgmt_interface.port
+            assert len(vnfd.vdu) == len(vnfr.vdur)
+
+            for vdud, vdur in zip(vnfd.vdu, vnfr.vdur):
+                assert vdud.vm_flavor == vdur.vm_flavor
+                assert self.is_valid_ip(vdur.management_ip) is True
+                assert vdud.external_interface[0].vnfd_connection_point_ref == \
+                    vdur.external_interface[0].vnfd_connection_point_ref
+
+    def test_external_vl(self, proxy):
+        """
+        Asserts:
+        1. Valid IP for external connection point
+        2. A valid external network fabric
+        3. Connection point names are copied over
+        4. Count of VLD and VLR
+        5. A valid assigned subnet
+        6. Operational status of the VLR is 'running'
+        """
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            cp_des, cp_rec = vnfd.connection_point, vnfr.connection_point
+
+            assert len(cp_des) == len(cp_rec)
+            assert cp_des[0].name == cp_rec[0].name
+            assert self.is_valid_ip(cp_rec[0].ip_address) is True
+
+            xpath = "/vlr-catalog/vlr[id='{}']".format(cp_rec[0].vlr_ref)
+            vlr = proxy(RwVlrYang).get(xpath)
+
+            assert len(vlr.network_id) > 0
+            assert len(vlr.assigned_subnet) > 0
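+            # assigned-subnet is in CIDR form ("a.b.c.d/len"); validate the
+            # address part.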
+            ip, _ = vlr.assigned_subnet.split("/")
+            assert self.is_valid_ip(ip) is True
+            assert vlr.operational_status == "running"
+
+
+    def test_nsr_record(self, proxy):
+        """
+        Currently we only test the components of the NSR record, ignoring
+        the operational-events records.
+
+        Asserts:
+        1. The constituent components.
+        2. Admin status of the corresponding NSD record.
+        """
+        for nsr_cfg, nsr in yield_nsrc_nsro_pairs(proxy):
+            # 1 n/w and 2 connection points
+            assert len(nsr.vlr) == 1
+            assert len(nsr.vlr[0].vnfr_connection_point_ref) == 2
+
+            assert len(nsr.constituent_vnfr_ref) == 2
+            assert nsr_cfg.admin_status == 'ENABLED'
+
+    def test_wait_for_pingpong_configured(self, proxy):
+        nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        assert len(nsrs) == 1
+        current_nsr = nsrs[0]
+
+        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(current_nsr.ns_instance_config_ref)
+        proxy(RwNsrYang).wait_for(xpath, "configured", timeout=400)
+
+    def test_monitoring_params(self, proxy):
+        """
+        Asserts:
+        1. The value counter ticks (check currently disabled below)
+        2. The meta fields are copied over from the descriptor
+        """
+        def mon_param_record(vnfr_id, mon_param_id):
+            return '/vnfr-catalog/vnfr[id="{}"]/monitoring-param[id="{}"]'.format(
+                    vnfr_id, mon_param_id)
+
+        for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            for mon_des in vnfd.monitoring_param:
+                mon_rec_path = mon_param_record(vnfr.id, mon_des.id)
+                mon_rec = proxy(VnfrYang).get(mon_rec_path)
+
+                # Meta data check
+                fields = mon_des.as_dict().keys()
+                for field in fields:
+                    assert getattr(mon_des, field) == getattr(mon_rec, field)
+                # Tick check
+                #assert mon_rec.value_integer > 0
+
+    def test_cm_nsr(self, proxy):
+        """
+        Asserts:
+            1. The ID of the NSR in cm-state
+            2. Name of the cm-nsr
+            3. The vnfr component's count
+            4. State of the cm-nsr
+        """
+        for nsd, nsr in yield_nsd_nsr_pairs(proxy):
+            con_nsr_xpath = "/cm-state/cm-nsr[id='{}']".format(nsr.ns_instance_config_ref)
+            con_data = proxy(RwConmanYang).get(con_nsr_xpath)
+
+            assert con_data.name == "ping_pong_nsd"
+            assert len(con_data.cm_vnfr) == 2
+
+            state_path = con_nsr_xpath + "/state"
+            proxy(RwConmanYang).wait_for(state_path, 'ready', timeout=120)
+
+    def test_cm_vnfr(self, proxy):
+        """
+        Asserts:
+            1. The ID of Vnfr in cm-state
+            2. Name of the vnfr
+            3. State of the VNFR
+            4. Checks for a reachable IP in mgmt_interface
+            5. Basic checks for connection point and cfg_location.
+        """
+        def is_reachable(ip, timeout=10):
+            rc = subprocess.call(["ping", "-c1", "-w", str(timeout), ip])
+            return rc == 0
+
+        nsr_cfg, _ = list(yield_nsrc_nsro_pairs(proxy))[0]
+        con_nsr_xpath = "/cm-state/cm-nsr[id='{}']".format(nsr_cfg.id)
+
+        for _, vnfr in yield_vnfd_vnfr_pairs(proxy):
+            con_vnfr_path = con_nsr_xpath + "/cm-vnfr[id='{}']".format(vnfr.id)
+            con_data = proxy(RwConmanYang).get(con_vnfr_path)
+
+            assert con_data is not None
+
+            state_path = con_vnfr_path + "/state"
+            proxy(RwConmanYang).wait_for(state_path, 'ready', timeout=120)
+
+            con_data = proxy(RwConmanYang).get(con_vnfr_path)
+            assert is_reachable(con_data.mgmt_interface.ip_address) is True
+
+            assert len(con_data.connection_point) == 1
+            connection_point = con_data.connection_point[0]
+            assert connection_point.name == vnfr.connection_point[0].name
+            assert connection_point.ip_address == vnfr.connection_point[0].ip_address
+
+            assert con_data.cfg_location is not None
+
+@pytest.mark.depends('nsr')
+@pytest.mark.setup('nfvi')
+@pytest.mark.incremental
+class TestNfviMetrics(object):
+
+    def test_records_present(self, proxy):
+        assert_records(proxy)
+
+    @pytest.mark.skipif(True, reason='NFVI metrics collected from NSR are deprecated, test needs to be updated to collected metrics from VNFRs')
+    def test_nfvi_metrics(self, proxy):
+        """
+        Verify the NFVI metrics
+
+        Asserts:
+            1. Computed metrics, such as memory, cpu, storage and ports, match
+               with the metrics in NSR record. The metrics are computed from the
+               descriptor records.
+            2. Check if the 'utilization' field has a valid value (> 0) and matches
+               with the 'used' field, if available.
+        """
+        for nsd, nsr in yield_nsd_nsr_pairs(proxy):
+            nfvi_metrics = nsr.nfvi_metrics
+            computed_metrics = collections.defaultdict(int)
+
+            # Get the constituent VNF records.
+            for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy, nsr):
+                vdu = vnfd.vdu[0]
+                vm_spec = vdu.vm_flavor
+                computed_metrics['vm'] += 1
+                computed_metrics['memory'] += vm_spec.memory_mb * (10**6)
+                computed_metrics['storage'] += vm_spec.storage_gb * (10**9)
+                computed_metrics['vcpu'] += vm_spec.vcpu_count
+                computed_metrics['external_ports'] += len(vnfd.connection_point)
+                computed_metrics['internal_ports'] += len(vdu.internal_connection_point)
+
+            assert nfvi_metrics.vm.active_vm == computed_metrics['vm']
+
+            # Availability checks
+            for metric_name in computed_metrics:
+                metric_data = getattr(nfvi_metrics, metric_name)
+                total_available = getattr(metric_data, 'total', None)
+
+                if total_available is not None:
+                    assert computed_metrics[metric_name] == total_available
+
+            # Utilization checks
+            for metric_name in ['memory', 'storage', 'vcpu']:
+                metric_data = getattr(nfvi_metrics, metric_name)
+
+                utilization = metric_data.utilization
+                # assert utilization > 0
+
+                # If used field is available, check if it matches with utilization!
+                total = metric_data.total
+                used = getattr(metric_data, 'used', None)
+                if used is not None:
+                    assert total > 0
+                    computed_utilization = round((used/total) * 100, 2)
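+                    # e.g. used 2 GB of an 8 GB total computes to 25.0%,
+                    # which must agree with the reported utilization to
+                    # within 0.1 percentage points.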
+                    assert abs(computed_utilization - utilization) <= 0.1
+
+
+
+@pytest.mark.depends('nfvi')
+@pytest.mark.incremental
+class TestRecordsDescriptors:
+    def test_create_update_vnfd(self, proxy, updated_ping_pong_records):
+        """
+        Verify VNFD related operations
+
+        Asserts:
+            If a VNFD record is created
+        """
+        ping_vnfd, pong_vnfd, _ = updated_ping_pong_records
+        vnfdproxy = proxy(RwVnfdYang)
+
+        for vnfd_record in [ping_vnfd, pong_vnfd]:
+            xpath = "/vnfd-catalog/vnfd"
+            vnfdproxy.create_config(xpath, vnfd_record.vnfd)
+
+            xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+            vnfd = vnfdproxy.get(xpath)
+            assert vnfd.id == vnfd_record.id
+
+            vnfdproxy.replace_config(xpath, vnfd_record.vnfd)
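+            # Re-applying the same record through replace_config exercises
+            # the update path for an already-onboarded VNFD.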
+
+    def test_create_update_nsd(self, proxy, updated_ping_pong_records):
+        """
+        Verify NSD related operations
+
+        Asserts:
+            If NSD record was created
+        """
+        _, _, ping_pong_nsd = updated_ping_pong_records
+        nsdproxy = proxy(NsdYang)
+
+        xpath = "/nsd-catalog/nsd"
+        nsdproxy.create_config(xpath, ping_pong_nsd.descriptor)
+
+        xpath = "/nsd-catalog/nsd[id='{}']".format(ping_pong_nsd.id)
+        nsd = nsdproxy.get(xpath)
+        assert nsd.id == ping_pong_nsd.id
+
+        nsdproxy.replace_config(xpath, ping_pong_nsd.descriptor)
+
diff --git a/rwlaunchpad/ra/pytest/ns/pingpong/test_scaling.py b/rwlaunchpad/ra/pytest/ns/pingpong/test_scaling.py
new file mode 100644 (file)
index 0000000..0878db7
--- /dev/null
@@ -0,0 +1,192 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_scaling.py
+@author Paul Laidler (Paul.Laidler@riftio.com)
+@date 07/13/2016
+@brief Pingpong scaling system test
+"""
+
+import os
+import pytest
+import subprocess
+import sys
+import time
+import uuid
+
+import rift.auto.mano
+import rift.auto.session
+import rift.auto.descriptor
+
+from gi.repository import (
+    NsrYang,
+    NsdYang,
+    VnfrYang,
+    RwNsrYang,
+    RwNsdYang,
+    RwVnfrYang,
+)
+
+@pytest.mark.setup('pingpong_nsd')
+@pytest.mark.depends('launchpad')
+class TestSetupPingpongNsd(object):
+    def test_onboard(self, mgmt_session, descriptors):
+        for descriptor in descriptors:
+            rift.auto.descriptor.onboard(mgmt_session.host, descriptor)
+
+    def test_install_sar(self, mgmt_session):
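+        # 'sar' ships with the sysstat package; it is used later in this
+        # test to sample CPU, memory and interface counters on the
+        # launchpad VM while services are scaled up.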
+        install_cmd = 'ssh {mgmt_ip} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- sudo yum install sysstat --assumeyes'.format(
+                mgmt_ip=mgmt_session.host,
+        )
+        subprocess.check_call(install_cmd, shell=True)
+
+
+@pytest.fixture(scope='function', params=[5,10,15,20,25])
+def service_count(request):
+    '''Fixture representing the number of services to test'''
+    return request.param
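+# The fixture is function scoped and parametrized, so each dependent test
+# runs once per value, stepping the system up through 5..25 services.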
+
+@pytest.mark.depends('pingpong_nsd')
+class TestScaling(object):
+    @pytest.mark.preserve_fixture_order
+    def test_scaling(self, mgmt_session, cloud_account_name, service_count):
+
+        def start_services(mgmt_session, desired_service_count, max_attempts=3): 
+            catalog = mgmt_session.proxy(NsdYang).get_config('/nsd-catalog')
+            nsd = catalog.nsd[0]
+            
+            nsr_path = "/ns-instance-config"
+            nsr = mgmt_session.proxy(RwNsrYang).get_config(nsr_path)
+            service_count = len(nsr.nsr)
+
+            attempts = 0
+            while attempts < max_attempts and service_count < desired_service_count:
+                attempts += 1
+
+                for count in range(service_count, desired_service_count):
+                    nsr = rift.auto.descriptor.create_nsr(
+                        cloud_account_name,
+                        "pingpong_%s" % str(uuid.uuid4().hex[:10]),
+                        nsd.id)
+                    mgmt_session.proxy(RwNsrYang).create_config('/ns-instance-config/nsr', nsr)
+
+                ns_instance_opdata = mgmt_session.proxy(RwNsrYang).get('/ns-instance-opdata')
+                for nsr in ns_instance_opdata.nsr:
+                    try:
+                        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.ns_instance_config_ref)
+                        mgmt_session.proxy(RwNsrYang).wait_for(xpath, "running", fail_on=['failed'], timeout=180)
+                        xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(nsr.ns_instance_config_ref)
+                        mgmt_session.proxy(RwNsrYang).wait_for(xpath, "configured", fail_on=['failed'], timeout=450)
+                        service_count += 1
+                    except rift.auto.session.ProxyWaitForError:
+                        mgmt_session.proxy(RwNsrYang).delete_config("/ns-instance-config/nsr[id='{}']".format(nsr.ns_instance_config_ref))
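+                        # A service that fails to reach running/configured
+                        # is deleted; the enclosing while loop then retries,
+                        # up to max_attempts times, to reach the desired count.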
+
+        def monitor_launchpad_performance(service_count, interval=30, samples=1):
+            sar_cmd = "ssh {mgmt_ip} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- sar -A {interval} {samples}".format(
+                    mgmt_ip=mgmt_session.host,
+                    interval=interval,
+                    samples=samples
+            )
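+            # 'sar -A' reports every available counter group for the
+            # requested interval and sample count.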
+            output = subprocess.check_output(sar_cmd, shell=True, stderr=subprocess.STDOUT)
+            outfile = '{rift_artifacts}/scaling_{task_id}.log'.format(
+                    rift_artifacts=os.environ.get('RIFT_ARTIFACTS'),
+                    task_id=os.environ.get('AUTO_TASK_ID')
+            )
+            with open(outfile, 'a') as fh:
+                message = '''
+== SCALING RESULTS : {service_count} Network Services ==
+{output}               
+                '''.format(service_count=service_count, output=output.decode())
+                fh.write(message)
+
+        start_services(mgmt_session, service_count)
+        monitor_launchpad_performance(service_count, interval=30, samples=1)
+
+@pytest.mark.depends('pingpong_nsd')
+@pytest.mark.teardown('pingpong_nsd')
+class TestTeardownPingpongNsr(object):
+    def test_teardown_nsr(self, mgmt_session):
+
+        ns_instance_config = mgmt_session.proxy(RwNsrYang).get_config('/ns-instance-config')
+        for nsr in ns_instance_config.nsr:
+            mgmt_session.proxy(RwNsrYang).delete_config("/ns-instance-config/nsr[id='{}']".format(nsr.id))
+
+        time.sleep(60)
+        vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get('/vnfr-catalog')
+        assert vnfr_catalog is None or len(vnfr_catalog.vnfr) == 0
+
+    def test_generate_plots(self):
+        plot_commands = [
+            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
+                    '--plot "{rift_artifacts}/scaling_cpu_{task_id}.png" '
+                    '--title "CPU Utilization by network service count" '
+                    '--keys CPU '
+                    '--fields %usr,%idle,%sys '
+                    '--key-filter CPU:all '
+                    '--ylabel "CPU Utilization %" '
+                    '--xlabel "Network Service Count" '
+                    '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
+            ),
+            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
+                    '--plot "{rift_artifacts}/scaling_mem_{task_id}.png" '
+                    '--title "Memory Utilization by network service count" '
+                    '--fields kbmemfree,kbmemused,kbbuffers,kbcached,kbcommit,kbactive,kbinact,kbdirty '
+                    '--ylabel "Memory Utilization" '
+                    '--xlabel "Network Service Count" '
+                    '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
+            ),
+            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
+                    '--plot "{rift_artifacts}/scaling_mempct_{task_id}.png" '
+                    '--title "Memory Utilization by network service count" '
+                    '--fields %memused,%commit '
+                    '--ylabel "Memory Utilization %" '
+                    '--xlabel "Network Service Count" '
+                    '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
+            ),
+            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
+                    '--plot "{rift_artifacts}/scaling_iface_{task_id}.png" '
+                    '--title "Interface Utilization by network service count" '
+                    '--keys IFACE '
+                    '--fields rxpck/s,txpck/s,rxkB/s,txkB/s,rxcmp/s,txcmp/s,rxmcst/s '
+                    '--key-filter IFACE:eth0 '
+                    '--ylabel "Interface Utilization" '
+                    '--xlabel "Network Service Count" '
+                    '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
+            ),
+            ('python {rift_install}/usr/rift/systemtest/util/sarplot.py '
+                    '--plot "{rift_artifacts}/scaling_iface_err_{task_id}.png" '
+                    '--title "Interface Errors by network service count" '
+                    '--keys IFACE '
+                    '--fields rxerr/s,txerr/s,coll/s,rxdrop/s,txdrop/s,txcarr/s,rxfram/s,rxfifo/s,txfifo/s '
+                    '--key-filter IFACE:eth0 '
+                    '--ylabel "Interface Errors" '
+                    '--xlabel "Network Service Count" '
+                    '--xticklabels "5,10,15,20,25" < {rift_artifacts}/scaling_{task_id}.log'
+            ),
+        ]
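+        # Each command pipes the collected sar log through sarplot.py to
+        # render one PNG per metric family (CPU, memory, interfaces); the
+        # RIFT_INSTALL, RIFT_ARTIFACTS and AUTO_TASK_ID environment
+        # variables are expected to be set by the test harness.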
+
+        for cmd in plot_commands:
+            subprocess.check_call(
+                    cmd.format(
+                        rift_install=os.environ.get('RIFT_INSTALL'),
+                        rift_artifacts=os.environ.get('RIFT_ARTIFACTS'),
+                        task_id=os.environ.get('AUTO_TASK_ID')
+                    ),
+                    shell=True
+            )
+
diff --git a/rwlaunchpad/ra/pytest/ns/test_onboard.py b/rwlaunchpad/ra/pytest/ns/test_onboard.py
new file mode 100644 (file)
index 0000000..5951ce8
--- /dev/null
@@ -0,0 +1,408 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_onboard.py
+@author Varun Prasad (varun.prasad@riftio.com)
+@brief Onboard descriptors
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import requests
+import shutil
+import subprocess
+import time
+import uuid
+
+import rift.auto.mano
+import rift.auto.session
+
+import gi
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+
+from gi.repository import (
+    RwcalYang,
+    NsdYang,
+    RwNsrYang,
+    RwVnfrYang,
+    NsrYang,
+    VnfrYang,
+    VldYang,
+    RwVnfdYang,
+    RwLaunchpadYang,
+    RwBaseYang
+)
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+@pytest.fixture(scope='module')
+def vnfd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfdYang)
+
+@pytest.fixture(scope='module')
+def rwvnfr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfrYang)
+
+@pytest.fixture(scope='module')
+def vld_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VldYang)
+
+
+@pytest.fixture(scope='module')
+def nsd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsdYang)
+
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+@pytest.fixture(scope='module')
+def base_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwBaseYang)
+
+
+@pytest.fixture(scope="module")
+def endpoint():
+    return "upload"
+
+def create_nsr(nsd, input_param_list, cloud_account_name):
+    """
+    Create the NSR record object
+
+    Arguments:
+         nsd                - NSD record to instantiate
+         input_param_list   - list of input-parameter objects
+         cloud_account_name - name of the cloud account to deploy on
+
+    Return:
+         NSR object
+    """
+    nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+
+    nsr.id = str(uuid.uuid4())
+    nsr.name = rift.auto.mano.resource_name(nsr.id)
+    nsr.short_name = "nsr_short_name"
+    nsr.description = "This is a description"
+    nsr.nsd.from_dict(nsd.as_dict())
+    nsr.admin_status = "ENABLED"
+    nsr.input_parameter.extend(input_param_list)
+    nsr.cloud_account = cloud_account_name
+
+    return nsr
+
+
+def upload_descriptor(
+        logger,
+        descriptor_file,
+        scheme,
+        cert,
+        host="127.0.0.1",
+        endpoint="upload"):
+    curl_cmd = ('curl --cert {cert} --key {key} -F "descriptor=@{file}" -k '
+                '{scheme}://{host}:4567/api/{endpoint}'.format(
+            cert=cert[0],
+            key=cert[1],
+            scheme=scheme,
+            endpoint=endpoint,
+            file=descriptor_file,
+            host=host,
+            ))
+
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    pass
+
+
+def wait_onboard_transaction_finished(
+        logger,
+        transaction_id,
+        scheme,
+        cert,
+        timeout=600,
+        host="127.0.0.1",
+        endpoint="upload"):
+
+    logger.info("Waiting for onboard trans_id %s to complete", transaction_id)
+    uri = '%s://%s:4567/api/%s/%s/state' % (scheme, host, endpoint, transaction_id)
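+    # The launchpad reports onboarding progress at this state URI; polling
+    # continues until the status is 'success' or any non-pending error state.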
+
+    elapsed = 0
+    start = time.time()
+    while elapsed < timeout:
+        reply = requests.get(uri, cert=cert, verify=False)
+        state = reply.json()
+        if state["status"] == "success":
+            break
+        if state["status"] != "pending":
+            raise DescriptorOnboardError(state)
+
+        time.sleep(1)
+        elapsed = time.time() - start
+
+
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+    logger.info("Descriptor onboard was successful")
+
+
+def onboard_descriptor(host, file_name, logger, endpoint, scheme, cert):
+    """On-board/update the descriptor.
+
+    Args:
+        host (str): Launchpad IP
+        file_name (str): Full file path.
+        logger: Logger instance
+        endpoint (str): endpoint to be used for the upload operation.
+
+    """
+    logger.info("Onboarding package: %s", file_name)
+    trans_id = upload_descriptor(
+            logger,
+            file_name,
+            scheme,
+            cert,
+            host=host,
+            endpoint=endpoint)
+    wait_onboard_transaction_finished(
+        logger,
+        trans_id,
+        scheme,
+        cert,
+        host=host,
+        endpoint=endpoint)
+
+def terminate_nsr(rwvnfr_proxy, rwnsr_proxy, logger, wait_after_kill=True):
+    """
+    Terminate the instance and check if the record is deleted.
+
+    Asserts:
+    1. NSR record is deleted from instance-config.
+
+    """
+    logger.debug("Terminating NSRs")
+
+    nsr_path = "/ns-instance-config"
+    nsr = rwnsr_proxy.get_config(nsr_path)
+    nsrs = nsr.nsr
+
+    xpaths = []
+    for nsr in nsrs:
+        xpath = "/ns-instance-config/nsr[id='{}']".format(nsr.id)
+        rwnsr_proxy.delete_config(xpath)
+        xpaths.append(xpath)
+
+    if wait_after_kill:
+        time.sleep(30)
+    else:
+        time.sleep(5)
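+    # Allow time for resources to be released before checking that the
+    # NSR and VNFR records have actually been removed.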
+
+    for xpath in xpaths:
+        nsr = rwnsr_proxy.get_config(xpath)
+        assert nsr is None
+
+    # Get the ns-instance-config
+    ns_instance_config = rwnsr_proxy.get_config("/ns-instance-config")
+
+    # Termination tests
+    vnfr = "/vnfr-catalog/vnfr"
+    vnfrs = rwvnfr_proxy.get(vnfr, list_obj=True)
+    assert vnfrs is None or len(vnfrs.vnfr) == 0
+
+    # nsr = "/ns-instance-opdata/nsr"
+    # nsrs = rwnsr_proxy.get(nsr, list_obj=True)
+    # assert len(nsrs.nsr) == 0
+
+
+
+@pytest.mark.setup('nsr')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestNsrStart(object):
+    """A brief overview of the steps performed.
+    1. Generate & on-board new descriptors
+    2. Start the NSR 
+    """
+
+    def test_upload_descriptors(
+            self,
+            logger,
+            vnfd_proxy,
+            nsd_proxy,
+            mgmt_session,
+            scheme,
+            cert,
+            descriptors
+        ):
+        """Generates & On-boards the descriptors.
+        """
+        endpoint = "upload"
+
+        for file_name in descriptors:
+            onboard_descriptor(
+                    mgmt_session.host,
+                    file_name,
+                    logger,
+                    endpoint,
+                    scheme,
+                    cert)
+
+        descriptor_vnfds, descriptor_nsd = descriptors[:-1], descriptors[-1]
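+        # By convention the NSD package is last in the descriptors list;
+        # everything before it is a VNFD.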
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        actual_vnfds = catalog.vnfd
+        assert len(actual_vnfds) == len(descriptor_vnfds), \
+                "There should be {} vnfds".format(len(descriptor_vnfds))
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        actual_nsds = catalog.nsd
+        assert len(actual_nsds) == 1, "There should only be a single nsd"
+
+    @pytest.mark.feature("upload-image")
+    def test_upload_images(self, descriptor_images, cloud_host, cloud_user, cloud_tenants):
+
+        openstack = rift.auto.mano.OpenstackManoSetup(
+                cloud_host,
+                cloud_user,
+                [(tenant, "private") for tenant in cloud_tenants])
+
+        for image_location in descriptor_images:
+            image = RwcalYang.ImageInfoItem.from_dict({
+                    'name': os.path.basename(image_location),
+                    'location': image_location,
+                    'disk_format': 'qcow2',
+                    'container_format': 'bare'})
+            openstack.create_image(image)
+
+
+    def test_set_scaling_params(self, nsd_proxy):
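+        # Cap every scaling group at two instances so that scale-out can be
+        # exercised while keeping the footprint of the test small.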
+        nsds = nsd_proxy.get('/nsd-catalog')
+        nsd = nsds.nsd[0]
+        for scaling_group in nsd.scaling_group_descriptor:
+            scaling_group.max_instance_count = 2
+
+        nsd_proxy.replace_config('/nsd-catalog/nsd[id="{}"]'.format(
+            nsd.id), nsd)
+
+
+    def test_instantiate_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account_name):
+
+        def verify_input_parameters(running_config, config_param):
+            """
+            Verify the configured parameter set against the running configuration
+            """
+            for run_input_param in running_config.input_parameter:
+                if (run_input_param.xpath == config_param.xpath and
+                    run_input_param.value == config_param.value):
+                    return True
+
+            assert False, ("Verification of configured input parameters: { xpath:%s, value:%s} "
+                          "is unsuccessful.\nRunning configuration: %s" % (config_param.xpath,
+                                                                           config_param.value,
+                                                                           running_config.input_parameter))
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        input_parameters = []
+        descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
+        descr_value = "New NSD Description"
+        in_param_id = str(uuid.uuid4())
+
+        input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                                                                xpath=descr_xpath,
+                                                                value=descr_value)
+
+        input_parameters.append(input_param_1)
+
+        nsr = create_nsr(nsd, input_parameters, cloud_account_name)
+
+        logger.info("Instantiating the Network Service")
+        rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'.format(nsr.id))
+        assert nsr_opdata is not None
+
+        # Verify the input parameter configuration
+        running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+        for input_param in input_parameters:
+            verify_input_parameters(running_config, input_param)
+
+    def test_wait_for_nsr_started(self, rwnsr_proxy):
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+
+        for nsr in nsrs:
+            xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.ns_instance_config_ref)
+            rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=240)
+
+
+@pytest.mark.teardown('nsr')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestNsrTeardown(object):
+    def test_terminate_nsr(self, rwvnfr_proxy, rwnsr_proxy, logger, cloud_type):
+        """
+        Terminate the instance and check if the record is deleted.
+
+        Asserts:
+        1. NSR record is deleted from instance-config.
+
+        """
+        logger.debug("Terminating NSR")
+
+        wait_after_kill = True
+        if cloud_type == "mock":
+            wait_after_kill = False
+
+        terminate_nsr(rwvnfr_proxy, rwnsr_proxy, logger, wait_after_kill=wait_after_kill)
+
+    def test_delete_records(self, nsd_proxy, vnfd_proxy):
+        """Delete the NSD & VNFD records
+
+        Asserts:
+            The records are deleted.
+        """
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        for nsd in nsds.nsd:
+            xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
+            nsd_proxy.delete_config(xpath)
+
+        nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+        assert nsds is None or len(nsds.nsd) == 0
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        for vnfd_record in vnfds.vnfd:
+            xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+            vnfd_proxy.delete_config(xpath)
+
+        vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+        assert vnfds is None or len(vnfds.vnfd) == 0
diff --git a/rwlaunchpad/ra/pytest/test_failover.py b/rwlaunchpad/ra/pytest/test_failover.py
new file mode 100755 (executable)
index 0000000..40dd7d0
--- /dev/null
@@ -0,0 +1,86 @@
+#!/usr/bin/env python3
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_failover.py
+@brief System test of stopping launchpad on master and
+validating configuration on standby
+"""
+import os
+import sys
+import time
+import argparse
+import subprocess
+
+import gi
+from gi.repository import RwVnfdYang
+from gi.repository import RwVnfrYang
+
+import rift.auto.proxy
+from rift.auto.session import NetconfSession
+
+def yield_vnfd_vnfr_pairs(proxy, nsr=None):
+    """
+    Yields tuples of vnfd & vnfr entries.
+
+    Args:
+        proxy (callable): Launchpad proxy
+        nsr (optional): If specified, only the vnfr & vnfd records of the NSR
+                are returned
+
+    Yields:
+        Tuple: VNFD and its corresponding VNFR entry
+    """
+    def get_vnfd(vnfd_id):
+        xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_id)
+        return proxy(RwVnfdYang).get(xpath)
+
+    vnfr = "/vnfr-catalog/vnfr"
+    print ("START")
+    vnfrs = proxy(RwVnfrYang).get(vnfr, list_obj=True)
+    print ("STOP")
+    for vnfr in vnfrs.vnfr:
+
+        if nsr:
+            const_vnfr_ids = [const_vnfr.vnfr_id for const_vnfr in nsr.constituent_vnfr_ref]
+            if vnfr.id not in const_vnfr_ids:
+                continue
+
+        vnfd = get_vnfd(vnfr.vnfd_ref)
+        yield vnfd, vnfr
+
+def check_configuration_on_standby(standby_ip):
+    print ("Start- check_configuration_on_standby")
+    mgmt_session = NetconfSession(standby_ip)
+    mgmt_session.connect()
+    print ("Connected to proxy")
+
+    vnf_tuple = list(yield_vnfd_vnfr_pairs(mgmt_session.proxy))
+    assert len(vnf_tuple) == 2
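+    # The ping/pong network service contains exactly two VNFs, so two
+    # VNFD/VNFR pairs must be readable from the promoted standby.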
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='Test launchpad failover') 
+    parser.add_argument("--master-ip", action="store", dest="master_ip")
+    parser.add_argument("--standby-ip", action="store", dest="standby_ip")
+
+    args = parser.parse_args()
+
+    # 60 seconds should be more than enough time for the agent to
+    # promote confd to be the new master
+    time.sleep(60)
+    print("Trying to fetch configuration from the old standby (the new master)\n")
+    check_configuration_on_standby(args.standby_ip)
diff --git a/rwlaunchpad/ra/pytest/test_launchpad.py b/rwlaunchpad/ra/pytest/test_launchpad.py
new file mode 100644 (file)
index 0000000..81f5b54
--- /dev/null
@@ -0,0 +1,122 @@
+#!/usr/bin/env python3
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file test_launchpad.py
+@author Paul Laidler (Paul.Laidler@riftio.com)
+@date 07/07/2016
+@brief System test of basic launchpad functionality
+"""
+
+import pytest
+
+import gi
+gi.require_version('RwsdnYang', '1.0')
+
+from gi.repository import RwsdnYang
+
+@pytest.mark.setup('sdn')
+@pytest.mark.feature('sdn')
+@pytest.mark.incremental
+class TestSdnSetup:
+    def test_create_odl_sdn_account(self, mgmt_session, sdn_account_name, sdn_account_type):
+        '''Configure sdn account
+
+        Asserts:
+            SDN account name and account type.
+        '''
+        proxy = mgmt_session.proxy(RwsdnYang)
+        sdn_account = RwsdnYang.SDNAccount(
+                name=sdn_account_name,
+                account_type=sdn_account_type)
+        xpath = "/sdn-accounts/sdn-account-list[name='%s']" % sdn_account_name
+        proxy.create_config(xpath, sdn_account)
+        sdn_account = proxy.get(xpath)
+        assert sdn_account.name == sdn_account_name
+        assert sdn_account.account_type == sdn_account_type
+
+@pytest.mark.depends('sdn')
+@pytest.mark.feature('sdn')
+@pytest.mark.incremental
+class TestSdn:
+    def test_show_odl_sdn_account(self, mgmt_session, sdn_account_name, sdn_account_type):
+        '''Showing sdn account configuration
+
+        Asserts:
+            sdn_account.account_type is what was configured
+        '''
+        proxy = mgmt_session.proxy(RwsdnYang)
+        xpath = "/sdn-accounts/sdn-account-list[name='%s']" % sdn_account_name
+        sdn_account = proxy.get_config(xpath)
+        assert sdn_account.account_type == sdn_account_type
+
+@pytest.mark.teardown('sdn')
+@pytest.mark.feature('sdn')
+@pytest.mark.incremental
+class TestSdnTeardown:
+    def test_delete_odl_sdn_account(self, mgmt_session, sdn_account_name):
+        '''Unconfigure sdn account'''
+        proxy = mgmt_session.proxy(RwsdnYang)
+        xpath = "/sdn-accounts/sdn-account-list[name='%s']" % sdn_account_name
+        proxy.delete_config(xpath)
+
+
+@pytest.mark.setup('launchpad')
+@pytest.mark.usefixtures('cloud_account')
+@pytest.mark.incremental
+class TestLaunchpadSetup:
+    def test_create_cloud_accounts(self, mgmt_session, cloud_module, cloud_xpath, cloud_accounts):
+        '''Configure cloud accounts
+
+        Asserts:
+            Cloud name and cloud type details
+        '''
+        proxy = mgmt_session.proxy(cloud_module)
+        for cloud_account in cloud_accounts:
+            xpath = '{}[name="{}"]'.format(cloud_xpath, cloud_account.name)
+            proxy.replace_config(xpath, cloud_account)
+            response =  proxy.get(xpath)
+            assert response.name == cloud_account.name
+            assert response.account_type == cloud_account.account_type
+
+@pytest.mark.depends('launchpad')
+@pytest.mark.usefixtures('cloud_account')
+@pytest.mark.incremental
+class TestLaunchpad:
+    def test_account_connection_status(self, mgmt_session, cloud_module, cloud_xpath, cloud_accounts):
+        '''Verify connection status on each cloud account
+
+        Asserts:
+            Cloud account is successfully connected
+        '''
+        proxy = mgmt_session.proxy(cloud_module)
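+        # Each account must reach connection-status 'success'; a reported
+        # 'failure' aborts the wait immediately rather than timing out.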
+        for cloud_account in cloud_accounts:
+            proxy.wait_for(
+                '{}[name="{}"]/connection-status/status'.format(cloud_xpath, cloud_account.name),
+                'success',
+                timeout=30,
+                fail_on=['failure'])
+
+
+@pytest.mark.teardown('launchpad')
+@pytest.mark.usefixtures('cloud_account')
+@pytest.mark.incremental
+class TestLaunchpadTeardown:
+    def test_delete_cloud_accounts(self, mgmt_session, cloud_module, cloud_xpath, cloud_accounts):
+        '''Unconfigure cloud_account'''
+        proxy = mgmt_session.proxy(cloud_module)
+        for cloud_account in cloud_accounts:
+            xpath = "{}[name='{}']".format(cloud_xpath, cloud_account.name)
+            proxy.delete_config(xpath)
diff --git a/rwlaunchpad/ra/pytest/test_launchpad_longevity.py b/rwlaunchpad/ra/pytest/test_launchpad_longevity.py
new file mode 100644 (file)
index 0000000..c8a4662
--- /dev/null
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2016/01/04
+#
+
+import rift.vcs.vcs
+import time
+import gi
+
+def test_launchpad_longevity(mgmt_session, mgmt_domain_name):
+    time.sleep(60)
+    rift.vcs.vcs.wait_until_system_started(mgmt_session)
+
diff --git a/rwlaunchpad/ra/pytest/test_start_standby.py b/rwlaunchpad/ra/pytest/test_start_standby.py
new file mode 100755 (executable)
index 0000000..cf0e5d9
--- /dev/null
@@ -0,0 +1,78 @@
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file  test_start_standby.py
+@brief This test starts the launchpad on a remote VM
+"""
+import argparse
+import sys
+import time
+import os
+import glob
+import subprocess
+import shlex
+import multiprocessing
+
+import rift.auto.session
+import rift.vcs.vcs
+
+def get_manifest_file():
+    artifacts_path = os.environ["RIFT_ARTIFACTS"]
+    manifest_files = glob.glob(artifacts_path + "/manifest*xml")
+    # Sort newest first so the most recently generated manifest is used
+    manifest_files.sort(key=lambda x: os.stat(x).st_mtime, reverse=True)
+    return manifest_files[0]
+
+def copy_manifest_to_remote(remote_ip, manifest_file):
+    print ("Copying manifest file {} to remote".format(manifest_file))
+    cmd = "scp {0} {1}:/tmp/manifest.xml".format(manifest_file, remote_ip)
+    print ("Running command: {}".format(cmd))
+    subprocess.check_call(cmd, shell=True)
+    
+
+def test_start_lp_remote(remote_ip):
+    rift_root = os.environ.get('HOME_RIFT', os.environ.get('RIFT_ROOT'))
+    rift_install = os.environ.get('RIFT_INSTALL')
+
+    copy_manifest_to_remote(remote_ip, get_manifest_file())
+
+    cmd_template = ("ssh_root {remote_ip} -q -o BatchMode=yes -o "
+    " UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -- "
+    " \"rm -rf /tmp/corosync; cd {rift_install}; {rift_root}/rift-shell -e -- {rift_install}/usr/bin/rwmain -m /tmp/manifest.xml\"").format(
+      remote_ip=remote_ip,
+      rift_root=rift_root,
+      rift_install=rift_install)
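+    # Starts rwmain under rift-shell on the remote VM using the manifest
+    # copied above; /tmp/corosync is removed first, presumably to clear any
+    # stale HA state left over from a previous run.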
+
+    def start_lp(cmd):
+        print("Running cmd: {}".format(cmd))
+        subprocess.call(shlex.split(cmd))
+
+    print("Starting launchpad on remote VM: {}".format(cmd_template))
+    p = multiprocessing.Process(target=start_lp, args=(cmd_template,))
+    p.daemon = True
+    p.start()
+    print("Standby system started")
+    time.sleep(60)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='Start standby LP')
+    parser.add_argument("--remote-ip", action="store", dest="remote_ip")
+
+    args = parser.parse_args()
+
+    test_start_lp_remote(args.remote_ip)
diff --git a/rwlaunchpad/ra/racfg/multi_tenant_systest_openstack.racfg b/rwlaunchpad/ra/racfg/multi_tenant_systest_openstack.racfg
new file mode 100644 (file)
index 0000000..c9adde4
--- /dev/null
@@ -0,0 +1,19 @@
+{
+  "test_name":"TC_MULTI_TENANT_OPENSTACK",
+  "commandline":"./launchpad_systest --test-name 'TC_MULTI_TENANT_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host} --user={user} {tenants} --sysinfo",
+  "test_description":"System test for multiple tenants(Openstack)",
+  "required_tenants":2,
+  "run_as_root": false,
+  "status":"working",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 1800,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/multivm_vnf_slb_systest.racfg b/rwlaunchpad/ra/racfg/multivm_vnf_slb_systest.racfg
new file mode 100644 (file)
index 0000000..2294b91
--- /dev/null
@@ -0,0 +1,17 @@
+{
+  "test_name":"TC_MULTI_VM_VNF_SLB",
+  "commandline":"./multi_vm_vnf_slb_systest.sh --test-name 'TC_MULTI_VM_VNF_SLB' --cloud-type openstack --cloud-host={cloud_host}  --user={user} {tenants}",
+  "test_description":"System test for scriptable load balancer with Multi-VMs VNFs",
+  "run_as_root": false,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2200,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
diff --git a/rwlaunchpad/ra/racfg/multivm_vnf_trafgen_systest.racfg b/rwlaunchpad/ra/racfg/multivm_vnf_trafgen_systest.racfg
new file mode 100755 (executable)
index 0000000..3879146
--- /dev/null
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_MULTI_VM_VNF_TRAFGEN",
+  "commandline":"./multi_vm_vnf_trafgen_systest.sh --test-name 'TC_MULTI_VM_VNF_TRAFGEN' --cloud-type openstack --cloud-host={cloud_host}  --user={user}  {tenants}",
+  "test_description":"System test for trafgen application with Multi-VMs VNFs",
+  "run_as_root": false,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2200,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/pingpong_lp_ha_systest_openstack.racfg b/rwlaunchpad/ra/racfg/pingpong_lp_ha_systest_openstack.racfg
new file mode 100644 (file)
index 0000000..2887649
--- /dev/null
@@ -0,0 +1,23 @@
+{
+  "test_name":"TC_PINGPONG_LP_HA_OPENSTACK",
+  "commandline":"./pingpong_lp_ha_systest --test-name 'TC_PINGPONG_LP_HA_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host} --user={user} {tenants} --ha-mode LS --expanded",
+  "test_description":"System test for standalone Launchpad (Openstack) with High availability",
+  "run_as_root": false,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2600,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    },
+    {
+      "name": "rift_auto_launchpad_standby",
+      "memory":4096,
+      "cpus":2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/pingpong_records_systest_cloudsim.racfg b/rwlaunchpad/ra/racfg/pingpong_records_systest_cloudsim.racfg
new file mode 100644 (file)
index 0000000..25e969f
--- /dev/null
@@ -0,0 +1,19 @@
+{
+  "test_name":"TC_PINGPONG_RECORDS_CLOUDSIM",
+  "commandline":"./pingpong_records_systest --test-name 'TC_PINGPONG_RECORDS_CLOUDSIM' --sysinfo --netconf --restconf",
+  "test_description":"System test for ping and pong vnf (Cloudsim)",
+  "run_as_root": true,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2600,
+  "networks":[],
+  "target_vm":"rift_auto_launchpad",
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 16384,
+      "cpus": 4
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/pingpong_records_systest_openstack.racfg b/rwlaunchpad/ra/racfg/pingpong_records_systest_openstack.racfg
new file mode 100644 (file)
index 0000000..62940eb
--- /dev/null
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_PINGPONG_RECORDS_OPENSTACK",
+  "commandline":"./pingpong_records_systest --test-name 'TC_PINGPONG_RECORDS_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo  --user={user} {tenants} --netconf --restconf",
+  "test_description":"System test for ping and pong vnf (Openstack)",
+  "run_as_root": true,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2600,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/pingpong_records_systest_openstack_xml.racfg b/rwlaunchpad/ra/racfg/pingpong_records_systest_openstack_xml.racfg
new file mode 100644 (file)
index 0000000..76b7c66
--- /dev/null
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_PINGPONG_RECORDS_OPENSTACK_XML",
+  "commandline":"./pingpong_records_systest  --test-name 'TC_PINGPONG_RECORDS_OPENSTACK_XML' --cloud-type 'openstack' --sysinfo --use-xml-mode --cloud-host={cloud_host} --user={user} {tenants} --restconf",
+  "test_description":"System test for ping and pong vnf (Openstack)",
+  "run_as_root": true,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2600,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/pingpong_scaling_systest_openstack.racfg b/rwlaunchpad/ra/racfg/pingpong_scaling_systest_openstack.racfg
new file mode 100644 (file)
index 0000000..7d6b30e
--- /dev/null
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_PINGPONG_SCALING_OPENSTACK",
+  "commandline":"./pingpong_scaling_systest --cloud-type 'openstack' --cloud-host={cloud_host}  --user={user} {tenants}",
+  "test_description":"Scaling system test for ping and pong vnf (Openstack)",
+  "run_as_root": false,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2200,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/pingpong_vnf_reload_systest_openstack.racfg b/rwlaunchpad/ra/racfg/pingpong_vnf_reload_systest_openstack.racfg
new file mode 100644 (file)
index 0000000..2f4388d
--- /dev/null
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_PINGPONG_VNF_RELOAD_OPENSTACK",
+  "commandline":"./pingpong_vnf_reload_systest  --test-name 'TC_PINGPONG_VNF_RELOAD_OPENSTACK' --cloud-type 'openstack' --sysinfo --cloud-host={cloud_host} --user={user} {tenants} --restconf",
+  "test_description":"System test for ping pong vnf reload(Openstack)",
+  "run_as_root": false,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2200,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/pingpong_vnf_reload_systest_openstack_xml.racfg b/rwlaunchpad/ra/racfg/pingpong_vnf_reload_systest_openstack_xml.racfg
new file mode 100644 (file)
index 0000000..ce44c75
--- /dev/null
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_PINGPONG_VNF_RELOAD_OPENSTACK_XML",
+  "commandline":"./pingpong_vnf_reload_systest  --test-name 'TC_PINGPONG_VNF_RELOAD_OPENSTACK_XML' --cloud-type 'openstack' --sysinfo --use-xml-mode --cloud-host={cloud_host} --user={user} {tenants} --restconf",
+  "test_description":"System test for ping pong vnf reload(Openstack)",
+  "run_as_root": false,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2200,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/pingpong_vnf_systest_cloudsim.racfg b/rwlaunchpad/ra/racfg/pingpong_vnf_systest_cloudsim.racfg
new file mode 100644 (file)
index 0000000..c2f8f0c
--- /dev/null
@@ -0,0 +1,19 @@
+{
+  "test_name":"TC_PINGPONG_VNF_CLOUDSIM",
+  "commandline":"./pingpong_vnf_systest --test-name 'TC_PINGPONG_VNF_CLOUDSIM'",
+  "target_vm":"VM",
+  "test_description":"System test for ping and pong vnf",
+  "run_as_root": true,
+  "status":"broken",
+  "keywords":["nightly","smoke","smoke_stable","MANO","cloudsim"],
+  "timelimit": 1800,
+  "networks":[],
+  "vms":[
+    {
+      "name": "VM",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/pingpong_vnf_systest_openstack.racfg b/rwlaunchpad/ra/racfg/pingpong_vnf_systest_openstack.racfg
new file mode 100644 (file)
index 0000000..91cd1ad
--- /dev/null
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_PINGPONG_VNF_OPENSTACK",
+  "commandline":"./pingpong_vnf_systest --test-name 'TC_PINGPONG_VNF_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host}  --user={user} {tenants} --sysinfo",
+  "test_description":"System test for ping and pong vnf (Openstack)",
+  "run_as_root": false,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2200,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/recovery_systest.racfg b/rwlaunchpad/ra/racfg/recovery_systest.racfg
new file mode 100644 (file)
index 0000000..6d0db13
--- /dev/null
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_TASKLET_RECOVERY_OPENSTACK",
+  "commandline":"./pingpong_recovery_systest --test-name 'TC_TASKLET_RECOVERY_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo  --user={user} {tenants} --netconf",
+  "test_description":"System test for testing the DTS recovery feature of tasklets (Openstack)",
+  "run_as_root": false,
+  "status":"working",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2200,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 8192,
+      "cpus": 4
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/racfg/scaling_systest.racfg b/rwlaunchpad/ra/racfg/scaling_systest.racfg
new file mode 100644 (file)
index 0000000..2d8744d
--- /dev/null
@@ -0,0 +1,18 @@
+{
+  "test_name":"TC_SCALING_OPENSTACK",
+  "commandline":"./scaling_systest --test-name 'TC_SCALING_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host} --tenant={tenant}  --sysinfo",
+  "test_description":"System test for scaling HAProxy vnf (Openstack)",
+  "run_as_root": false,
+  "status":"broken",
+  "keywords":["nightly","smoke","MANO","openstack"],
+  "timelimit": 2200,
+  "networks":[],
+  "vms":[
+    {
+      "name": "rift_auto_launchpad",
+      "memory": 4096,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/ra/scaling_systest b/rwlaunchpad/ra/scaling_systest
new file mode 100755 (executable)
index 0000000..bb37bf2
--- /dev/null
@@ -0,0 +1,41 @@
+#!/bin/bash
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Varun Prasad
+# Creation Date: 2016/04/12
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+
+SCRIPT_TEST="py.test -v \
+            ${PYTEST_DIR}/system/test_launchpad.py \
+            ${PYTEST_DIR}/system/ns/test_onboard.py \
+            ${PYTEST_DIR}/system/ns/haproxy/test_scaling.py"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
diff --git a/rwlaunchpad/test/CMakeLists.txt b/rwlaunchpad/test/CMakeLists.txt
new file mode 100644 (file)
index 0000000..bd1a51e
--- /dev/null
@@ -0,0 +1,65 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Joshua Downer
+# Creation Date: 10/01/2015
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+install(
+  PROGRAMS
+    launchpad.py
+  DESTINATION demos
+  COMPONENT ${PKG_LONG_NAME}
+  )
+
+install(
+  FILES
+    pytest/lp_test.py
+  DESTINATION
+    usr/rift/systemtest/pytest/launchpad
+  COMPONENT ${PKG_LONG_NAME}
+  )
+
+install(
+  PROGRAMS
+    launchpad_recovery
+  DESTINATION
+    usr/rift/systemtest/launchpad
+  COMPONENT ${PKG_LONG_NAME}
+  )
+
+install(
+  PROGRAMS
+    launchpad
+  DESTINATION usr/bin
+  COMPONENT rwcal-1.0
+  )
+
+rift_py3test(utest_rwmonitor
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_rwmonitor.py
+  )
+
+rift_py3test(utest_rwnsm
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/utest_rwnsm.py
+  )
+
+rift_py3test(tosca_ut
+  TEST_ARGS
+  ${CMAKE_CURRENT_SOURCE_DIR}/tosca_ut.py
+  )
diff --git a/rwlaunchpad/test/launchpad b/rwlaunchpad/test/launchpad
new file mode 100644 (file)
index 0000000..6e423ac
--- /dev/null
@@ -0,0 +1,145 @@
+#!/usr/bin/env python3
+
+import argparse
+import contextlib
+import os
+import signal
+import subprocess
+import sys
+
+import gi
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwCal', '1.0')
+gi.require_version('RwLog', '1.0')
+
+
+TEST_PARSER = "test"
+
+
+class PyTestRunner:
+    SYS_CMD = "demos/launchpad.py -m ethsim --skip-prepare-vm -c"
+    CLOUDSIM_CMD = "cloudsim start"
+
+    @property
+    def rift_install(self):
+        return os.getenv('RIFT_INSTALL')
+
+    @property
+    def account_script(self):
+        return os.path.join(
+                self.rift_install,
+                "usr/rift/systemtest/pytest/mission_control/test_mission_control.py")
+
+    @property
+    def onboard_script(self):
+        return os.path.join(
+                self.rift_install,
+                "usr/rift/systemtest/pytest/mission_control/pingpong_vnf/test_onboard_vnf.py")
+
+    @property
+    def records_script(self):
+        return os.path.join(
+                self.rift_install,
+                "usr/rift/systemtest/pytest/mission_control/pingpong_vnf/test_records.py")
+
+    def run_cmd(self, scripts=None, cal_account="mock"):
+        scripts = scripts or [self.account_script, self.onboard_script]
+
+        cmd = "py.test -v "
+
+        # In mock-cal mode we don't need the images.
+        if cal_account == "mock":
+            cmd += "--{} --lp-standalone --network-service pingpong_noimg ".format(cal_account)
+        else:
+            cmd += "--{} --lp-standalone --network-service pingpong ".format(cal_account)
+
+        cmd += " ".join(scripts)
+        subprocess.call(cmd, shell=True)
+
+    @contextlib.contextmanager
+    def system_start(self, debug_mode=False, cal_account="mock"):
+
+
+        os.environ['LD_PRELOAD'] = os.path.join(
+                self.rift_install,
+                "usr/lib/rift/preloads/librwxercespreload.so")
+
+        sys_cmd = os.path.join(self.rift_install, self.SYS_CMD)
+        if debug_mode:
+            sys_cmd += " --mock-cli"
+
+        process = subprocess.Popen(
+            sys_cmd,
+            shell=True,
+            preexec_fn=os.setsid)
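+        # preexec_fn=os.setsid gives the launchpad its own process group so
+        # that kill() below can tear down the whole tree via os.killpg().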
+
+        cloudsim_process = None
+        if cal_account == "lxc":
+            # If in LXC start the cloudsim server.
+            cloudsim_process = subprocess.Popen(
+                PyTestRunner.CLOUDSIM_CMD,
+                shell=True,
+                preexec_fn=os.setsid)
+
+        def kill():
+            os.killpg(process.pid, signal.SIGTERM)
+            if cloudsim_process:
+                os.killpg(cloudsim_process.pid, signal.SIGTERM)
+                cloudsim_process.wait()
+
+            process.wait()
+
+        signal.signal(signal.SIGHUP, kill)
+        signal.signal(signal.SIGTERM, kill)
+
+        yield
+
+        kill()
+
+
+def test_launchpad(args):
+    pytest = PyTestRunner()
+
+    scripts = None
+    if args.cal == "lxc":
+        scripts = [pytest.account_script, pytest.onboard_script, pytest.records_script]
+
+    with pytest.system_start(cal_account=args.cal):
+        pytest.run_cmd(scripts=scripts, cal_account=args.cal)
+
+
+def parse(arguments):
+    parser = argparse.ArgumentParser(description=__doc__,
+                                    formatter_class=argparse.RawDescriptionHelpFormatter)
+    parser.add_argument(
+            '--log-level', '-l',
+            default="WARNING",
+            type=str,
+            choices=["INFO", "DEBUG", "WARNING", "ERROR"],
+            help="Set log level, defaults to warning and above.")
+
+    subparsers = parser.add_subparsers()
+
+    start_parser = subparsers.add_parser(TEST_PARSER, help="Test the LP")
+    start_parser.add_argument(
+            '--cal', "-c",
+            help="Run the server in the foreground. The logs are sent to console.",
+            default="mock",
+            choices=["lxc", "mock"])
+    start_parser.set_defaults(which=TEST_PARSER)
+
+    args = parser.parse_args(arguments)
+
+    return args
+
+
+def main(args):
+
+    args = parse(args)
+
+    if args.which == TEST_PARSER:
+        test_launchpad(args)
+
+
+if __name__ == "__main__":
+    main(sys.argv[1:])
\ No newline at end of file
diff --git a/rwlaunchpad/test/launchpad.py b/rwlaunchpad/test/launchpad.py
new file mode 100755 (executable)
index 0000000..239f91b
--- /dev/null
@@ -0,0 +1,520 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import logging
+import os
+import resource
+import socket
+import sys
+import subprocess
+import shlex
+import shutil
+import netifaces
+
+from rift.rwlib.util import certs
+import rift.rwcal.cloudsim
+import rift.rwcal.cloudsim.net
+import rift.vcs
+import rift.vcs.core as core
+import rift.vcs.demo
+import rift.vcs.vms
+
+import rift.rwcal.cloudsim
+import rift.rwcal.cloudsim.net
+
+from rift.vcs.ext import ClassProperty
+
+logger = logging.getLogger(__name__)
+
+
+class NsmTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a network services manager tasklet.
+    """
+
+    def __init__(self, name='network-services-manager', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a NsmTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+        """
+        super(NsmTasklet, self).__init__(name=name, uid=uid,
+                                         config_ready=config_ready,
+                                         recovery_action=recovery_action,
+                                         data_storetype=data_storetype,
+                                        )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwnsmtasklet')
+    plugin_name = ClassProperty('rwnsmtasklet')
+
+
+class VnsTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a virtual network service (VNS) tasklet.
+    """
+
+    def __init__(self, name='virtual-network-service', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a VnsTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+        """
+        super(VnsTasklet, self).__init__(name=name, uid=uid,
+                                         config_ready=config_ready,
+                                         recovery_action=recovery_action,
+                                         data_storetype=data_storetype,
+                                        )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwvnstasklet')
+    plugin_name = ClassProperty('rwvnstasklet')
+
+
+class VnfmTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a virtual network function manager tasklet.
+    """
+
+    def __init__(self, name='virtual-network-function-manager', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a VnfmTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+        """
+        super(VnfmTasklet, self).__init__(name=name, uid=uid,
+                                          config_ready=config_ready,
+                                          recovery_action=recovery_action,
+                                          data_storetype=data_storetype,
+                                         )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwvnfmtasklet')
+    plugin_name = ClassProperty('rwvnfmtasklet')
+
+
+class ResMgrTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a Resource Manager tasklet.
+    """
+
+    def __init__(self, name='Resource-Manager', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a ResMgrTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+        """
+        super(ResMgrTasklet, self).__init__(name=name, uid=uid,
+                                            config_ready=config_ready,
+                                            recovery_action=recovery_action,
+                                            data_storetype=data_storetype,
+                                           )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwresmgrtasklet')
+    plugin_name = ClassProperty('rwresmgrtasklet')
+
+
+class ImageMgrTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents an Image Manager tasklet.
+    """
+
+    def __init__(self, name='Image-Manager', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates an ImageMgrTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+        """
+        super(ImageMgrTasklet, self).__init__(
+                name=name, uid=uid,
+                config_ready=config_ready,
+                recovery_action=recovery_action,
+                data_storetype=data_storetype,
+                )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwimagemgrtasklet')
+    plugin_name = ClassProperty('rwimagemgrtasklet')
+
+
+class MonitorTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a tasklet that is used to monitor NFVI metrics.
+    """
+
+    def __init__(self, name='nfvi-metrics-monitor', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a MonitorTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+
+        """
+        super(MonitorTasklet, self).__init__(name=name, uid=uid,
+                                             config_ready=config_ready,
+                                             recovery_action=recovery_action,
+                                             data_storetype=data_storetype,
+                                            )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwmonitor')
+    plugin_name = ClassProperty('rwmonitor')
+
+class RedisServer(rift.vcs.NativeProcess):
+    def __init__(self, name="RW.Redis.Server",
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        super(RedisServer, self).__init__(
+                name=name,
+                exe="/usr/bin/redis-server",
+                config_ready=config_ready,
+                recovery_action=recovery_action,
+                data_storetype=data_storetype,
+                )
+
+    @property
+    def args(self):
+        return "./usr/bin/active_redis.conf --port 9999"
+
+
+class MonitoringParameterTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a tasklet that is used to generate monitoring
+    parameters.
+    """
+
+    def __init__(self, name='Monitoring-Parameter', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a MonitoringParameterTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+
+        """
+        super(MonitoringParameterTasklet, self).__init__(name=name, uid=uid,
+                                             config_ready=config_ready,
+                                             recovery_action=recovery_action,
+                                             data_storetype=data_storetype,
+                                            )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwmonparam')
+    plugin_name = ClassProperty('rwmonparam')
+
+
+class AutoscalerTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents the autoscaler tasklet, which scales network
+    services based on monitoring parameters.
+    """
+
+    def __init__(self, name='Autoscaler', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates an AutoscalerTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+
+        """
+        super(AutoscalerTasklet, self).__init__(name=name, uid=uid,
+                                             config_ready=config_ready,
+                                             recovery_action=recovery_action,
+                                             data_storetype=data_storetype,
+                                            )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwautoscaler')
+    plugin_name = ClassProperty('rwautoscaler')
+
+
+def get_ui_ssl_args():
+    """Returns the SSL parameter string for launchpad UI processes"""
+
+    try:
+        use_ssl, certfile_path, keyfile_path = certs.get_bootstrap_cert_and_key()
+    except certs.BootstrapSslMissingException:
+        logger.error('No bootstrap certificates found.  Disabling UI SSL')
+        use_ssl = False
+
+    # If we're not using SSL, no SSL arguments are necessary
+    if not use_ssl:
+        return ""
+
+    return "--enable-https --keyfile-path=%s --certfile-path=%s" % (keyfile_path, certfile_path)
+
+
+class UIServer(rift.vcs.NativeProcess):
+    def __init__(self, name="RW.MC.UI",
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        super(UIServer, self).__init__(
+                name=name,
+                exe="./usr/share/rw.ui/skyquake/scripts/launch_ui.sh",
+                config_ready=config_ready,
+                recovery_action=recovery_action,
+                data_storetype=data_storetype,
+                )
+
+    @property
+    def args(self):
+        return get_ui_ssl_args()
+
+class ConfigManagerTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a Configuration Manager tasklet.
+    """
+
+    def __init__(self, name='Configuration-Manager', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a ConfigManagerTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+        """
+        super(ConfigManagerTasklet, self).__init__(name=name, uid=uid,
+                                                   config_ready=config_ready,
+                                                   recovery_action=recovery_action,
+                                                   data_storetype=data_storetype,
+                                                  )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwconmantasklet')
+    plugin_name = ClassProperty('rwconmantasklet')
+
+class GlanceServer(rift.vcs.NativeProcess):
+    def __init__(self, name="glance-image-catalog",
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        super(GlanceServer, self).__init__(
+                name=name,
+                exe="./usr/bin/glance_start_wrapper",
+                config_ready=config_ready,
+                recovery_action=recovery_action,
+                data_storetype=data_storetype,
+                )
+
+    @property
+    def args(self):
+        return "./etc/glance"
+
+
+class Demo(rift.vcs.demo.Demo):
+    def __init__(self, no_ui=False, ha_mode=None, mgmt_ip_list=None, test_name=None):
+        procs = [
+            ConfigManagerTasklet(),
+            GlanceServer(),
+            rift.vcs.DtsRouterTasklet(),
+            rift.vcs.MsgBrokerTasklet(),
+            rift.vcs.RestPortForwardTasklet(),
+            rift.vcs.RestconfTasklet(),
+            rift.vcs.RiftCli(),
+            rift.vcs.uAgentTasklet(),
+            rift.vcs.Launchpad(),
+            ]
+
+        standby_procs = [
+            RedisServer(),
+            rift.vcs.DtsRouterTasklet(),
+            rift.vcs.MsgBrokerTasklet(),
+            ]
+
+        datastore = core.DataStore.BDB.value
+        if ha_mode:
+            procs.append(RedisServer())
+            datastore = core.DataStore.REDIS.value
+
+        if not no_ui:
+            procs.append(UIServer())
+
+        restart_procs = [
+              VnfmTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
+              VnsTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
+              MonitorTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
+              MonitoringParameterTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
+              NsmTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
+              ResMgrTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
+              ImageMgrTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
+              AutoscalerTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
+            ]
+
+        # Default to localhost when no management IPs are supplied (also avoids
+        # mutating a shared default argument)
+        if not mgmt_ip_list:
+            mgmt_ip_list = ["127.0.0.1"]
+
+        colony = rift.vcs.core.Colony(name='top', uid=1)
+
+        lead_lp_vm = rift.vcs.VirtualMachine(
+              name='vm-launchpad-1',
+              ip=mgmt_ip_list[0],
+              procs=procs,
+              restart_procs=restart_procs,
+            )
+        lead_lp_vm.leader = True
+        colony.append(lead_lp_vm)
+
+        if ha_mode:
+            stby_lp_vm = rift.vcs.VirtualMachine(
+                  name='launchpad-vm-2',
+                  ip=mgmt_ip_list[1],
+                  procs=standby_procs,
+                  start=False,
+                )
+            # Workaround for the uAgent mode_active flag reset on the standby VM
+            stby_lp_vm.add_tasklet(rift.vcs.uAgentTasklet(), mode_active=False)
+            colony.append(stby_lp_vm)
+
+        sysinfo = rift.vcs.SystemInfo(
+                    mode='ethsim',
+                    zookeeper=rift.vcs.manifest.RaZookeeper(master_ip=mgmt_ip_list[0]),
+                    colonies=[colony],
+                    multi_broker=True,
+                    multi_dtsrouter=True,
+                    mgmt_ip_list=mgmt_ip_list,
+                    test_name=test_name,
+                  )
+
+        super(Demo, self).__init__(
+            # Construct the system. This system consists of a single colony
+            # ('top') housing the launchpad VM, plus a standby VM in HA mode.
+            sysinfo = sysinfo,
+
+            # Define the generic portmap.
+            port_map = {},
+
+            # Define a mapping from the placeholder logical names to the real
+            # port names for each of the different modes supported by this demo.
+            port_names = {
+                'ethsim': {
+                },
+                'pci': {
+                }
+            },
+
+            # Define the connectivity between logical port names.
+            port_groups = {},
+        )
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s')
+
+    # Create a parser which includes all generic demo arguments
+    parser = rift.vcs.demo.DemoArgParser()
+    parser.add_argument("--no-ui", action='store_true')
+    args = parser.parse_args(argv)
+
+    # Disable loading any kernel modules for the launchpad VM
+    # since it doesn't need them and loading would fail within containers
+    os.environ["NO_KERNEL_MODS"] = "1"
+
+    # Remove the persistent Redis data and DTS recovery files
+    for f in os.listdir(os.environ["INSTALLDIR"]):
+        if f.endswith((".aof", ".rdb", ".db")):
+            os.remove(os.path.join(os.environ["INSTALLDIR"], f))
+    shutil.rmtree(os.path.join(os.environ["INSTALLDIR"], "zk/server-1"),
+                  ignore_errors=True)
+    # shutil.rmtree() does not expand globs, so expand var/rift/tmp* explicitly
+    for d in glob.glob(os.path.join(os.environ["INSTALLDIR"], "var/rift/tmp*")):
+        shutil.rmtree(d, ignore_errors=True)
+
+    ha_mode = args.ha_mode
+    mgmt_ip_list = [] if not args.mgmt_ip_list else args.mgmt_ip_list
+
+    # Load demo info and create the Demo object
+    demo = Demo(args.no_ui, ha_mode, mgmt_ip_list, args.test_name)
+
+    # Create the prepared system from the demo
+    system = rift.vcs.demo.prepared_system_from_demo_and_args(demo, args,
+              northbound_listing="cli_launchpad_schema_listing.txt",
+              netconf_trace_override=True)
+
+    confd_ip = socket.gethostbyname(socket.gethostname())
+    try:
+        intf = netifaces.ifaddresses('eth0')
+        if intf and netifaces.AF_INET in intf and intf[netifaces.AF_INET]:
+            confd_ip = intf[netifaces.AF_INET][0]['addr']
+    except ValueError:
+        # No eth0 interface; fall back to the hostname-derived address
+        pass
+    rift.vcs.logger.configure_sink(config_file=None, confd_ip=confd_ip)
+
+    # Start the prepared system
+    system.start()
+
+
+if __name__ == "__main__":
+    resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY) )
+    try:
+        main()
+    except rift.vcs.demo.ReservationError:
+        print("ERROR: unable to retrieve a list of IP addresses from the reservation system")
+        sys.exit(1)
+    except rift.vcs.demo.MissingModeError:
+        print("ERROR: you need to provide a mode to run the script")
+        sys.exit(1)
+    finally:
+        os.system("stty sane")
diff --git a/rwlaunchpad/test/launchpad_recovery b/rwlaunchpad/test/launchpad_recovery
new file mode 100755 (executable)
index 0000000..eea5d4a
--- /dev/null
@@ -0,0 +1,793 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+import asyncio
+import logging
+import os
+import sys
+import unittest
+import re
+import psutil
+import types
+
+import xmlrunner
+
+import gi
+gi.require_version('RwDtsToyTaskletYang', '1.0')
+gi.require_version('RwManifestYang', '1.0')
+gi.require_version('RwVcsYang', '1.0')
+
+import gi.repository.RwManifestYang as rwmanifest
+import gi.repository.RwVcsYang as rwvcs
+import gi.repository.RwDtsToyTaskletYang as toyyang
+import gi.repository.RwYang as RwYang
+import rift.auto.session
+import rift.vcs.vcs
+
+import rift.tasklets
+import rift.test.dts
+
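+# asyncio.ensure_future() was added in Python 3.4.4; on older interpreters,
+# expose the deprecated asyncio.async() under the new name.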
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+class LaunchPad(rift.test.dts.AbstractDTSTest):
+    """
+    DTS GI interface unittests
+
+    Note:  Each test uses a list of asyncio.Events for staging through the
+    test.  These are required here because we bring up each coroutine
+    ("tasklet") at the same time and do not implement any retry
+    mechanisms.  For instance, this is used in numerous tests to make sure that
+    a publisher is up and ready before the subscriber sends queries.  Such
+    event lists should not be used in production software.
+    """
+    def setUp(self):
+        """
+        1. Creates an asyncio loop
+        2. Triggers the hook configure_test
+        """
+        def scheduler_tick(self, *args):
+            self.call_soon(self.stop)
+            self.run_forever()
+
+        # Init params: loop & timers
+        self.loop = asyncio.new_event_loop()
+
+        self.loop.scheduler_tick = types.MethodType(scheduler_tick, self.loop)
+
+        self.asyncio_timer = None
+        self.stop_timer = None
+        self.__class__.id_cnt += 1
+        self.configure_test(self.loop, self.__class__.id_cnt)
+
+    @classmethod
+    def configure_schema(cls):
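+        # The two re.sub() calls below strip the rw-manifest: prefix from the
+        # generated XML and rewrite the root element so the manifest validates
+        # against rw-manifest.xsd.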
+        schema = RwYang.Model.load_and_merge_schema(rwvcs.get_schema(), 'librwcal_yang_gen.so', 'Rwcal')
+        cls.model = RwYang.Model.create_libncx()
+        cls.model.load_schema_ypbc(schema)
+        xml = cls.manifest.to_xml_v2(cls.model, 1)
+        xml = re.sub('rw-manifest:', '', xml)
+        xml = re.sub('<manifest xmlns:rw-manifest="http://riftio.com/ns/riftware-1.0/rw-manifest">', '<?xml version="1.0" ?>\n<manifest xmlns="http://riftio.com/ns/riftware-1.0/rw-manifest" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://riftio.com/ns/riftware-1.0/rw-manifest ./rw-manifest.xsd">', xml)
+        xml = '\n'.join(xml.split('\n')[1:])
+        with open('lptestmanifest.xml', 'w') as f:
+            f.write(str(xml))
+        return schema
+
+
+    @classmethod
+    def configure_manifest(cls):
+        manifest = rwmanifest.Manifest()
+        manifest.bootstrap_phase = rwmanifest.BootstrapPhase.from_dict({
+            "rwmgmt": {
+                "northbound_listing": [ "cli_launchpad_schema_listing.txt" ]
+            }, 
+            "rwtasklet": {
+                "plugin_name": "rwinit-c"
+            }, 
+            "rwtrace": {
+                "enable": True, 
+                "level": 5, 
+            }, 
+            "log": {
+                "enable": True, 
+                "severity": 4, 
+                "bootstrap_time": 30, 
+                "console_severity": 4
+            }, 
+            "ip_addrs_list": [
+                {
+                    "ip_addr": "127.0.0.1", 
+                }
+            ], 
+            "zookeeper": {
+                "master_ip": "127.0.0.1", 
+                "unique_ports": False, 
+                "zake": False
+            }, 
+            "serf": {
+                "start": True
+            }, 
+            "rwvm": {
+                "instances": [
+                    {
+                        "component_name": "msgbroker", 
+                        "config_ready": True
+                    }, 
+                    {
+                        "component_name": "dtsrouter", 
+                        "config_ready": True
+                    }
+                ]
+            }, 
+#           "rwsecurity": {
+#               "use_ssl": True, 
+#               "cert": "/net/mahi/localdisk/kelayath/ws/coreha/etc/ssl/current.cert", 
+#               "key": "/net/mahi/localdisk/kelayath/ws/coreha/etc/ssl/current.key"
+#           }
+        }) 
+        manifest.init_phase = rwmanifest.InitPhase.from_dict({
+            "environment": {
+                "python_variable": [
+                    "vm_ip_address = '127.0.0.1'",
+                    "rw_component_name = 'vm-launchpad'",
+                    "instance_id = 1",
+                    "component_type = 'rwvm'",
+                ], 
+                "component_name": "$python(rw_component_name)", 
+                "instance_id": "$python(instance_id)", 
+                "component_type": "$python(rw_component_type)"
+            }, 
+            "settings": {
+                "rwmsg": {
+                    "multi_broker": {
+                        "enable": False
+                    }
+                }, 
+                "rwdtsrouter": {
+                    "multi_dtsrouter": {
+                        "enable": True
+                    }
+                }, 
+                "rwvcs": {
+                    "collapse_each_rwvm": False, 
+                    "collapse_each_rwprocess": False
+                }
+            }
+        }) 
+        manifest.inventory = rwmanifest.Inventory.from_dict({
+            "component": [
+                {
+                    "component_name": "master", 
+                    "component_type": "RWCOLLECTION", 
+                    "rwcollection": {
+                        "collection_type": "rwcolony", 
+                        "event_list": {
+                            "event": [{
+                                "name": "onentry", 
+                                "action": [{
+                                    "name": "Start vm-launchpad for master", 
+                                    "start": {
+                                        "python_variable": ["vm_ip_address = '127.0.0.1'"], 
+                                        "component_name": "vm-launchpad", 
+                                        "instance_id": "1", 
+                                        "config_ready": True
+                                    }
+                                }]
+                            }]
+                        }
+                    }
+                }, 
+                {
+                    "component_name": "vm-launchpad", 
+                    "component_type": "RWVM", 
+                    "rwvm": {
+                        "leader": True, 
+                        "event_list": {
+                            "event": [{
+                                "name": "onentry", 
+                                "action": [
+                                    {
+                                        "name": "Start the master", 
+                                        "start": {
+                                            "component_name": "master", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+#                                   {
+#                                       "name": "Start the RW.CLI", 
+#                                       "start": {
+#                                           "component_name": "RW.CLI", 
+#                                           "recovery_action": "RESTART",
+#                                           "config_ready": True
+#                                       }
+#                                   }, 
+                                    {
+                                        "name": "Start the RW.Proc_1.Restconf", 
+                                        "start": {
+                                            "component_name": "RW.Proc_1.Restconf", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+#                                   {
+#                                       "name": "Start the RW.Proc_2.RestPortForward", 
+#                                       "start": {
+#                                           "component_name": "RW.Proc_2.RestPortForward", 
+#                                           "recovery_action": "RESTART",
+#                                           "config_ready": True
+#                                       }
+#                                   }, 
+                                    {
+                                        "name": "Start the RW.Proc_3.CalProxy", 
+                                        "start": {
+                                            "component_name": "RW.Proc_3.CalProxy", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+                                    {
+                                        "name": "Start the RW.Proc_4.nfvi-metrics-monitor", 
+                                        "start": {
+                                            "component_name": "RW.Proc_4.nfvi-metrics-monitor", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+                                    {
+                                        "name": "Start the RW.Proc_5.network-services-manager", 
+                                        "start": {
+                                            "component_name": "RW.Proc_5.network-services-manager", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+                                    {
+                                        "name": "Start the RW.Proc_6.virtual-network-function-manager", 
+                                        "start": {
+                                            "component_name": "RW.Proc_6.virtual-network-function-manager", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+                                    {
+                                        "name": "Start the RW.Proc_7.virtual-network-service", 
+                                        "start": {
+                                            "component_name": "RW.Proc_7.virtual-network-service", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+                                    {
+                                        "name": "Start the RW.Proc_8.nfvi-metrics-monitor", 
+                                        "start": {
+                                            "component_name": "RW.Proc_8.nfvi-metrics-monitor", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+                                    {
+                                        "name": "Start the RW.MC.UI", 
+                                        "start": {
+                                            "component_name": "RW.MC.UI", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+#                                   {
+#                                       "name": "Start the RW.COMPOSER.UI", 
+#                                       "start": {
+#                                           "component_name": "RW.COMPOSER.UI", 
+#                                           "config_ready": True
+#                                       }
+#                                   }, 
+                                    {
+                                        "name": "Start the RW.Proc_10.launchpad", 
+                                        "start": {
+                                            "component_name": "RW.Proc_10.launchpad", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+                                    {
+                                        "name": "Start the RW.Proc_11.Resource-Manager", 
+                                        "start": {
+                                            "component_name": "RW.Proc_11.Resource-Manager", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+                                    {
+                                        "name": "Start the RW.uAgent", 
+                                        "start": {
+                                            "python_variable": ["cmdargs_str = '--confd-proto AF_INET --confd-ip 127.0.0.1'"], 
+                                            "component_name": "RW.uAgent", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }, 
+                                    {
+                                        "name": "Start the logd", 
+                                        "start": {
+                                            "component_name": "logd", 
+                                            "recovery_action": "RESTART",
+                                            "config_ready": True
+                                        }
+                                    }
+                                ]
+                            }]
+                        }
+                    }
+                }, 
+#               {
+#                   "component_name": "RW.CLI", 
+#                   "component_type": "PROC", 
+#                   "native_proc": {
+#                       "exe_path": "./usr/bin/rwcli", 
+#                       "args": "--netconf_host 127.0.0.1 --netconf_port 2022 --schema_listing cli_launchpad_schema_listing.txt", 
+#                   }
+#               }, 
+                {
+                    "component_name": "RW.Proc_1.Restconf", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start RW.Restconf for RW.Proc_1.Restconf", 
+                            "component_name": "RW.Restconf", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "RW.Restconf", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/restconf", 
+                        "plugin_name": "restconf"
+                    }
+                }, 
+#               {
+#                   "component_name": "RW.Proc_2.RestPortForward", 
+#                   "component_type": "RWPROC", 
+#                   "rwproc": {
+#                       "tasklet": [{
+#                           "name": "Start RW.RestPortForward for RW.Proc_2.RestPortForward", 
+#                           "component_name": "RW.RestPortForward", 
+#                           "recovery_action": "RESTART",
+#                           "config_ready": True
+#                       }]
+#                   }
+#               }, 
+#               {
+#                   "component_name": "RW.RestPortForward", 
+#                   "component_type": "RWTASKLET", 
+#                   "rwtasklet": {
+#                       "plugin_directory": "./usr/lib/rift/plugins/restportforward", 
+#                       "plugin_name": "restportforward"
+#                   }
+#               }, 
+                {
+                    "component_name": "RW.Proc_3.CalProxy", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start RW.CalProxy for RW.Proc_3.CalProxy", 
+                            "component_name": "RW.CalProxy", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "RW.CalProxy", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwcalproxytasklet", 
+                        "plugin_name": "rwcalproxytasklet"
+                    }
+                }, 
+                {
+                    "component_name": "RW.Proc_4.nfvi-metrics-monitor", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start nfvi-metrics-monitor for RW.Proc_4.nfvi-metrics-monitor", 
+                            "component_name": "nfvi-metrics-monitor", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "nfvi-metrics-monitor", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwmonitor", 
+                        "plugin_name": "rwmonitor"
+                    }
+                }, 
+                {
+                    "component_name": "RW.Proc_5.network-services-manager", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start network-services-manager for RW.Proc_5.network-services-manager", 
+                            "component_name": "network-services-manager", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "network-services-manager", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwnsmtasklet", 
+                        "plugin_name": "rwnsmtasklet"
+                    }
+                }, 
+                {
+                    "component_name": "RW.Proc_6.virtual-network-function-manager", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start virtual-network-function-manager for RW.Proc_6.virtual-network-function-manager", 
+                            "component_name": "virtual-network-function-manager", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "virtual-network-function-manager", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwvnfmtasklet", 
+                        "plugin_name": "rwvnfmtasklet"
+                    }
+                }, 
+                {
+                    "component_name": "RW.Proc_7.virtual-network-service", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start virtual-network-service for RW.Proc_7.virtual-network-service", 
+                            "component_name": "virtual-network-service", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "virtual-network-service", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwvnstasklet", 
+                        "plugin_name": "rwvnstasklet"
+                    }
+                }, 
+                {
+                    "component_name": "RW.Proc_8.nfvi-metrics-monitor", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start nfvi-metrics-monitor for RW.Proc_8.nfvi-metrics-monitor", 
+                            "component_name": "nfvi-metrics-monitor", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "RW.MC.UI", 
+                    "component_type": "PROC", 
+                    "native_proc": {
+                        "exe_path": "./usr/share/rw.ui/skyquake/scripts/launch_ui.sh", 
+                    }
+                },
+                {
+                    "component_name": "RW.COMPOSER.UI",
+                    "component_type": "PROC", 
+                    "native_proc": {
+                        "exe_path": "./usr/share/composer/scripts/launch_composer.sh",
+                    }
+                },
+                {
+                    "component_name": "RW.Proc_9.Configuration-Manager", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start Configuration-Manager for RW.Proc_9.Configuration-Manager", 
+                            "component_name": "Configuration-Manager", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "Configuration-Manager", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwconmantasklet", 
+                        "plugin_name": "rwconmantasklet"
+                    }
+                }, 
+                {
+                    "component_name": "RW.Proc_10.launchpad", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start launchpad for RW.Proc_10.launchpad", 
+                            "component_name": "launchpad", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "launchpad", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwlaunchpad", 
+                        "plugin_name": "rwlaunchpad"
+                    }
+                }, 
+                {
+                    "component_name": "RW.Proc_11.Resource-Manager", 
+                    "component_type": "RWPROC", 
+                    "rwproc": {
+                        "tasklet": [{
+                            "name": "Start Resource-Manager for RW.Proc_11.Resource-Manager", 
+                            "component_name": "Resource-Manager", 
+                            "recovery_action": "RESTART",
+                            "config_ready": True
+                        }]
+                    }
+                }, 
+                {
+                    "component_name": "Resource-Manager", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwresmgrtasklet", 
+                        "plugin_name": "rwresmgrtasklet"
+                    }
+                }, 
+                {
+                    "component_name": "RW.uAgent", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwuagent-c", 
+                        "plugin_name": "rwuagent-c"
+                    }
+                }, 
+                {
+                    "component_name": "logd", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwlogd-c", 
+                        "plugin_name": "rwlogd-c"
+                    }
+                }, 
+                {
+                    "component_name": "msgbroker", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwmsgbroker-c", 
+                        "plugin_name": "rwmsgbroker-c"
+                    }
+                }, 
+                {
+                    "component_name": "dtsrouter", 
+                    "component_type": "RWTASKLET", 
+                    "rwtasklet": {
+                        "plugin_directory": "./usr/lib/rift/plugins/rwdtsrouter-c", 
+                        "plugin_name": "rwdtsrouter-c"
+                    }
+                }
+            ]
+        })
+        return manifest
+
+    def tearDown(self):
+        tasklist = {'reaperd', 'rwlogd-report-c', 'launch_ui.sh'}
+        for proc in psutil.process_iter():
+            if proc.name() in tasklist:
+                print("killing", proc.name())
+                try:
+                    proc.kill()
+                except psutil.NoSuchProcess:
+                    print(proc.name(), "no longer exists")
+        self.loop.stop()
+        self.loop.close()
+
+
+class LaunchPadTest(LaunchPad):
+    """
+    DTS GI interface unittests
+
+    Note:  Each test uses a list of asyncio.Events for staging through the
+    test.  These are required here because we bring up each coroutine
+    ("tasklet") at the same time and do not implement any retry
+    mechanisms.  For instance, this is used in numerous tests to make sure that
+    a publisher is up and ready before the subscriber sends queries.  Such
+    event lists should not be used in production software.
+    """
+    @asyncio.coroutine
+    def inventory(self):
+        res_iter = yield from self.dts_mgmt.query_read('/rw-base:vcs/rw-base:info', flags=0)
+        for i in res_iter:
+            info_result = yield from i
+        components = info_result.result.components.component_info
+        recvd_list = {}
+        for component in components:
+            recvd_list[component.component_name] = (component.instance_id,
+                                                    component.rwcomponent_parent,
+                                                    component.component_type,
+                                                    component.state)
+        return recvd_list
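+    # Each inventory() entry maps a component name to the tuple
+    # (instance_id, parent, component_type, state); the values below are
+    # hypothetical:
+    #   {'msgbroker': (101, 'vm-launchpad', 'RWTASKLET', 'RUNNING'), ...}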
+
+    @asyncio.coroutine
+    def issue_vcrash(self, component_type):
+#       critical_components = {'msgbroker', 'dtsrouter'}
+        critical_components = {'msgbroker', 'dtsrouter', 'RW.uAgent'}
+        comp_inventory = yield from self.inventory()
+        for component in comp_inventory:
+            if comp_inventory[component][2] == component_type:
+                inst = comp_inventory[component][0]
+                if component in critical_components:
+                    print(component, 'Marked as CRITICAL - Not restarting')
+                else:
+                    print('Crashing ', component_type, component)
+                    vcrash_input = rwvcs.VCrashInput(instance_name=component + '-' + str(inst))
+                    query_iter = yield from self.dts_mgmt.query_rpc(xpath="/rw-vcs:vcrash",
+                                                                    flags=0, msg=vcrash_input)
+                    yield from asyncio.sleep(1, loop=self.loop)
+                    restarted_inventory = yield from self.inventory()
+                    self.assertTrue(restarted_inventory[component][3] != 'TO_RECOVER')
+
+    def test_launch_pad(self):
+        """
+        Verify the launchpad setup functions.
+        The test progresses through stages defined by the events list:
+            0:  launchpad setup is brought up
+            1:  Tasklet/PROC/VM restarts are tested to confirm recovery works
+        """
+
+        print("{{{{{{{{{{{{{{{{{{{{STARTING - mano recovery test")
+#       confd_host="127.0.0.1"
+
+        events = [asyncio.Event(loop=self.loop) for _ in range(2)]
+
+        @asyncio.coroutine
+        def sub():
+
+            tinfo = self.new_tinfo('sub')
+            self.dts_mgmt = rift.tasklets.DTS(tinfo, self.schema, self.loop)
+
+            # Sleep for DTS registrations to complete
+            print('.........................................................')
+            print('........SLEEPING 80 seconds for system to come up........')
+            yield from asyncio.sleep(80, loop=self.loop)
+            print('........RESUMING........')
+
+            @asyncio.coroutine
+            def issue_vstop(component, inst, flag=0):
+                vstop_input = rwvcs.VStopInput(instance_name=component + '-' + str(inst))
+                query_iter = yield from self.dts_mgmt.query_rpc(xpath="/rw-vcs:vstop",
+                                                                flags=flag, msg=vstop_input)
+                yield from asyncio.sleep(1, loop=self.loop)
+
+            @asyncio.coroutine
+            def issue_vstart(component, parent, recover=False):
+                vstart_input = rwvcs.VStartInput()
+                vstart_input.component_name = component
+                vstart_input.parent_instance = parent
+                vstart_input.recover = recover
+                query_iter = yield from self.dts_mgmt.query_rpc(xpath="/rw-vcs:vstart",
+                                                                flags=0, msg=vstart_input)
+                yield from asyncio.sleep(1, loop=self.loop)
+
+            @asyncio.coroutine
+            def issue_start_stop(comp_inventory, component_type):
+#               critical_components = {'msgbroker', 'dtsrouter'}
+                critical_components = {'msgbroker', 'dtsrouter', 'RW.uAgent'}
+                for component in comp_inventory:
+                    if comp_inventory[component][2] == component_type:
+                        inst = comp_inventory[component][0]
+                        parent = comp_inventory[component][1]
+                        if component in critical_components:
+                            print(component, 'Marked as CRITICAL - Not restarting')
+                        else:
+                            print('Stopping ', component_type, component)
+                            yield from issue_vstop(component, inst)
+                            restarted_inventory = yield from self.inventory()
+#                           self.assertEqual(restarted_inventory[component][3], 'TO_RECOVER')
+                            print('Starting ', component_type, component)
+                            yield from issue_vstart(component, parent, recover=True)
+                            restarted_inventory = yield from self.inventory()
+                            self.assertTrue(restarted_inventory[component][3] != 'TO_RECOVER')
+
+            yield from asyncio.sleep(20, loop=self.loop)
+            comp_inventory = yield from self.inventory()
+            yield from issue_start_stop(comp_inventory, 'RWTASKLET')
+#           yield from issue_start_stop(comp_inventory, 'RWPROC')
+#           yield from self.issue_vcrash('RWTASKLET')
+
+            yield from asyncio.sleep(20, loop=self.loop)
+            restarted_inventory = yield from self.inventory()
+#           critical_components = {'msgbroker', 'dtsrouter', 'RW.uAgent'}
+            for comp in comp_inventory:
+                self.assertEqual(str(comp_inventory[comp]), str(restarted_inventory[comp])) 
+#               if (comp not in critical_components):
+#                   inst = (comp_inventory[comp])[0]
+#                   yield from issue_vstop(comp,inst)
+
+            events[1].set()
+
+        asyncio.ensure_future(sub(), loop=self.loop)
+        self.run_until(events[1].is_set, timeout=260)
+
+
+def main():
+    plugin_dir = os.path.join(os.environ["RIFT_INSTALL"], "usr/lib/rift/plugins")
+    if 'DTS_TEST_PUB_DIR' not in os.environ:
+        os.environ['DTS_TEST_PUB_DIR'] = os.path.join(plugin_dir, 'dtstestpub')
+
+    if 'RIFT_NO_SUDO_REAPER' not in os.environ:
+        os.environ['RIFT_NO_SUDO_REAPER'] = '1'
+
+    if 'MESSAGE_BROKER_DIR' not in os.environ:
+        os.environ['MESSAGE_BROKER_DIR'] = os.path.join(plugin_dir, 'rwmsgbroker-c')
+
+    if 'ROUTER_DIR' not in os.environ:
+        os.environ['ROUTER_DIR'] = os.path.join(plugin_dir, 'rwdtsrouter-c')
+
+    if 'RW_VAR_RIFT' not in os.environ:
+        os.environ['RW_VAR_RIFT'] = '1'
+    
+    if 'INSTALLDIR' in os.environ:
+        os.chdir(os.environ.get('INSTALLDIR')) 
+
+#   if 'RWMSG_BROKER_SHUNT' not in os.environ:
+#       os.environ['RWMSG_BROKER_SHUNT'] = '1'
+
+    if 'TEST_ENVIRON' not in os.environ:
+        os.environ['TEST_ENVIRON'] = '1'
+
+    if 'RW_MANIFEST' not in os.environ:
+        # NOTE: 'install_dir' was previously referenced here but never defined;
+        # the manifest is written into the install directory (see
+        # configure_schema), so INSTALLDIR is assumed.
+        os.environ['RW_MANIFEST'] = os.path.join(os.environ.get('INSTALLDIR', '.'), 'lptestmanifest.xml')
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    args, _ = parser.parse_known_args()
+
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    unittest.main(testRunner=runner)
+
+if __name__ == '__main__':
+    main()
+
+# vim: sw=4
diff --git a/rwlaunchpad/test/mano_error_ut.py b/rwlaunchpad/test/mano_error_ut.py
new file mode 100755 (executable)
index 0000000..e593cee
--- /dev/null
@@ -0,0 +1,898 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+import asyncio
+import logging
+import os
+import sys
+import time
+import unittest
+import uuid
+
+import xmlrunner
+
+import gi.repository.RwDts as rwdts
+import gi.repository.RwNsmYang as rwnsmyang
+import gi.repository.RwResourceMgrYang as RwResourceMgrYang
+import gi.repository.RwLaunchpadYang as launchpadyang
+import rift.tasklets
+import rift.test.dts
+
+import mano_ut
+
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+class OutOfResourceError(Exception):
+    pass
+
+
+class ComputeResourceRequestMockEventHandler(object):
+    def __init__(self):
+        self._pool_name = "vm_pool"
+        self._vdu_id = str(uuid.uuid4())
+        self._vdu_info = {
+                "vdu_id": self._vdu_id,
+                "state": "active",
+                "management_ip": "1.1.1.1",
+                "public_ip": "1.1.1.1",
+                "connection_points": [],
+                }
+
+        self._resource_state = "active"
+
+        self._event_id = None
+        self._request_info = None
+
+    def allocate(self, event_id, request_info):
+        self._event_id = event_id
+        self._request_info = request_info
+
+        self._vdu_info.update({
+            "name": self._request_info.name,
+            "flavor_id": self._request_info.flavor_id,
+            "image_id": self._request_info.image_id,
+            })
+
+        for cp in request_info.connection_points:
+            info_cp = dict(
+                name=cp.name,
+                virtual_link_id=cp.virtual_link_id,
+                vdu_id=self._vdu_id,
+                state="active",
+                ip_address="1.2.3.4",
+                )
+            self._vdu_info["connection_points"].append(info_cp)
+
+    @property
+    def event_id(self):
+        return self._event_id
+
+    @property
+    def resource_state(self):
+        return self._resource_state
+
+    def set_active(self):
+        self._resource_state = "active"
+
+    def set_failed(self):
+        self._resource_state = "failed"
+
+    def set_pending(self):
+        self._resource_state = "pending"
+
+    @property
+    def response_msg(self):
+        resource_info = dict(
+                pool_name=self._pool_name,
+                resource_state=self.resource_state,
+                )
+        resource_info.update(self._vdu_info)
+
+        response = RwResourceMgrYang.VDUEventData.from_dict(dict(
+            event_id=self._event_id,
+            request_info=self._request_info.as_dict(),
+            resource_info=resource_info,
+            ))
+
+        return response.resource_info
+
+
+class NetworkResourceRequestMockEventHandler(object):
+    def __init__(self):
+        self._pool_name = "network_pool"
+        self._link_id = str(uuid.uuid4())
+        self._link_info = {
+                "virtual_link_id": self._link_id,
+                "state": "active",
+                }
+
+        self._resource_state = "active"
+
+        self._event_id = None
+        self._request_info = None
+
+    def allocate(self, event_id, request_info):
+        self._event_id = event_id
+        self._request_info = request_info
+
+        self._link_info.update({
+            "name": self._request_info.name,
+            "subnet": self._request_info.subnet,
+            })
+
+    @property
+    def event_id(self):
+        return self._event_id
+
+    @property
+    def resource_state(self):
+        return self._resource_state
+
+    def set_active(self):
+        self._resource_state = "active"
+
+    def set_failed(self):
+        self._resource_state = "failed"
+
+    def set_pending(self):
+        self._resource_state = "pending"
+
+    @property
+    def response_msg(self):
+        resource_info = dict(
+                pool_name=self._pool_name,
+                resource_state=self.resource_state,
+                )
+        resource_info.update(self._link_info)
+
+        response = RwResourceMgrYang.VirtualLinkEventData.from_dict(dict(
+            event_id=self._event_id,
+            request_info=self._request_info.as_dict(),
+            resource_info=resource_info,
+            ))
+
+        return response.resource_info
+
+
+class ResourceMgrMock(object):
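+    """Mock of the resource manager tasklet.
+
+    Registers as the DTS publisher for the vdu-event-data and
+    vlink-event-data xpaths and services create/read/delete requests from
+    a pool of pre-created mock event handlers.  OutOfResourceError is
+    raised once the pool is exhausted, which is what drives the failure
+    scenarios in these tests.
+    """
+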
+    VDU_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data"
+    VLINK_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data"
+
+    def __init__(self, dts, log, loop):
+        self._log = log
+        self._dts = dts
+        self._loop = loop
+        self._vdu_reg = None
+        self._link_reg = None
+
+        self._vdu_reg_event = asyncio.Event(loop=self._loop)
+        self._link_reg_event = asyncio.Event(loop=self._loop)
+
+        self._available_compute_handlers = []
+        self._available_network_handlers = []
+
+        self._used_compute_handlers = {}
+        self._used_network_handlers = {}
+
+        self._compute_allocate_requests = 0
+        self._network_allocate_requests = 0
+
+        self._registered = False
+
+    def _allocate_virtual_compute(self, event_id, request_info):
+        self._compute_allocate_requests += 1
+
+        if not self._available_compute_handlers:
+            raise OutOfResourceError("No more compute handlers")
+
+        handler = self._available_compute_handlers.pop()
+        handler.allocate(event_id, request_info)
+        self._used_compute_handlers[event_id] = handler
+
+        return handler.response_msg
+
+    def _allocate_virtual_network(self, event_id, request_info):
+        self._network_allocate_requests += 1
+
+        if not self._available_network_handlers:
+            raise OutOfResourceError("No more network handlers")
+
+        handler = self._available_network_handlers.pop()
+        handler.allocate(event_id, request_info)
+        self._used_network_handlers[event_id] = handler
+
+        return handler.response_msg
+
+    def _release_virtual_network(self, event_id):
+        del self._used_network_handlers[event_id]
+
+    def _release_virtual_compute(self, event_id):
+        del self._used_compute_handlers[event_id]
+
+    def _read_virtual_network(self, event_id):
+        return self._used_network_handlers[event_id].response_msg
+
+    def _read_virtual_compute(self, event_id):
+        return self._used_compute_handlers[event_id].response_msg
+
+    @asyncio.coroutine
+    def on_link_request_prepare(self, xact_info, action, ks_path, request_msg):
+        if not self._registered:
+            self._log.error("Got a prepare callback when not registered!")
+            xact_info.respond_xpath(rwdts.XactRspCode.NA)
+            return
+
+        self._log.debug("Received virtual-link on_prepare callback (self: %s, xact_info: %s, action: %s): %s",
+                        self, xact_info, action, request_msg)
+
+        response_info = None
+        response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
+
+        schema = RwResourceMgrYang.VirtualLinkEventData().schema()
+        pathentry = schema.keyspec_to_entry(ks_path)
+
+        if action == rwdts.QueryAction.CREATE:
+            response_info = self._allocate_virtual_network(
+                    pathentry.key00.event_id,
+                    request_msg.request_info,
+                    )
+
+        elif action == rwdts.QueryAction.DELETE:
+            self._release_virtual_network(pathentry.key00.event_id)
+
+        elif action == rwdts.QueryAction.READ:
+            response_info = self._read_virtual_network(
+                    pathentry.key00.event_id
+                    )
+        else:
+            raise ValueError("Only read/create/delete actions available. Received action: %s" %(action))
+
+        self._log.debug("Responding with VirtualLinkInfo at xpath %s: %s.",
+                        response_xpath, response_info)
+
+        xact_info.respond_xpath(rwdts.XactRspCode.ACK, response_xpath, response_info)
+
+    @asyncio.coroutine
+    def on_vdu_request_prepare(self, xact_info, action, ks_path, request_msg):
+        if not self._registered:
+            self._log.error("Got a prepare callback when not registered!")
+            xact_info.respond_xpath(rwdts.XactRspCode.NA)
+            return
+
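+        # Poll the mock compute handler once a second (up to 120s) until the
+        # VDU reaches a terminal state, then push the final resource-info (or
+        # a synthetic 'failed' state on timeout) to subscribers via an ADVISE
+        # update.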
+        @asyncio.coroutine
+        def monitor_vdu_state(response_xpath, pathentry):
+            self._log.info("Initiating VDU state monitoring for xpath: %s ", response_xpath)
+            loop_cnt = 120
+            while loop_cnt > 0:
+                self._log.debug("VDU state monitoring: Sleeping for 1 second ")
+                yield from asyncio.sleep(1, loop = self._loop)
+                try:
+                    response_info = self._read_virtual_compute(
+                            pathentry.key00.event_id
+                            )
+                except Exception as e:
+                    self._log.error(
+                            "VDU state monitoring: Received exception %s "
+                            "in VDU state monitoring for %s. Aborting monitoring",
+                            str(e), response_xpath
+                            )
+                    raise
+
+                if response_info.resource_state in ('active', 'failed'):
+                    self._log.info(
+                            "VDU state monitoring: VDU reached terminal state."
+                            "Publishing VDU info: %s at path: %s",
+                            response_info, response_xpath
+                            )
+                    yield from self._dts.query_update(response_xpath,
+                                                      rwdts.XactFlag.ADVISE,
+                                                      response_info)
+                    return
+                else:
+                    loop_cnt -= 1
+
+            ### End of while loop. This is only possible if the VDU did not reach a terminal state in time
+            self._log.info("VDU state monitoring: VDU at xpath %s did not reach active state in 120 seconds. Aborting monitoring",
+                           response_xpath)
+            response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
+            response_info.resource_state = 'failed'
+            yield from self._dts.query_update(response_xpath,
+                                              rwdts.XactFlag.ADVISE,
+                                              response_info)
+            return
+
+        self._log.debug("Received vdu on_prepare callback (xact_info: %s, action: %s): %s",
+                        xact_info, action, request_msg)
+
+        response_info = None
+        response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
+
+        schema = RwResourceMgrYang.VDUEventData().schema()
+        pathentry = schema.keyspec_to_entry(ks_path)
+
+        if action == rwdts.QueryAction.CREATE:
+            response_info = self._allocate_virtual_compute(
+                    pathentry.key00.event_id,
+                    request_msg.request_info,
+                    )
+            if response_info.resource_state == 'pending':
+                asyncio.ensure_future(monitor_vdu_state(response_xpath, pathentry),
+                                      loop = self._loop)
+
+        elif action == rwdts.QueryAction.DELETE:
+            self._release_virtual_compute(
+                    pathentry.key00.event_id
+                    )
+
+        elif action == rwdts.QueryAction.READ:
+            response_info = self._read_virtual_compute(
+                    pathentry.key00.event_id
+                    )
+        else:
+            raise ValueError("Only create/delete actions available. Received action: %s" %(action))
+
+        self._log.debug("Responding with VDUInfo at xpath %s: %s",
+                        response_xpath, response_info)
+
+        xact_info.respond_xpath(rwdts.XactRspCode.ACK, response_xpath, response_info)
+
+    @asyncio.coroutine
+    def register(self):
+        @asyncio.coroutine
+        def on_request_ready(registration, status):
+            self._log.debug("Got request ready event (registration: %s) (status: %s)",
+                            registration, status)
+
+            if registration == self._link_reg:
+                self._link_reg_event.set()
+            elif registration == self._vdu_reg:
+                self._vdu_reg_event.set()
+            else:
+                self._log.error("Unknown registration ready event: %s", registration)
+
+
+        with self._dts.group_create() as group:
+            self._log.debug("Registering for Link Resource Request using xpath: %s",
+                            ResourceMgrMock.VLINK_REQUEST_XPATH)
+
+            self._link_reg = group.register(
+                    xpath=ResourceMgrMock.VLINK_REQUEST_XPATH,
+                    handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
+                                                                  on_prepare=self.on_link_request_prepare),
+                    flags=rwdts.Flag.PUBLISHER)
+
+            self._log.debug("Registering for VDU Resource Request using xpath: %s",
+                            ResourceMgrMock.VDU_REQUEST_XPATH)
+
+            self._vdu_reg = group.register(
+                    xpath=ResourceMgrMock.VDU_REQUEST_XPATH,
+                    handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
+                                                                  on_prepare=self.on_vdu_request_prepare),
+                    flags=rwdts.Flag.PUBLISHER)
+
+        self._registered = True
+
+    def unregister(self):
+        self._link_reg.deregister()
+        self._vdu_reg.deregister()
+        self._registered = False
+
+    @asyncio.coroutine
+    def wait_ready(self, timeout=5):
+        self._log.debug("Waiting for all request registrations to become ready.")
+        yield from asyncio.wait([self._link_reg_event.wait(), self._vdu_reg_event.wait()],
+                                timeout=timeout, loop=self._loop)
+
+    def create_compute_mock_event_handler(self):
+        handler = ComputeResourceRequestMockEventHandler()
+        self._available_compute_handlers.append(handler)
+
+        return handler
+
+    def create_network_mock_event_handler(self):
+        handler = NetworkResourceRequestMockEventHandler()
+        self._available_network_handlers.append(handler)
+
+        return handler
+
+    @property
+    def num_compute_requests(self):
+        return self._compute_allocate_requests
+
+    @property
+    def num_network_requests(self):
+        return self._network_allocate_requests
+
+    @property
+    def num_allocated_compute_resources(self):
+        return len(self._used_compute_handlers)
+
+    @property
+    def num_allocated_network_resources(self):
+        return len(self._used_network_handlers)
+
+
+@unittest.skip('failing and needs rework')
+class ManoErrorTestCase(rift.test.dts.AbstractDTSTest):
+    """
+    DTS GI interface unittests
+
+    Note:  Each test uses a list of asyncio.Events for staging through the
+    test.  These are required here because we bring up each coroutine
+    ("tasklet") at the same time and are not implementing any retry
+    mechanisms.  For instance, this is used in numerous tests to make sure that
+    a publisher is up and ready before the subscriber sends queries.  Such
+    event lists should not be used in production software.
+    """
+
+    @classmethod
+    def configure_suite(cls, rwmain):
+        plugin_dir = os.path.join(os.environ["RIFT_INSTALL"], "usr/lib/rift/plugins")
+        rwmain.add_tasklet(
+                os.path.join(plugin_dir, 'rwvns'),
+                'rwvnstasklet'
+                )
+
+        rwmain.add_tasklet(
+                os.path.join(plugin_dir, 'rwvnfm'),
+                'rwvnfmtasklet'
+                )
+
+        rwmain.add_tasklet(
+                os.path.join(plugin_dir, 'rwnsm'),
+                'rwnsmtasklet'
+                )
+
+        cls.waited_for_tasklets = False
+
+    @asyncio.coroutine
+    def register_mock_res_mgr(self):
+        self.res_mgr = ResourceMgrMock(
+                self.dts,
+                self.log,
+                self.loop,
+                )
+        yield from self.res_mgr.register()
+
+        self.log.info("Waiting for resource manager to be ready")
+        yield from self.res_mgr.wait_ready()
+
+    def unregister_mock_res_mgr(self):
+        self.res_mgr.unregister()
+
+    @classmethod
+    def configure_schema(cls):
+        return rwnsmyang.get_schema()
+
+    @classmethod
+    def configure_timeout(cls):
+        return 240
+
+    @asyncio.coroutine
+    def wait_tasklets(self):
+        if not ManoErrorTestCase.waited_for_tasklets:
+            yield from asyncio.sleep(5, loop=self.loop)
+            ManoErrorTestCase.waited_for_tasklets = True
+
+    @asyncio.coroutine
+    def publish_desciptors(self, num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1):
+        # publish_desciptors() itself takes no arguments; the VLR/VM counts
+        # are constructor arguments, so rebuild the publisher first.
+        self.ping_pong = mano_ut.PingPongDescriptorPublisher(
+                self.log, self.loop, self.dts,
+                num_external_vlrs=num_external_vlrs,
+                num_internal_vlrs=num_internal_vlrs,
+                num_ping_vms=num_ping_vms)
+        yield from self.ping_pong.publish_desciptors()
+
+    def unpublish_descriptors(self):
+        self.ping_pong.unpublish_descriptors()
+
+    @asyncio.coroutine
+    def wait_until_nsr_active_or_failed(self, nsr_id, timeout_secs=20):
+        start_time = time.time()
+        while (time.time() - start_time) < timeout_secs:
+            nsrs = yield from self.querier.get_nsr_opdatas(nsr_id)
+            self.assertEqual(1, len(nsrs))
+            if nsrs[0].operational_status in ['running', 'failed']:
+                return
+
+            self.log.debug("Rcvd NSR with %s status", nsrs[0].operational_status)
+            yield from asyncio.sleep(2, loop=self.loop)
+
+        self.assertIn(nsrs[0].operational_status, ['running', 'failed'])
+
+    def verify_number_compute_requests(self, num_requests):
+        self.assertEqual(num_requests, self.res_mgr.num_compute_requests)
+
+    def verify_number_network_requests(self, num_requests):
+        self.assertEqual(num_requests, self.res_mgr.num_network_requests)
+
+    def verify_number_allocated_compute(self, num_allocated):
+        self.assertEqual(num_allocated, self.res_mgr.num_allocated_compute_resources)
+
+    def verify_number_allocated_network(self, num_allocated):
+        self.assertEqual(num_allocated, self.res_mgr.num_allocated_network_resources)
+
+    def allocate_network_handlers(self, num_networks):
+        return [self.res_mgr.create_network_mock_event_handler() for _ in range(num_networks)]
+
+    def allocate_compute_handlers(self, num_computes):
+        return [self.res_mgr.create_compute_mock_event_handler() for _ in range(num_computes)]
+
+    @asyncio.coroutine
+    def create_mock_launchpad_tasklet(self):
+        yield from mano_ut.create_mock_launchpad_tasklet(self.log, self.dts)
+
+    def configure_test(self, loop, test_id):
+        self.log.debug("STARTING - %s", self.id())
+        self.tinfo = self.new_tinfo(self.id())
+        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+        self.ping_pong = mano_ut.PingPongDescriptorPublisher(self.log, self.loop, self.dts)
+        self.querier = mano_ut.ManoQuerier(self.log, self.dts)
+
+        # Add a task to wait for tasklets to come up
+        asyncio.ensure_future(self.wait_tasklets(), loop=self.loop)
+
+    @rift.test.dts.async_test
+    def test_fail_first_nsm_vlr(self):
+        yield from self.publish_desciptors(num_external_vlrs=2)
+        yield from self.register_mock_res_mgr()
+
+        nsr_id = yield from self.ping_pong.create_nsr()
+        yield from self.wait_until_nsr_active_or_failed(nsr_id)
+
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1)
+        yield from self.verify_nsr_state(nsr_id, "failed")
+        yield from self.verify_num_vlrs(1)
+        yield from self.verify_num_nsr_vlrs(nsr_id, 2)
+        yield from self.verify_num_vnfrs(0)
+
+        nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id)
+        yield from self.verify_vlr_state(nsr_vlrs[0], "failed")
+
+        self.verify_number_network_requests(1)
+        self.verify_number_compute_requests(0)
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        yield from self.terminate_nsr(nsr_id)
+
+        yield from self.verify_nsr_deleted(nsr_id)
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0)
+        yield from self.verify_num_vlrs(0)
+
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        self.unregister_mock_res_mgr()
+        self.unpublish_descriptors()
+
+    @rift.test.dts.async_test
+    def test_fail_second_nsm_vlr(self):
+        yield from self.publish_desciptors(num_external_vlrs=2)
+        yield from self.register_mock_res_mgr()
+        self.allocate_network_handlers(1)
+
+        nsr_id = yield from self.ping_pong.create_nsr()
+        yield from self.wait_until_nsr_active_or_failed(nsr_id)
+
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1)
+        yield from self.verify_nsr_state(nsr_id, "failed")
+        yield from self.verify_num_vlrs(2)
+        yield from self.verify_num_nsr_vlrs(nsr_id, 2)
+
+        nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id)
+        yield from self.verify_vlr_state(nsr_vlrs[0], "running")
+        yield from self.verify_vlr_state(nsr_vlrs[1], "failed")
+
+        self.verify_number_network_requests(2)
+        self.verify_number_compute_requests(0)
+        self.verify_number_allocated_network(1)
+        self.verify_number_allocated_compute(0)
+
+        yield from self.terminate_nsr(nsr_id)
+
+        yield from self.verify_nsr_deleted(nsr_id)
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0)
+        yield from self.verify_num_vlrs(0)
+
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        self.unregister_mock_res_mgr()
+        self.unpublish_descriptors()
+
+    @rift.test.dts.async_test
+    def test_fail_first_vnf_first_vlr(self):
+        yield from self.publish_desciptors(num_internal_vlrs=2)
+        yield from self.register_mock_res_mgr()
+        self.allocate_network_handlers(1)
+
+        nsr_id = yield from self.ping_pong.create_nsr()
+        yield from self.wait_until_nsr_active_or_failed(nsr_id)
+
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1)
+        yield from self.verify_nsr_state(nsr_id, "failed")
+        yield from self.verify_num_vlrs(2)
+        yield from self.verify_num_nsr_vlrs(nsr_id, 1)
+
+        nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id)
+        yield from self.verify_vlr_state(nsr_vlrs[0], "running")
+
+        yield from self.verify_num_nsr_vnfrs(nsr_id, 2)
+
+        # Verify only a single vnfr was instantiated and is failed
+        yield from self.verify_num_vnfrs(1)
+        nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id)
+        yield from self.verify_vnf_state(nsr_vnfs[0], "failed")
+
+        yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2)
+        vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0])
+        yield from self.verify_vlr_state(vnf_vlrs[0], "failed")
+
+        self.verify_number_network_requests(2)
+        self.verify_number_compute_requests(0)
+        self.verify_number_allocated_network(1)
+        self.verify_number_allocated_compute(0)
+
+        yield from self.terminate_nsr(nsr_id)
+
+        yield from self.verify_nsr_deleted(nsr_id)
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0)
+        yield from self.verify_num_vlrs(0)
+
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        self.unregister_mock_res_mgr()
+        self.unpublish_descriptors()
+
+    @rift.test.dts.async_test
+    def test_fail_first_vnf_second_vlr(self):
+        yield from self.publish_desciptors(num_internal_vlrs=2)
+        yield from self.register_mock_res_mgr()
+        self.allocate_network_handlers(2)
+
+        nsr_id = yield from self.ping_pong.create_nsr()
+        yield from self.wait_until_nsr_active_or_failed(nsr_id)
+
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1)
+        yield from self.verify_nsr_state(nsr_id, "failed")
+        yield from self.verify_num_vlrs(3)
+        yield from self.verify_num_nsr_vlrs(nsr_id, 1)
+
+        nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id)
+        yield from self.verify_vlr_state(nsr_vlrs[0], "running")
+
+        yield from self.verify_num_nsr_vnfrs(nsr_id, 2)
+
+        # Verify only a single vnfr was instantiated and is failed
+        yield from self.verify_num_vnfrs(1)
+        nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id)
+        yield from self.verify_vnf_state(nsr_vnfs[0], "failed")
+
+        yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2)
+        vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0])
+        yield from self.verify_vlr_state(vnf_vlrs[0], "running")
+        yield from self.verify_vlr_state(vnf_vlrs[1], "failed")
+
+        self.verify_number_network_requests(3)
+        self.verify_number_compute_requests(0)
+        self.verify_number_allocated_network(2)
+        self.verify_number_allocated_compute(0)
+
+        yield from self.terminate_nsr(nsr_id)
+
+        yield from self.verify_nsr_deleted(nsr_id)
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0)
+        yield from self.verify_num_vlrs(0)
+
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        self.unregister_mock_res_mgr()
+        self.unpublish_descriptors()
+
+    @rift.test.dts.async_test
+    def test_fail_first_vnf_first_vdu(self):
+        yield from self.publish_desciptors(num_internal_vlrs=2, num_ping_vms=2)
+        yield from self.register_mock_res_mgr()
+        yield from self.create_mock_launchpad_tasklet()
+        self.allocate_network_handlers(3)
+
+        nsr_id = yield from self.ping_pong.create_nsr()
+        yield from self.wait_until_nsr_active_or_failed(nsr_id)
+
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1)
+        yield from self.verify_nsr_state(nsr_id, "failed")
+        yield from self.verify_num_vlrs(3)
+        yield from self.verify_num_nsr_vlrs(nsr_id, 1)
+
+        nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id)
+        yield from self.verify_vlr_state(nsr_vlrs[0], "running")
+
+        yield from self.verify_num_nsr_vnfrs(nsr_id, 2)
+
+        # Verify only a single vnfr was instantiated and is failed
+        yield from self.verify_num_vnfrs(1)
+        nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id)
+        yield from self.verify_vnf_state(nsr_vnfs[0], "failed")
+
+        yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2)
+        vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0])
+        yield from self.verify_vlr_state(vnf_vlrs[0], "running")
+        yield from self.verify_vlr_state(vnf_vlrs[1], "running")
+
+        yield from self.verify_num_vnfr_vdus(nsr_vnfs[0], 2)
+        vdus = yield from self.get_vnf_vdus(nsr_vnfs[0])
+        self.verify_vdu_state(vdus[0], "failed")
+
+        self.verify_number_network_requests(3)
+        self.verify_number_compute_requests(1)
+        self.verify_number_allocated_network(3)
+        self.verify_number_allocated_compute(0)
+
+        yield from self.terminate_nsr(nsr_id)
+
+        yield from self.verify_nsr_deleted(nsr_id)
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0)
+        yield from self.verify_num_vlrs(0)
+
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        self.unregister_mock_res_mgr()
+        self.unpublish_descriptors()
+
+    @rift.test.dts.async_test
+    def test_fail_first_vnf_second_vdu(self):
+        yield from self.publish_desciptors(num_internal_vlrs=2, num_ping_vms=2)
+        yield from self.register_mock_res_mgr()
+        yield from self.create_mock_launchpad_tasklet()
+        self.allocate_network_handlers(3)
+        self.allocate_compute_handlers(1)
+
+        nsr_id = yield from self.ping_pong.create_nsr()
+        yield from self.wait_until_nsr_active_or_failed(nsr_id)
+
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1)
+        yield from self.verify_nsr_state(nsr_id, "failed")
+        yield from self.verify_num_vlrs(3)
+        yield from self.verify_num_nsr_vlrs(nsr_id, 1)
+
+        nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id)
+        yield from self.verify_vlr_state(nsr_vlrs[0], "running")
+
+        yield from self.verify_num_nsr_vnfrs(nsr_id, 2)
+
+        # Verify only a single vnfr was instantiated and is failed
+        yield from self.verify_num_vnfrs(1)
+        nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id)
+        yield from self.verify_vnf_state(nsr_vnfs[0], "failed")
+
+        yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2)
+        vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0])
+        yield from self.verify_vlr_state(vnf_vlrs[0], "running")
+        yield from self.verify_vlr_state(vnf_vlrs[1], "running")
+
+        yield from self.verify_num_vnfr_vdus(nsr_vnfs[0], 2)
+
+        vdus = yield from self.get_vnf_vdus(nsr_vnfs[0])
+        self.verify_vdu_state(vdus[0], "running")
+        self.verify_vdu_state(vdus[1], "failed")
+
+        self.verify_number_network_requests(3)
+        self.verify_number_compute_requests(2)
+        self.verify_number_allocated_network(3)
+        self.verify_number_allocated_compute(1)
+
+        yield from self.terminate_nsr(nsr_id)
+
+        yield from self.verify_nsr_deleted(nsr_id)
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0)
+        yield from self.verify_num_vlrs(0)
+
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        self.unregister_mock_res_mgr()
+        self.unpublish_descriptors()
+
+    @rift.test.dts.async_test
+    def test_fail_second_vnf_second_vdu(self):
+        yield from self.publish_desciptors(num_internal_vlrs=2, num_ping_vms=2)
+        yield from self.register_mock_res_mgr()
+        yield from self.create_mock_launchpad_tasklet()
+        self.allocate_network_handlers(5)
+        self.allocate_compute_handlers(3)
+
+        nsr_id = yield from self.ping_pong.create_nsr()
+        yield from self.wait_until_nsr_active_or_failed(nsr_id)
+
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 1)
+        yield from self.verify_nsr_state(nsr_id, "failed")
+        yield from self.verify_num_vlrs(5)
+        yield from self.verify_num_nsr_vlrs(nsr_id, 1)
+
+        nsr_vlrs = yield from self.get_nsr_vlrs(nsr_id)
+        yield from self.verify_vlr_state(nsr_vlrs[0], "running")
+
+        yield from self.verify_num_nsr_vnfrs(nsr_id, 2)
+
+        # Verify both vnfrs were instantiated and only the second one failed
+        yield from self.verify_num_vnfrs(2)
+        nsr_vnfs = yield from self.get_nsr_vnfs(nsr_id)
+        yield from self.verify_vnf_state(nsr_vnfs[0], "running")
+        yield from self.verify_vnf_state(nsr_vnfs[1], "failed")
+
+        yield from self.verify_num_vnfr_vlrs(nsr_vnfs[0], 2)
+
+        vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[0])
+        yield from self.verify_vlr_state(vnf_vlrs[0], "running")
+        yield from self.verify_vlr_state(vnf_vlrs[1], "running")
+
+        vnf_vlrs = yield from self.get_vnf_vlrs(nsr_vnfs[1])
+        yield from self.verify_vlr_state(vnf_vlrs[0], "running")
+        yield from self.verify_vlr_state(vnf_vlrs[1], "running")
+
+        yield from self.verify_num_vnfr_vdus(nsr_vnfs[0], 2)
+        yield from self.verify_num_vnfr_vdus(nsr_vnfs[1], 2)
+
+        vdus = yield from self.get_vnf_vdus(nsr_vnfs[0])
+        self.verify_vdu_state(vdus[0], "running")
+        self.verify_vdu_state(vdus[1], "running")
+
+        vdus = yield from self.get_vnf_vdus(nsr_vnfs[1])
+        self.verify_vdu_state(vdus[0], "running")
+        self.verify_vdu_state(vdus[1], "failed")
+
+        self.verify_number_network_requests(5)
+        self.verify_number_compute_requests(4)
+        self.verify_number_allocated_network(5)
+        self.verify_number_allocated_compute(3)
+
+        yield from self.terminate_nsr(nsr_id)
+
+        yield from self.verify_nsr_deleted(nsr_id)
+        yield from self.verify_nsd_ref_count(self.ping_pong.nsd_id, 0)
+        yield from self.verify_num_vlrs(0)
+
+        self.verify_number_allocated_network(0)
+        self.verify_number_allocated_compute(0)
+
+        self.unregister_mock_res_mgr()
+        self.unpublish_descriptors()
+
+
+def main():
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    args, _ = parser.parse_known_args()
+
+    ManoErrorTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    unittest.main(testRunner=runner)
+
+if __name__ == '__main__':
+    main()
+
+# vim: sw=4
diff --git a/rwlaunchpad/test/mano_ut.py b/rwlaunchpad/test/mano_ut.py
new file mode 100755 (executable)
index 0000000..69a0d40
--- /dev/null
@@ -0,0 +1,1198 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import asyncio
+import os
+import sys
+import unittest
+import uuid
+import xmlrunner
+import argparse
+import logging
+import time
+import types
+
+import gi
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('NsrYang', '1.0')
+gi.require_version('RwlogMgmtYang', '1.0')
+
+from gi.repository import (
+    RwCloudYang as rwcloudyang,
+    RwDts as rwdts,
+    RwLaunchpadYang as launchpadyang,
+    RwNsmYang as rwnsmyang,
+    RwNsrYang as rwnsryang,
+    NsrYang as nsryang,
+    RwResourceMgrYang as rmgryang,
+    RwcalYang as rwcalyang,
+    RwConfigAgentYang as rwcfg_agent,
+    RwlogMgmtYang
+)
+
+from gi.repository.RwTypes import RwStatus
+import rift.mano.examples.ping_pong_nsd as ping_pong_nsd
+import rift.tasklets
+import rift.test.dts
+import rw_peas
+
+
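+# Example OpenStack credentials; get_cal_account() consumes these when an
+# 'openstack_static' or 'openstack_dynamic' account is requested.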
+openstack_info = {
+        'username': 'pluto',
+        'password': 'mypasswd',
+        'auth_url': 'http://10.66.4.27:5000/v3/',
+        'project_name': 'demo',
+        'mgmt_network': 'private',
+        }
+
+
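+# asyncio.ensure_future() first appeared in Python 3.4.4; fall back to the
+# older asyncio.async() alias on earlier interpreters.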
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+class XPaths(object):
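+    """Keyed XPath builders for the descriptors and records used in the tests.
+
+    Each helper returns the catalog-wide path when called without a key and
+    the specific element path when called with an id.
+    """
+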
+    @staticmethod
+    def nsd(k=None):
+        return ("C,/nsd:nsd-catalog/nsd:nsd" +
+                ("[nsd:id='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def vld(k=None):
+        return ("C,/vld:vld-catalog/vld:vld" +
+                ("[vld:id='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def vnfd(k=None):
+        return ("C,/vnfd:vnfd-catalog/vnfd:vnfd" +
+                ("[vnfd:id='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def vnfr(k=None):
+        return ("D,/vnfr:vnfr-catalog/vnfr:vnfr" +
+                ("[vnfr:id='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def vlr(k=None):
+        return ("D,/vlr:vlr-catalog/vlr:vlr" +
+                ("[vlr:id='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def nsd_ref_count(k=None):
+        return ("D,/nsr:ns-instance-opdata/rw-nsr:nsd-ref-count" +
+                ("[rw-nsr:nsd-id-ref='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def vnfd_ref_count(k=None):
+        return ("D,/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count" +
+                ("[rw-nsr:nsd-id-ref='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def nsr_config(k=None):
+        return ("C,/nsr:ns-instance-config/nsr:nsr" +
+                ("[nsr:id='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def nsr_opdata(k=None):
+        return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
+                ("[nsr:ns-instance-config-ref='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def nsr_config_status(k=None):
+        return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
+                ("[nsr:ns-instance-config-ref='{}']/config_status".format(k) if k is not None else ""))
+
+    @staticmethod
+    def cm_state(k=None):
+        return ("D,/rw-conman:cm-state/rw-conman:cm-nsr" +
+                ("[rw-conman:id='{}']".format(k) if k is not None else ""))
+
+    @staticmethod
+    def nsr_scale_group_instance(nsr_id=None, group_name=None, index=None):
+        return (("D,/nsr:ns-instance-opdata/nsr:nsr") +
+                ("[nsr:ns-instance-config-ref='{}']".format(nsr_id) if nsr_id is not None else "") +
+                ("/nsr:scaling-group-record") +
+                ("[nsr:scaling-group-name-ref='{}']".format(group_name) if group_name is not None else "") +
+                ("/nsr:instance") +
+                ("[nsr:scaling-group-index-ref='{}']".format(index) if index is not None else ""))
+
+    @staticmethod
+    def nsr_scale_group_instance_config(nsr_id=None, group_name=None, index=None):
+        return (("C,/nsr:ns-instance-config/nsr:nsr") +
+                ("[nsr:id='{}']".format(nsr_id) if nsr_id is not None else "") +
+                ("/nsr:scaling-group") +
+                ("[nsr:scaling-group-name-ref='{}']".format(group_name) if group_name is not None else "") +
+                ("/nsr:instance") +
+                ("[nsr:index='{}']".format(index) if index is not None else ""))
+
+
+class ManoQuerier(object):
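+    """Thin wrapper around DTS read/update/delete queries for MANO objects."""
+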
+    def __init__(self, log, dts):
+        self.log = log
+        self.dts = dts
+
+    @asyncio.coroutine
+    def _read_query(self, xpath, do_trace=False):
+        self.log.debug("Running XPATH read query: %s (trace: %s)", xpath, do_trace)
+        flags = rwdts.XactFlag.MERGE
+        flags += rwdts.XactFlag.TRACE if do_trace else 0
+        res_iter = yield from self.dts.query_read(
+                xpath, flags=flags
+                )
+
+        results = []
+        for i in res_iter:
+            result = yield from i
+            if result is not None:
+                results.append(result.result)
+
+        return results
+
+    @asyncio.coroutine
+    def get_cm_state(self, nsr_id=None):
+        return (yield from self._read_query(XPaths.cm_state(nsr_id), False))
+
+    @asyncio.coroutine
+    def get_nsr_opdatas(self, nsr_id=None):
+        return (yield from self._read_query(XPaths.nsr_opdata(nsr_id), False))
+
+    @asyncio.coroutine
+    def get_nsr_scale_group_instance_opdata(self, nsr_id=None, group_name=None, index=None):
+        return (yield from self._read_query(XPaths.nsr_scale_group_instance(nsr_id, group_name, index), False))
+        #return (yield from self._read_query(XPaths.nsr_scale_group_instance(nsr_id, group_name), True))
+
+    @asyncio.coroutine
+    def get_nsr_configs(self, nsr_id=None):
+        return (yield from self._read_query(XPaths.nsr_config(nsr_id)))
+
+    @asyncio.coroutine
+    def get_nsr_config_status(self, nsr_id=None):
+        return (yield from self._read_query(XPaths.nsr_config_status(nsr_id)))
+
+    @asyncio.coroutine
+    def get_vnfrs(self, vnfr_id=None):
+        return (yield from self._read_query(XPaths.vnfr(vnfr_id)))
+
+    @asyncio.coroutine
+    def get_vlrs(self, vlr_id=None):
+        return (yield from self._read_query(XPaths.vlr(vlr_id)))
+
+    @asyncio.coroutine
+    def get_nsd_ref_counts(self, nsd_id=None):
+        return (yield from self._read_query(XPaths.nsd_ref_count(nsd_id)))
+
+    @asyncio.coroutine
+    def get_vnfd_ref_counts(self, vnfd_id=None):
+        return (yield from self._read_query(XPaths.vnfd_ref_count(vnfd_id)))
+
+    @asyncio.coroutine
+    def delete_nsr(self, nsr_id):
+        with self.dts.transaction() as xact:
+            yield from self.dts.query_delete(
+                    XPaths.nsr_config(nsr_id),
+                    0
+                    #rwdts.XactFlag.TRACE,
+                    #rwdts.Flag.ADVISE,
+                    )
+
+    @asyncio.coroutine
+    def delete_nsd(self, nsd_id):
+        nsd_xpath = XPaths.nsd(nsd_id)
+        self.log.debug("Attempting to delete NSD with path = %s", nsd_xpath)
+        with self.dts.transaction() as xact:
+            yield from self.dts.query_delete(
+                    nsd_xpath,
+                    rwdts.XactFlag.ADVISE,
+                    )
+
+    @asyncio.coroutine
+    def delete_vnfd(self, vnfd_id):
+        vnfd_xpath = XPaths.vnfd(vnfd_id)
+        self.log.debug("Attempting to delete VNFD with path = %s", vnfd_xpath)
+        with self.dts.transaction() as xact:
+            yield from self.dts.query_delete(
+                    vnfd_xpath,
+                    rwdts.XactFlag.ADVISE,
+                    )
+
+    @asyncio.coroutine
+    def update_nsd(self, nsd_id, nsd_msg):
+        nsd_xpath = XPaths.nsd(nsd_id)
+        self.log.debug("Attempting to update NSD with path = %s", nsd_xpath)
+        with self.dts.transaction() as xact:
+            yield from self.dts.query_update(
+                    nsd_xpath,
+                    rwdts.XactFlag.ADVISE,
+                    nsd_msg,
+                    )
+
+    @asyncio.coroutine
+    def update_vnfd(self, vnfd_id, vnfd_msg):
+        vnfd_xpath = XPaths.vnfd(vnfd_id)
+        self.log.debug("Attempting to delete VNFD with path = %s", vnfd_xpath)
+        with self.dts.transaction() as xact:
+            yield from self.dts.query_update(
+                    vnfd_xpath,
+                    rwdts.XactFlag.ADVISE,
+                    vnfd_msg,
+                    )
+
+    @asyncio.coroutine
+    def update_nsr_config(self, nsr_id, nsr_msg):
+        nsr_xpath = XPaths.nsr_config(nsr_id)
+        self.log.debug("Attempting to update NSR with path = %s", nsr_xpath)
+        with self.dts.transaction() as xact:
+            yield from self.dts.query_update(
+                    nsr_xpath,
+                    rwdts.XactFlag.ADVISE|rwdts.XactFlag.REPLACE,
+                    nsr_msg,
+                    )
+
+
+class ManoTestCase(rift.test.dts.AbstractDTSTest):
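+    """Verification helpers shared by the MANO unit tests."""
+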
+    @asyncio.coroutine
+    def verify_nsr_state(self, nsr_id, state):
+        nsrs = yield from self.querier.get_nsr_opdatas(nsr_id)
+        self.assertEqual(1, len(nsrs))
+        nsr = nsrs[0]
+
+        self.log.debug("Got nsr = %s", nsr)
+        self.assertEqual(state, nsr.operational_status)
+
+    @asyncio.coroutine
+    def verify_vlr_state(self, vlr_id, state):
+        vlrs = yield from self.querier.get_vlrs(vlr_id)
+        self.assertEqual(1, len(vlrs))
+        vlr = vlrs[0]
+
+        self.assertEqual(state, vlr.operational_status)
+
+    def verify_vdu_state(self, vdu, state):
+        self.assertEqual(state, vdu.operational_status)
+
+    @asyncio.coroutine
+    def verify_vnf_state(self, vnfr_id, state):
+        vnfrs = yield from self.querier.get_vnfrs(vnfr_id)
+        self.assertEqual(1, len(vnfrs))
+        vnfr = vnfrs[0]
+
+        self.assertEqual(state, vnfr.operational_status)
+
+    @asyncio.coroutine
+    def terminate_nsr(self, nsr_id):
+        self.log.debug("Terminating nsr id: %s", nsr_id)
+        yield from self.querier.delete_nsr(nsr_id)
+
+    @asyncio.coroutine
+    def verify_nsr_deleted(self, nsr_id):
+        nsr_opdatas = yield from self.querier.get_nsr_opdatas(nsr_id)
+        self.assertEqual(0, len(nsr_opdatas))
+
+        nsr_configs = yield from self.querier.get_nsr_configs(nsr_id)
+        self.assertEqual(0, len(nsr_configs))
+
+    @asyncio.coroutine
+    def verify_num_vlrs(self, num_vlrs):
+        vlrs = yield from self.querier.get_vlrs()
+        self.assertEqual(num_vlrs, len(vlrs))
+
+    @asyncio.coroutine
+    def get_nsr_vlrs(self, nsr_id):
+        nsrs = yield from self.querier.get_nsr_opdatas(nsr_id)
+        return [v.vlr_ref for v in nsrs[0].vlr]
+
+    @asyncio.coroutine
+    def get_nsr_vnfs(self, nsr_id):
+        nsrs = yield from self.querier.get_nsr_opdatas(nsr_id)
+        return nsrs[0].constituent_vnfr_ref
+
+    @asyncio.coroutine
+    def get_vnf_vlrs(self, vnfr_id):
+        vnfrs = yield from self.querier.get_vnfrs(vnfr_id)
+        return [i.vlr_ref for i in vnfrs[0].internal_vlr]
+
+    @asyncio.coroutine
+    def verify_num_nsr_vlrs(self, nsr_id, num_vlrs):
+        vlrs = yield from self.get_nsr_vlrs(nsr_id)
+        self.assertEqual(num_vlrs, len(vlrs))
+
+    @asyncio.coroutine
+    def verify_num_nsr_vnfrs(self, nsr_id, num_vnfs):
+        vnfs = yield from self.get_nsr_vnfs(nsr_id)
+        self.assertEqual(num_vnfs, len(vnfs))
+
+    @asyncio.coroutine
+    def verify_num_vnfr_vlrs(self, vnfr_id, num_vlrs):
+        vlrs = yield from self.get_vnf_vlrs(vnfr_id)
+        self.assertEqual(num_vlrs, len(vlrs))
+
+    @asyncio.coroutine
+    def get_vnf_vdus(self, vnfr_id):
+        vnfrs = yield from self.querier.get_vnfrs(vnfr_id)
+        return [i for i in vnfrs[0].vdur]
+
+    @asyncio.coroutine
+    def verify_num_vnfr_vdus(self, vnfr_id, num_vdus):
+        vdus = yield from self.get_vnf_vdus(vnfr_id)
+        self.assertEqual(num_vdus, len(vdus))
+
+    @asyncio.coroutine
+    def verify_num_vnfrs(self, num_vnfrs):
+        vnfrs = yield from self.querier.get_vnfrs()
+        self.assertEqual(num_vnfrs, len(vnfrs))
+
+    @asyncio.coroutine
+    def verify_nsd_ref_count(self, nsd_id, num_ref):
+        nsd_ref_counts = yield from self.querier.get_nsd_ref_counts(nsd_id)
+        self.assertEqual(num_ref, nsd_ref_counts[0].instance_ref_count)
+
+class DescriptorPublisher(object):
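+    """Publishes descriptor objects into DTS and tracks the registrations so
+    they can all be torn down again with unpublish_all()."""
+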
+    def __init__(self, log, loop, dts):
+        self.log = log
+        self.loop = loop
+        self.dts = dts
+
+        self._registrations = []
+
+    @asyncio.coroutine
+    def publish(self, w_path, path, desc):
+        ready_event = asyncio.Event(loop=self.loop)
+
+        @asyncio.coroutine
+        def on_ready(regh, status):
+            self.log.debug("Create element: %s, obj-type:%s obj:%s",
+                           path, type(desc), desc)
+            with self.dts.transaction() as xact:
+                regh.create_element(path, desc, xact.xact)
+            self.log.debug("Created element: %s, obj:%s", path, desc)
+            ready_event.set()
+
+        handler = rift.tasklets.DTS.RegistrationHandler(
+                on_ready=on_ready
+                )
+
+        self.log.debug("Registering path: %s, obj:%s", w_path, desc)
+        reg = yield from self.dts.register(
+                w_path,
+                handler,
+                flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ
+                )
+        self._registrations.append(reg)
+        self.log.debug("Registered path : %s", w_path)
+        yield from ready_event.wait()
+
+        return reg
+
+    def unpublish_all(self):
+        self.log.debug("Deregistering all published descriptors")
+        for reg in self._registrations:
+            reg.deregister()
+
+
+class PingPongNsrConfigPublisher(object):
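+    """Builds and publishes an NS instance config for the ping-pong NSD,
+    including placement-group maps and input parameters."""
+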
+    XPATH = "C,/nsr:ns-instance-config"
+
+    def __init__(self, log, loop, dts, ping_pong, cloud_account_name):
+        self.dts = dts
+        self.log = log
+        self.loop = loop
+        self.ref = None
+
+        self.querier = ManoQuerier(log, dts)
+
+        self.nsr_config = rwnsryang.YangData_Nsr_NsInstanceConfig()
+
+        nsr = rwnsryang.YangData_Nsr_NsInstanceConfig_Nsr()
+        nsr.id = str(uuid.uuid4())
+        nsr.name = "ns1.{}".format(nsr.id)
+        nsr.nsd = nsryang.YangData_Nsr_NsInstanceConfig_Nsr_Nsd()
+        nsr.nsd.from_dict(ping_pong.ping_pong_nsd.nsd.as_dict())
+        nsr.cloud_account = cloud_account_name
+
+        nsr.vnf_cloud_account_map.add().from_dict({
+            'member_vnf_index_ref': nsr.nsd.constituent_vnfd[0].member_vnf_index,
+            'config_agent_account': 'RiftCA',
+            #'cloud_account':'mock_account1'
+        })
+
+        inputs = nsryang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter()
+        inputs.xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(ping_pong.nsd_id)
+        inputs.value = "inigo montoya"
+
+        fast_cpu = {'metadata_key': 'FASTCPU', 'metadata_value': 'True'}
+        self.create_nsd_placement_group_map(nsr,
+                                            group_name      = 'Orcus',
+                                            cloud_type      = 'openstack',
+                                            construct_type  = 'host_aggregate',
+                                            construct_value = [fast_cpu])
+
+        fast_storage = {'metadata_key': 'FASTSSD', 'metadata_value': 'True'}
+        self.create_nsd_placement_group_map(nsr,
+                                            group_name      = 'Quaoar',
+                                            cloud_type      = 'openstack',
+                                            construct_type  = 'host_aggregate',
+                                            construct_value = [fast_storage])
+
+        fast_cpu = {'metadata_key': 'BLUE_HW', 'metadata_value': 'True'}
+        self.create_vnfd_placement_group_map(nsr,
+                                             group_name      = 'Eris',
+                                             vnfd_id         = ping_pong.ping_vnfd_id,
+                                             cloud_type      = 'openstack',
+                                             construct_type  = 'host_aggregate',
+                                             construct_value = [fast_cpu])
+
+        fast_storage = {'metadata_key': 'YELLOW_HW', 'metadata_value': 'True'}
+        self.create_vnfd_placement_group_map(nsr,
+                                             group_name      = 'Weywot',
+                                             vnfd_id         = ping_pong.pong_vnfd_id,
+                                             cloud_type      = 'openstack',
+                                             construct_type  = 'host_aggregate',
+                                             construct_value = [fast_storage])
+
+
+        nsr.input_parameter.append(inputs)
+
+        self._nsr = nsr
+        self.nsr_config.nsr.append(nsr)
+
+        self._ready_event = asyncio.Event(loop=self.loop)
+        asyncio.ensure_future(self.register(), loop=loop)
+
+    @asyncio.coroutine
+    def register(self):
+        @asyncio.coroutine
+        def on_ready(regh, status):
+            self._ready_event.set()
+
+        self.log.debug("Registering path: %s", PingPongNsrConfigPublisher.XPATH)
+        self.reg = yield from self.dts.register(
+                PingPongNsrConfigPublisher.XPATH,
+                flags=rwdts.Flag.PUBLISHER,
+                handler=rift.tasklets.DTS.RegistrationHandler(
+                    on_ready=on_ready,
+                    ),
+                )
+
+    @asyncio.coroutine
+    def publish(self):
+        self.log.debug("Publishing NSR: {}".format(self.nsr_config))
+        yield from self._ready_event.wait()
+        with self.dts.transaction() as xact:
+            self.reg.create_element(
+                    PingPongNsrConfigPublisher.XPATH,
+                    self.nsr_config,
+                    xact=xact.xact,
+                    )
+
+        return self._nsr.id
+
+    @asyncio.coroutine
+    def create_scale_group_instance(self, group_name, index):
+        scaling_group = self.nsr_config.nsr[0].scaling_group.add()
+        scaling_group.from_dict({
+            "scaling_group_name_ref": group_name,
+            "instance": [{"index": index}],
+            })
+        with self.dts.transaction() as xact:
+            self.reg.update_element(
+                    PingPongNsrConfigPublisher.XPATH,
+                    self.nsr_config,
+                    xact=xact.xact,
+                    )
+
+        return index
+
+    def create_nsd_placement_group_map(self,
+                                       nsr,
+                                       group_name,
+                                       cloud_type,
+                                       construct_type,
+                                       construct_value):
+        placement_group  = nsr.nsd_placement_group_maps.add()
+        placement_group.from_dict({
+            "placement_group_ref" : group_name,
+            "cloud_type"          : cloud_type,
+            construct_type        : construct_value,
+            })
+
+    def create_vnfd_placement_group_map(self,
+                                        nsr,
+                                        group_name,
+                                        vnfd_id,
+                                        cloud_type,
+                                        construct_type,
+                                        construct_value):
+        placement_group  = nsr.vnfd_placement_group_maps.add()
+        placement_group.from_dict({
+            "placement_group_ref"  : group_name,
+            "vnfd_id_ref"          : vnfd_id,
+            "cloud_type"           : cloud_type,
+            construct_type         : construct_value,
+            })
+
+    @asyncio.coroutine
+    def delete_scale_group_instance(self, group_name, index):
+        self.log.debug("Deleting scale group %s instance %s", group_name, index)
+        #del self.nsr_config.nsr[0].scaling_group[0].instance[0]
+        xpath = XPaths.nsr_scale_group_instance_config(self.nsr_config.nsr[0].id, group_name, index)
+        yield from self.dts.query_delete(xpath, flags=rwdts.XactFlag.ADVISE)
+        #with self.dts.transaction() as xact:
+        #    self.reg.update_element(
+        #            PingPongNsrConfigPublisher.XPATH,
+        #            self.nsr_config,
+        #            flags=rwdts.XactFlag.REPLACE,
+        #            xact=xact.xact,
+        #            )
+
+    def deregister(self):
+        if self.reg is not None:
+            self.reg.deregister()
+
+    def create_nsr_vl(self):
+        vld = self.nsr_config.nsr[0].nsd.vld.add()
+        vld.id = 'ping_pong_vld_2'
+        vld.name = 'ping_pong_vld_2'  # hard coded
+        vld.short_name = vld.name
+        vld.vendor = 'RIFT.io'
+        vld.description = 'Toy VL'
+        vld.version = '1.0'
+        vld.type_yang = 'ELAN'
+
+        # cpref = vld.vnfd_connection_point_ref.add()
+        # cpref.member_vnf_index_ref = cp[0]
+        # cpref.vnfd_id_ref = cp[1]
+        # cpref.vnfd_connection_point_ref = cp[2]
+
+        vld = self.nsr_config.nsr[0].vl_cloud_account_map.add()
+        vld.vld_id_ref = 'ping_pong_vld_2'
+        vld.cloud_accounts = ["mock_account"]
+
+    @asyncio.coroutine
+    def add_nsr_vl(self):
+        self.create_nsr_vl()
+        yield from self.querier.update_nsr_config(
+            self.nsr_config.nsr[0].id,
+            self.nsr_config.nsr[0],
+        )
+
+    @asyncio.coroutine
+    def del_nsr_vl(self):
+        for vld in self.nsr_config.nsr[0].nsd.vld:
+            if vld.id == 'ping_pong_vld_2':
+                self.nsr_config.nsr[0].nsd.vld.remove(vld)
+                break
+
+        yield from self.querier.update_nsr_config(
+            self.nsr_config.nsr[0].id,
+            self.nsr_config.nsr[0],
+        )
+
+    def update_vnf_cloud_map(self, vnf_cloud_map):
+        self.log.debug("Modifying NSR to add VNF cloud account map: {}".format(vnf_cloud_map))
+        for vnf_index, cloud_acct in vnf_cloud_map.items():
+            vnf_maps = [vnf_map for vnf_map in self.nsr_config.nsr[0].vnf_cloud_account_map
+                        if vnf_index == vnf_map.member_vnf_index_ref]
+            if vnf_maps:
+                vnf_maps[0].cloud_account = cloud_acct
+            else:
+                self.nsr_config.nsr[0].vnf_cloud_account_map.add().from_dict({
+                    'member_vnf_index_ref': vnf_index,
+                    'cloud_account': cloud_acct,
+                    })
+
+
+class PingPongDescriptorPublisher(object):
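+    """Generates the ping-pong descriptors and publishes them through
+    DescriptorPublisher."""
+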
+    def __init__(self, log, loop, dts, num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1):
+        self.log = log
+        self.loop = loop
+        self.dts = dts
+
+        self.querier = ManoQuerier(self.log, self.dts)
+        self.publisher = DescriptorPublisher(self.log, self.loop, self.dts)
+        self.ping_vnfd, self.pong_vnfd, self.ping_pong_nsd = \
+                ping_pong_nsd.generate_ping_pong_descriptors(
+                        pingcount=1,
+                        external_vlr_count=num_external_vlrs,
+                        internal_vlr_count=num_internal_vlrs,
+                        num_vnf_vms=2,
+                        mano_ut=True,
+                        use_scale_group=True,
+                        use_mon_params=False,
+                        )
+
+        self.config_dir = os.path.join(os.getenv('RIFT_ARTIFACTS'),
+                                       "launchpad/libs",
+                                       self.ping_pong_nsd.id,
+                                       "config")
+
+    @property
+    def nsd_id(self):
+        return self.ping_pong_nsd.id
+
+    @property
+    def ping_vnfd_id(self):
+        return self.ping_vnfd.id
+
+    @property
+    def pong_vnfd_id(self):
+        return self.pong_vnfd.id
+
+    @asyncio.coroutine
+    def publish_desciptors(self):
+        # Publish ping_vnfd
+        xpath = XPaths.vnfd(self.ping_vnfd_id)
+        xpath_wild = XPaths.vnfd()
+        for obj in self.ping_vnfd.descriptor.vnfd:
+            self.log.debug("Publishing ping_vnfd path: %s - %s, type:%s, obj:%s",
+                           xpath, xpath_wild, type(obj), obj)
+            yield from self.publisher.publish(xpath_wild, xpath, obj)
+
+        # Publish pong_vnfd
+        xpath = XPaths.vnfd(self.pong_vnfd_id)
+        xpath_wild = XPaths.vnfd()
+        for obj in self.pong_vnfd.descriptor.vnfd:
+            self.log.debug("Publishing pong_vnfd path: %s, wild_path: %s, obj:%s",
+                           xpath, xpath_wild, obj)
+            yield from self.publisher.publish(xpath_wild, xpath, obj)
+
+        # Publish ping_pong_nsd
+        xpath = XPaths.nsd(self.nsd_id)
+        xpath_wild = XPaths.nsd()
+        for obj in self.ping_pong_nsd.descriptor.nsd:
+            self.log.debug("Publishing ping_pong nsd path: %s, wild_path: %s, obj:%s",
+                           xpath, xpath_wild, obj)
+            yield from self.publisher.publish(xpath_wild, xpath, obj)
+
+        self.log.debug("DONE - publish_desciptors")
+
+    def unpublish_descriptors(self):
+        self.publisher.unpublish_all()
+
+    @asyncio.coroutine
+    def delete_nsd(self):
+        yield from self.querier.delete_nsd(self.ping_pong_nsd.id)
+
+    @asyncio.coroutine
+    def delete_ping_vnfd(self):
+        yield from self.querier.delete_vnfd(self.ping_vnfd.id)
+
+    @asyncio.coroutine
+    def update_nsd(self):
+        yield from self.querier.update_nsd(
+                self.ping_pong_nsd.id,
+                self.ping_pong_nsd.descriptor.nsd[0]
+                )
+
+    @asyncio.coroutine
+    def update_ping_vnfd(self):
+        yield from self.querier.update_vnfd(
+                self.ping_vnfd.id,
+                self.ping_vnfd.descriptor.vnfd[0]
+                )
+
+
+
+
+class ManoTestCase(rift.test.dts.AbstractDTSTest):
+    """
+    DTS GI interface unittests
+
+    Note:  Each test uses a list of asyncio.Events for staging through the
+    test.  These are required here because we bring up each coroutine
+    ("tasklet") at the same time and are not implementing any retry
+    mechanisms.  For instance, this is used in numerous tests to make sure that
+    a publisher is up and ready before the subscriber sends queries.  Such
+    event lists should not be used in production software.
+    """
+
+    @classmethod
+    def configure_suite(cls, rwmain):
+        vns_dir = os.environ.get('VNS_DIR')
+        vnfm_dir = os.environ.get('VNFM_DIR')
+        nsm_dir = os.environ.get('NSM_DIR')
+        rm_dir = os.environ.get('RM_DIR')
+
+        rwmain.add_tasklet(vns_dir, 'rwvnstasklet')
+        rwmain.add_tasklet(vnfm_dir, 'rwvnfmtasklet')
+        rwmain.add_tasklet(nsm_dir, 'rwnsmtasklet')
+        rwmain.add_tasklet(rm_dir, 'rwresmgrtasklet')
+        rwmain.add_tasklet(rm_dir, 'rwconmantasklet')
+
+    @classmethod
+    def configure_schema(cls):
+        return rwnsmyang.get_schema()
+
+    @classmethod
+    def configure_timeout(cls):
+        return 240
+
+    @staticmethod
+    def get_cal_account(account_type, account_name):
+        """
+        Creates a rwcloudyang.CloudAccount object for the given account type.
+        """
+        account = rwcloudyang.CloudAccount()
+        if account_type == 'mock':
+            account.name          = account_name
+            account.account_type  = "mock"
+            account.mock.username = "mock_user"
+        elif account_type in ('openstack_static', 'openstack_dynamic'):
+            account.name = account_name
+            account.account_type = 'openstack'
+            account.openstack.key = openstack_info['username']
+            account.openstack.secret       = openstack_info['password']
+            account.openstack.auth_url     = openstack_info['auth_url']
+            account.openstack.tenant       = openstack_info['project_name']
+            account.openstack.mgmt_network = openstack_info['mgmt_network']
+        return account
+
+    @asyncio.coroutine
+    def configure_cloud_account(self, dts, cloud_type, cloud_name="cloud1"):
+        account = self.get_cal_account(cloud_type, cloud_name)
+        account_xpath = "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='{}']".format(cloud_name)
+        self.log.info("Configuring cloud-account: %s", account)
+        yield from dts.query_create(account_xpath,
+                                    rwdts.XactFlag.ADVISE,
+                                    account)
+
+    @asyncio.coroutine
+    def wait_tasklets(self):
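+        # Fixed 5s grace period for the tasklets started in configure_suite to
+        # come up; there is no readiness handshake (see the class docstring).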
+        yield from asyncio.sleep(5, loop=self.loop)
+
+    def configure_test(self, loop, test_id):
+        self.log.debug("STARTING - %s", self.id())
+        self.tinfo = self.new_tinfo(self.id())
+        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+        self.ping_pong = PingPongDescriptorPublisher(self.log, self.loop, self.dts)
+        self.querier = ManoQuerier(self.log, self.dts)
+        self.nsr_publisher = PingPongNsrConfigPublisher(
+                self.log,
+                loop,
+                self.dts,
+                self.ping_pong,
+                "mock_account",
+                )
+
+    def test_create_nsr_record(self):
+
+        @asyncio.coroutine
+        def verify_cm_state(termination=False, nsrid=None):
+            self.log.debug("Verifying cm_state path = %s", XPaths.cm_state(nsrid))
+            #print("###>>> Verifying cm_state path:", XPaths.cm_state(nsrid))
+
+            loop_count = 10
+            loop_sleep = 10
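+            # Poll cm-state up to loop_count times, loop_sleep seconds apart,
+            # until the NSR reaches 'ready' (or, when verifying termination,
+            # until the record disappears).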
+            while loop_count:
+                yield from asyncio.sleep(loop_sleep, loop=self.loop)
+                loop_count -= 1
+                cm_nsr = None
+                cm_nsr_i = yield from self.querier.get_cm_state(nsr_id=nsrid)
+                if (cm_nsr_i is not None and len(cm_nsr_i) != 0):
+                    self.assertEqual(1, len(cm_nsr_i))
+                    cm_nsr = cm_nsr_i[0].as_dict()
+                    #print("###>>> cm_nsr=", cm_nsr)
+                if termination:
+                    if len(cm_nsr_i) == 0:
+                        print("\n###>>> cm-state NSR deleted OK <<<###\n")
+                        return
+                elif (cm_nsr is not None and
+                    'state' in cm_nsr and
+                    (cm_nsr['state'] == 'ready')):
+                    self.log.debug("Got cm_nsr record %s", cm_nsr)
+                    print("\n###>>> cm-state NSR 'ready' OK <<<###\n")
+                    return
+
+                # if (len(cm_nsr_i) == 1 and cm_nsr_i[0].state == 'ready'):
+                #     self.log.debug("Got cm_nsr record %s", cm_nsr)
+                # else:
+                #     yield from asyncio.sleep(10, loop=self.loop)
+
+            print("###>>> Failed cm-state, termination:", termination)
+            self.assertEqual(1, loop_count)
+
+        @asyncio.coroutine
+        def verify_nsr_opdata(termination=False):
+            self.log.debug("Verifying nsr opdata path = %s", XPaths.nsr_opdata())
+
+            while True:
+                nsrs = yield from self.querier.get_nsr_opdatas()
+                if termination:
+                    if len(nsrs) != 0:
+                        # Give the NSR records a few more polls to disappear.
+                        for i in range(10):
+                            nsrs = yield from self.querier.get_nsr_opdatas()
+                            if len(nsrs) == 0:
+                                self.log.debug("No active NSR records found. NSR termination successful")
+                                return
+                            yield from asyncio.sleep(1, loop=self.loop)
+
+                        self.log.error("Active NSR records found. NSR termination failed")
+                        self.assertEqual(0, len(nsrs))
+
+                    else:
+                        self.log.debug("No active NSR records found. NSR termination successful")
+                        return
+
+                nsr = nsrs[0]
+                self.log.debug("Got nsr record %s", nsr)
+                if nsr.operational_status == 'running':
+                    self.log.debug("!!! Rcvd NSR with running status !!!")
+                    self.assertEqual("configuring", nsr.config_status)
+                    break
+
+                self.log.debug("Rcvd NSR with %s status", nsr.operational_status)
+                self.log.debug("Sleeping for 10 seconds")
+                yield from asyncio.sleep(10, loop=self.loop)
+
+        @asyncio.coroutine
+        def verify_nsr_config(termination=False):
+            self.log.debug("Verifying nsr config path = %s", XPaths.nsr_config())
+
+            nsr_configs = yield from self.querier.get_nsr_configs()
+            self.assertEqual(1, len(nsr_configs))
+
+            nsr_config = nsr_configs[0]
+            self.assertEqual(
+                    "/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(self.ping_pong.nsd_id),
+                    nsr_config.input_parameter[0].xpath,
+                    )
+
+        @asyncio.coroutine
+        def verify_nsr_config_status(termination=False, nsrid=None):
+            if termination is False and nsrid is not None:
+                self.log.debug("Verifying nsr config status path = %s", XPaths.nsr_opdata(nsrid))
+
+                loop_count = 6
+                loop_sleep = 10
+                while loop_count:
+                    loop_count -= 1
+                    yield from asyncio.sleep(loop_sleep, loop=self.loop)
+                    nsr_opdata_l = yield from self.querier.get_nsr_opdatas(nsrid)
+                    self.assertEqual(1, len(nsr_opdata_l))
+                    nsr_opdata = nsr_opdata_l[0].as_dict()
+                    if ("configured" == nsr_opdata['config_status']):
+                        print("\n###>>> NSR Config Status 'configured' OK <<<###\n")
+                        return
+                self.assertEqual("configured", nsr_opdata['config_status'])
+
+        @asyncio.coroutine
+        def verify_vnfr_record(termination=False):
+            self.log.debug("Verifying vnfr record path = %s, Termination=%d",
+                           XPaths.vnfr(), termination)
+            if termination:
+                for i in range(10):
+                    vnfrs = yield from self.querier.get_vnfrs()
+                    if len(vnfrs) == 0:
+                        return True
+
+                    for vnfr in vnfrs:
+                        self.log.debug("VNFR still exists = %s", vnfr)
+
+                    yield from asyncio.sleep(.5, loop=self.loop)
+
+                assert len(vnfrs) == 0
+
+            while True:
+                vnfrs = yield from self.querier.get_vnfrs()
+                if len(vnfrs) != 0 and termination is False:
+                    vnfr = vnfrs[0]
+                    self.log.debug("Rcvd VNFR with %s status", vnfr.operational_status)
+                    if vnfr.operational_status == 'running':
+                        self.log.debug("!!! Rcvd VNFR with running status !!!")
+                        return True
+
+                    elif vnfr.operational_status == "failed":
+                        self.log.debug("!!! Rcvd VNFR with failed status !!!")
+                        return False
+
+                self.log.debug("Sleeping for 10 seconds")
+                yield from asyncio.sleep(10, loop=self.loop)
+
+
+        @asyncio.coroutine
+        def verify_vnfr_cloud_account(vnf_index, cloud_account):
+            self.log.debug("Verifying vnfr record Cloud account for vnf index = %d is %s", vnf_index,cloud_account)
+            vnfrs = yield from self.querier.get_vnfrs()
+            cloud_accounts = [vnfr.cloud_account for vnfr in vnfrs if vnfr.member_vnf_index_ref == vnf_index]
+            self.log.debug("VNFR cloud account for index %d is %s", vnf_index,cloud_accounts[0])
+            assert cloud_accounts[0] == cloud_account
+
+        @asyncio.coroutine
+        def verify_vlr_record(termination=False):
+            vlr_xpath = XPaths.vlr()
+            self.log.debug("Verifying vlr record path = %s, termination: %s",
+                           vlr_xpath, termination)
+            res_iter = yield from self.dts.query_read(vlr_xpath)
+
+            for i in res_iter:
+                result = yield from i
+                if termination:
+                    self.assertIsNone(result)
+
+                self.log.debug("Got vlr record %s", result)
+
+        @asyncio.coroutine
+        def verify_vlrs(nsr_id, count=0):
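+            # Poll the NSR opdata until the NS is running and reports exactly
+            # 'count' VLRs.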
+            while True:
+                nsrs = yield from self.querier.get_nsr_opdatas()
+                nsr = nsrs[0]
+                self.log.debug("Got nsr record %s", nsr)
+                if nsr.operational_status == 'running':
+                    self.log.debug("!!! Rcvd NSR with running status !!!")
+                    # Check the VLR count
+                    if len(nsr.vlr) == count:
+                        self.log.debug("NSR %s has %d VLRs", nsr_id, count)
+                        break
+
+                self.log.debug("Rcvd NSR %s with %s status", nsr_id, nsr.operational_status)
+                self.log.debug("Sleeping for 10 seconds")
+                yield from asyncio.sleep(10, loop=self.loop)
+
+        @asyncio.coroutine
+        def verify_nsd_ref_count(termination):
+            self.log.debug("Verifying nsd ref count= %s", XPaths.nsd_ref_count())
+            res_iter = yield from self.dts.query_read(XPaths.nsd_ref_count())
+
+            for i in res_iter:
+                result = yield from i
+                self.log.debug("Got nsd ref count record %s", result)
+
+        @asyncio.coroutine
+        def verify_vnfd_ref_count(termination):
+            self.log.debug("Verifying vnfd ref count= %s", XPaths.vnfd_ref_count())
+            res_iter = yield from self.dts.query_read(XPaths.vnfd_ref_count())
+
+            for i in res_iter:
+                result = yield from i
+                self.log.debug("Got vnfd ref count record %s", result)
+
+        @asyncio.coroutine
+        def verify_scale_group_reaches_state(nsr_id, scale_group, index, state, timeout=1000):
+            start_time = time.time()
+            instance_state = None
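+            # Poll the NSR opdata until the scale group instance reports the
+            # requested op_status, failing once 'timeout' seconds elapse.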
+            while (time.time() - start_time) < timeout:
+                results = yield from self.querier.get_nsr_opdatas(nsr_id=nsr_id)
+                if len(results) == 1:
+                    result = results[0]
+                    if len(result.scaling_group_record) == 0:
+                        continue
+
+                    if len(result.scaling_group_record[0].instance) == 0:
+                        continue
+
+                    instance = result.scaling_group_record[0].instance[0]
+                    self.assertEqual(instance.scaling_group_index_ref, index)
+
+                    instance_state = instance.op_status
+                    if instance_state == state:
+                        self.log.debug("Scale group instance reached %s state", state)
+                        return
+
+                yield from asyncio.sleep(1, loop=self.loop)
+
+            self.assertEqual(state, instance_state)
+
+        @asyncio.coroutine
+        def verify_results(termination=False, nsrid=None):
+            yield from verify_vnfr_record(termination)
+            #yield from verify_vlr_record(termination)
+            yield from verify_nsr_opdata(termination)
+            yield from verify_nsr_config(termination)
+            yield from verify_nsd_ref_count(termination)
+            yield from verify_vnfd_ref_count(termination)
+
+            # Config Manager
+            yield from verify_cm_state(termination, nsrid)
+            yield from verify_nsr_config_status(termination, nsrid)
+
+        @asyncio.coroutine
+        def verify_scale_instance(index, termination=False):
+            self.log.debug("Verifying scale record path = %s, Termination=%d",
+                           XPaths.vnfr(), termination)
+            if termination:
+                for i in range(5):
+                    vnfrs = yield from self.querier.get_vnfrs()
+                    if len(vnfrs) == 0:
+                        return True
+
+                    for vnfr in vnfrs:
+                        self.log.debug("VNFR still exists = %s", vnfr)
+
+                assert len(vnfrs) == 0
+
+            while True:
+                vnfrs = yield from self.querier.get_vnfrs()
+                if len(vnfrs) != 0 and termination is False:
+                    vnfr = vnfrs[0]
+                    self.log.debug("Rcvd VNFR with %s status", vnfr.operational_status)
+                    if vnfr.operational_status == 'running':
+                        self.log.debug("!!! Rcvd VNFR with running status !!!")
+                        return True
+
+                    elif vnfr.operational_status == "failed":
+                        self.log.debug("!!! Rcvd VNFR with failed status !!!")
+                        return False
+
+                self.log.debug("Sleeping for 10 seconds")
+                yield from asyncio.sleep(10, loop=self.loop)
+
+        @asyncio.coroutine
+        def terminate_ns(nsr_id):
+            xpath = XPaths.nsr_config(nsr_id)
+            self.log.debug("Terminating network service with path %s", xpath)
+            yield from self.dts.query_delete(xpath, flags=rwdts.XactFlag.ADVISE)
+            self.log.debug("Terminated network service with path %s", xpath)
+
+        @asyncio.coroutine
+        def run_test():
+            yield from self.wait_tasklets()
+
+            cloud_type = "mock"
+            yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account")
+            yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account1")
+
+            yield from self.ping_pong.publish_descriptors()
+
+            # Attempt updating VNFD not in use
+            yield from self.ping_pong.update_ping_vnfd()
+
+            # Attempt updating NSD not in use
+            yield from self.ping_pong.update_nsd()
+
+            # Attempt deleting VNFD not in use
+            yield from self.ping_pong.delete_ping_vnfd()
+
+            # Attempt deleting NSD not in use
+            yield from self.ping_pong.delete_nsd()
+
+            yield from self.ping_pong.publish_descriptors()
+
+            nsr_id = yield from self.nsr_publisher.publish()
+
+            yield from verify_results(nsrid=nsr_id)
+
+            # yield from self.nsr_publisher.create_scale_group_instance("ping_group", 1)
+
+            # yield from verify_scale_group_reaches_state(nsr_id, "ping_group", 1, "running")
+
+            # yield from self.nsr_publisher.delete_scale_group_instance("ping_group", 1)
+
+            yield from asyncio.sleep(10, loop=self.loop)
+
+            # Attempt deleting VNFD in use
+            yield from self.ping_pong.delete_ping_vnfd()
+
+            # Attempt updating NSD in use
+            yield from self.ping_pong.update_nsd()
+
+            # Update NSD in use with new VL
+            yield from self.nsr_publisher.add_nsr_vl()
+
+            # Verify the new VL has been added
+            yield from verify_vlrs(nsr_id, count=2)
+
+            # Delete the added VL
+            yield from self.nsr_publisher.del_nsr_vl()
+
+            # Verify the added VL has been removed
+            yield from verify_vlrs(nsr_id, count=1)
+
+            # Attempt deleting NSD in use
+            yield from self.ping_pong.delete_nsd()
+
+            yield from terminate_ns(nsr_id)
+
+            yield from asyncio.sleep(25, loop=self.loop)
+            self.log.debug("Verifying termination results")
+            yield from verify_results(termination=True, nsrid=nsr_id)
+            self.log.debug("Verified termination results")
+
+            # Multi site NS case
+            self.log.debug("Testing multi site NS")
+            self.nsr_publisher.update_vnf_cloud_map({1: "mock_account1", 2: "mock_account"})
+            nsr_id = yield from self.nsr_publisher.publish()
+
+            yield from verify_results(nsrid=nsr_id)
+            yield from verify_vnfr_cloud_account(1, "mock_account1")
+            yield from verify_vnfr_cloud_account(2, "mock_account")
+            yield from verify_vlrs(nsr_id, count=2)
+
+            yield from terminate_ns(nsr_id)
+
+            yield from asyncio.sleep(25, loop=self.loop)
+            self.log.debug("Verifying termination results for multi site NS")
+            yield from verify_results(termination=True, nsrid=nsr_id)
+            self.log.debug("Verified termination results for multi site NS")
+
+            self.log.debug("Attempting to delete VNFD for real")
+            yield from self.ping_pong.delete_ping_vnfd()
+
+            self.log.debug("Attempting to delete NSD for real")
+            yield from self.ping_pong.delete_nsd()
+
+        future = asyncio.ensure_future(run_test(), loop=self.loop)
+        self.run_until(future.done)
+        if future.exception() is not None:
+            self.log.error("Caught exception during test")
+            raise future.exception()
+
+
+def main():
+    plugin_dir = os.path.join(os.environ["RIFT_INSTALL"], "usr/lib/rift/plugins")
+    if 'VNS_DIR' not in os.environ:
+        os.environ['VNS_DIR'] = os.path.join(plugin_dir, 'rwvns')
+
+    if 'VNFM_DIR' not in os.environ:
+        os.environ['VNFM_DIR'] = os.path.join(plugin_dir, 'rwvnfm')
+
+    if 'NSM_DIR' not in os.environ:
+        os.environ['NSM_DIR'] = os.path.join(plugin_dir, 'rwnsm')
+
+    if 'RM_DIR' not in os.environ:
+        os.environ['RM_DIR'] = os.path.join(plugin_dir, 'rwresmgrtasklet')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unittest_args = parser.parse_known_args()
+    if args.no_runner:
+        runner = None
+
+    ManoTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
+
+if __name__ == '__main__':
+    main()
+
+# vim: sw=4
diff --git a/rwlaunchpad/test/mgmt_recovery.py b/rwlaunchpad/test/mgmt_recovery.py
new file mode 100755 (executable)
index 0000000..29f0ab0
--- /dev/null
@@ -0,0 +1,385 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import logging
+import os
+import resource
+import socket
+import sys
+import subprocess
+import shlex
+import shutil
+import netifaces
+
+from rift.rwlib.util import certs
+import rift.rwcal.cloudsim
+import rift.rwcal.cloudsim.net
+import rift.vcs
+import rift.vcs.core as core
+import rift.vcs.demo
+import rift.vcs.vms
+
+import rift.rwcal.cloudsim
+import rift.rwcal.cloudsim.net
+
+from rift.vcs.ext import ClassProperty
+
+logger = logging.getLogger(__name__)
+
+
+class NsmTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a network services manager tasklet.
+    """
+
+    def __init__(self, name='network-services-manager', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a NsmTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+        """
+        super(NsmTasklet, self).__init__(name=name, uid=uid,
+                                         config_ready=config_ready,
+                                         recovery_action=recovery_action,
+                                         data_storetype=data_storetype,
+                                        )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwnsmtasklet')
+    plugin_name = ClassProperty('rwnsmtasklet')
+
+
+class VnsTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a virtual network service tasklet.
+    """
+
+    def __init__(self, name='virtual-network-service', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a VnsTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+        """
+        super(VnsTasklet, self).__init__(name=name, uid=uid,
+                                         config_ready=config_ready,
+                                         recovery_action=recovery_action,
+                                         data_storetype=data_storetype,
+                                        )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwvnstasklet')
+    plugin_name = ClassProperty('rwvnstasklet')
+
+
+class VnfmTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a virtual network function manager tasklet.
+    """
+
+    def __init__(self, name='virtual-network-function-manager', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a VnfmTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+        """
+        super(VnfmTasklet, self).__init__(name=name, uid=uid,
+                                          config_ready=config_ready,
+                                          recovery_action=recovery_action,
+                                          data_storetype=data_storetype,
+                                         )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwvnfmtasklet')
+    plugin_name = ClassProperty('rwvnfmtasklet')
+
+
+class ResMgrTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a Resource Manager tasklet.
+    """
+
+    def __init__(self, name='Resource-Manager', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a ResMgrTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+        """
+        super(ResMgrTasklet, self).__init__(name=name, uid=uid,
+                                            config_ready=config_ready,
+                                            recovery_action=recovery_action,
+                                            data_storetype=data_storetype,
+                                           )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwresmgrtasklet')
+    plugin_name = ClassProperty('rwresmgrtasklet')
+
+
+class MonitorTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a tasklet that is used to monitor NFVI metrics.
+    """
+
+    def __init__(self, name='nfvi-metrics-monitor', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a MonitorTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+
+        """
+        super(MonitorTasklet, self).__init__(name=name, uid=uid,
+                                             config_ready=config_ready,
+                                             recovery_action=recovery_action,
+                                             data_storetype=data_storetype,
+                                            )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwmonitor')
+    plugin_name = ClassProperty('rwmonitor')
+
+
+def get_ui_ssl_args():
+    """Returns the SSL parameter string for launchpad UI processes"""
+
+    try:
+        use_ssl, certfile_path, keyfile_path = certs.get_bootstrap_cert_and_key()
+    except certs.BootstrapSslMissingException:
+        logger.error('No bootstrap certificates found.  Disabling UI SSL')
+        use_ssl = False
+
+    # If we're not using SSL, no SSL arguments are necessary
+    if not use_ssl:
+        return ""
+
+    return "--enable-https --keyfile-path=%s --certfile-path=%s" % (keyfile_path, certfile_path)
+
+
+class UIServer(rift.vcs.NativeProcess):
+    def __init__(self, name="RW.MC.UI",
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        super(UIServer, self).__init__(
+                name=name,
+                exe="./usr/share/rw.ui/skyquake/scripts/launch_ui.sh",
+                config_ready=config_ready,
+                recovery_action=recovery_action,
+                data_storetype=data_storetype,
+                )
+
+    @property
+    def args(self):
+        return get_ui_ssl_args()
+
+
+class RedisServer(rift.vcs.NativeProcess):
+    def __init__(self, name="RW.Redis.Server",
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        super(RedisServer, self).__init__(
+                name=name,
+                exe="/usr/bin/redis-server",
+                config_ready=config_ready,
+                recovery_action=recovery_action,
+                data_storetype=data_storetype,
+                )
+
+    @property
+    def args(self):
+        return "./usr/bin/active_redis.conf --port 9999"
+
+
+class ConfigManagerTasklet(rift.vcs.core.Tasklet):
+    """
+    This class represents a configuration manager tasklet.
+    """
+
+    def __init__(self, name='Configuration-Manager', uid=None,
+                 config_ready=True,
+                 recovery_action=core.RecoveryType.FAILCRITICAL.value,
+                 data_storetype=core.DataStore.NOSTORE.value,
+                 ):
+        """
+        Creates a ConfigManagerTasklet object.
+
+        Arguments:
+            name  - the name of the tasklet
+            uid   - a unique identifier
+        """
+        super(ConfigManagerTasklet, self).__init__(name=name, uid=uid,
+                                                   config_ready=config_ready,
+                                                   recovery_action=recovery_action,
+                                                   data_storetype=data_storetype,
+                                                  )
+
+    plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwconmantasklet')
+    plugin_name = ClassProperty('rwconmantasklet')
+
+
+class Demo(rift.vcs.demo.Demo):
+    def __init__(self, mgmt_ip_list):
+
+        procs = [
+            ConfigManagerTasklet(),
+            UIServer(),
+            RedisServer(),
+            rift.vcs.RestPortForwardTasklet(),
+            rift.vcs.RestconfTasklet(),
+            rift.vcs.RiftCli(),
+            rift.vcs.uAgentTasklet(),
+            rift.vcs.Launchpad(),
+            ]
+
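+        # When a second management VM is supplied, it runs these standby
+        # processes so the uAgent can take over on failover.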
+        standby_procs = [
+            RedisServer(),
+            rift.vcs.uAgentTasklet(mode_active=False),
+            ]
+
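+        # These tasklets use RESTART recovery with their runtime state kept
+        # in Redis (served by the RedisServer process above).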
+        restart_procs = [
+            VnfmTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=core.DataStore.REDIS.value),
+            VnsTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=core.DataStore.REDIS.value),
+            MonitorTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=core.DataStore.REDIS.value),
+            NsmTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=core.DataStore.REDIS.value),
+            ResMgrTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=core.DataStore.REDIS.value),
+            ]
+        super(Demo, self).__init__(
+            # Construct the system. This system consists of 1 cluster in 1
+            # colony. The master cluster houses CLI and management VMs
+            sysinfo = rift.vcs.SystemInfo(
+                    zookeeper=rift.vcs.manifest.RaZookeeper(zake=False, master_ip=mgmt_ip_list[0]),
+                    colonies=[
+                            rift.vcs.Colony(
+                                name='master',
+                                uid=1,
+                                clusters=[
+                                    rift.vcs.VirtualMachine(
+                                        name='vm-templ-1',
+                                        ip=mgmt_ip_list[0],
+                                        procs=procs,
+                                        restart_procs=restart_procs,
+                                        ),
+                                    rift.vcs.VirtualMachine(
+                                        name='vm-templ-2',
+                                        ip=mgmt_ip_list[1],
+                                        standby_procs=standby_procs,
+                                        start=False,
+                                        ),
+                                    ] if len(mgmt_ip_list) == 2 else [
+                                    rift.vcs.VirtualMachine(
+                                        name='vm-templ-1',
+                                        ip=mgmt_ip_list[0],
+                                        procs=procs,
+                                        restart_procs=restart_procs,
+                                        ),
+                                    ]
+                                )
+                            ],
+                        ),
+
+            # Define the generic portmap.
+            port_map = {},
+
+            # Define a mapping from the placeholder logical names to the real
+            # port names for each of the different modes supported by this demo.
+            port_names = {
+                'ethsim': {
+                },
+                'pci': {
+                }
+            },
+
+            # Define the connectivity between logical port names.
+            port_groups = {},
+        )
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='%(asctime)-15s %(levelname)s %(message)s')
+
+    # Create a parser which includes all generic demo arguments
+    parser = rift.vcs.demo.DemoArgParser()
+
+    args = parser.parse_args(argv)
+
+    # Disable loading any kernel modules for the launchpad VM
+    # since it doesn't need it and it will fail within containers
+    os.environ["NO_KERNEL_MODS"] = "1"
+
+    # Remove the persistent DTS recovery files
+    for f in os.listdir(os.environ["INSTALLDIR"]):
+        if f.endswith(".db"):
+            os.remove(os.path.join(os.environ["INSTALLDIR"], f))
+
+    # Load demo info and create the Demo object
+    demo = Demo(args.mgmt_ip_list)
+
+    # Create the prepared system from the demo
+    system = rift.vcs.demo.prepared_system_from_demo_and_args(demo, args, 
+              northbound_listing="cli_launchpad_schema_listing.txt",
+              netconf_trace_override=True)
+
+    confd_ip = socket.gethostbyname(socket.gethostname())
+    intf = netifaces.ifaddresses('eth0')
+    if intf and netifaces.AF_INET in intf and len(intf[netifaces.AF_INET]):
+        confd_ip = intf[netifaces.AF_INET][0]['addr']
+    rift.vcs.logger.configure_sink(config_file=None, confd_ip=confd_ip)
+
+    # Start the prepared system
+    system.start()
+
+
+if __name__ == "__main__":
+    resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY) )
+    try:
+        main()
+    except rift.vcs.demo.ReservationError:
+        print("ERROR: unable to retrieve a list of IP addresses from the reservation system")
+        sys.exit(1)
+    except rift.vcs.demo.MissingModeError:
+        print("ERROR: you need to provide a mode to run the script")
+        sys.exit(1)
+    finally:
+        os.system("stty sane")
diff --git a/rwlaunchpad/test/pytest/lp_kt_utm_test.py b/rwlaunchpad/test/pytest/lp_kt_utm_test.py
new file mode 100644 (file)
index 0000000..0a8d6ba
--- /dev/null
@@ -0,0 +1,306 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file lp_kt_utm_test.py
+@author Austin Cormier (Austin.Cormier@riftio.com)
+@date 10/15/2015
+@brief Launchpad Module Test
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import requests
+import subprocess
+import time
+import uuid
+import gi
+
+gi.require_version('RwlogMgmtYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+gi.require_version('RwIwpYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+
+from gi.repository import (
+        NsdYang,
+        NsrYang,
+        RwBaseYang,
+        RwCloudYang,
+        RwIwpYang,
+        RwlogMgmtYang,
+        RwNsmYang,
+        RwNsrYang,
+        RwResourceMgrYang,
+        RwConmanYang,
+        RwVnfdYang,
+        VldYang,
+        )
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+RW_KT_UTM_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/kt_utm"
+    )
+
+RW_KT_UTM_NSD_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/nsds/utm_only"
+    )
+
+
+class PackageError(Exception):
+    pass
+
+
+def raise_package_error():
+    raise PackageError("Could not find ns packages")
+
+
+@pytest.fixture(scope='module')
+def iwp_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwIwpYang)
+
+
+@pytest.fixture(scope='module')
+def rwlog_mgmt_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwlogMgmtYang)
+
+
+@pytest.fixture(scope='module')
+def resource_mgr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwResourceMgrYang)
+
+
+@pytest.fixture(scope='module')
+def cloud_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwCloudYang)
+
+
+@pytest.fixture(scope='module')
+def vnfd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfdYang)
+
+
+@pytest.fixture(scope='module')
+def vld_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VldYang)
+
+
+@pytest.fixture(scope='module')
+def nsd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsdYang)
+
+
+@pytest.fixture(scope='module')
+def nsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsrYang)
+
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+
+@pytest.fixture(scope='module')
+def base_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwBaseYang)
+
+
+@pytest.fixture(scope='module')
+def so_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwConmanYang)
+
+
+@pytest.fixture(scope='module')
+def nsm_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsmYang)
+
+
+@pytest.fixture(scope='session')
+def kt_utm_vnfd_package_file():
+    ktutm_pkg_file = os.path.join(
+            RW_KT_UTM_PKG_INSTALL_DIR,
+            "kt_utm_vnfd.tar.gz",
+            )
+    if not os.path.exists(ktutm_pkg_file):
+        raise_package_error()
+
+    return ktutm_pkg_file
+
+@pytest.fixture(scope='session')
+def utm_only_nsd_package_file():
+    ktutm_nsd_pkg_file = os.path.join(
+            RW_KT_UTM_NSD_PKG_INSTALL_DIR,
+            "utm_only_nsd.tar.gz",
+            )
+    if not os.path.exists(ktutm_nsd_pkg_file):
+        raise_package_error()
+
+    return ktutm_nsd_pkg_file
+
+def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
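+    """Upload a descriptor package via the launchpad upload API and return the transaction id."""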
+    curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format(
+            file=descriptor_file,
+            host=host,
+            )
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    pass
+
+
+def wait_onboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
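+    """Poll the upload transaction state until it succeeds; raise DescriptorOnboardError on failure or timeout."""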
+    logger.info("Waiting for onboard trans_id %s to complete",
+                transaction_id)
+    start_time = time.time()
+    while (time.time() - start_time) < timeout_secs:
+        r = requests.get(
+                'http://{host}:4567/api/upload/{t_id}/state'.format(
+                    host=host, t_id=transaction_id
+                    )
+                )
+        state = r.json()
+        if state["status"] == "pending":
+            time.sleep(1)
+            continue
+
+        elif state["status"] == "success":
+            logger.info("Descriptor onboard was successful")
+            return
+
+        else:
+            raise DescriptorOnboardError(state)
+
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+
+def create_nsr_from_nsd_id(nsd_id):
+    nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+    nsr.id = str(uuid.uuid4())
+    nsr.name = "UTM-only"
+    nsr.short_name = "UTM-only"
+    nsr.description = "1 VNF with 5 VLs"
+    nsr.nsd_ref = nsd_id
+    nsr.admin_status = "ENABLED"
+
+    return nsr
+
+@pytest.mark.incremental
+class TestLaunchpadStartStop(object):
+    def test_configure_logging(self, rwlog_mgmt_proxy):
+        logging = RwlogMgmtYang.Logging.from_dict({
+                "console": {
+                    "on": True,
+                    "filter": {
+                        "category": [{
+                            "name": "rw-generic",
+                            "severity": "error"
+                            }],
+                        }
+                    }
+                })
+        rwlog_mgmt_proxy.merge_config("/rwlog-mgmt:logging", logging)
+
+    def test_configure_cloud_account(self, cloud_proxy, logger):
+        cloud_account = RwCloudYang.CloudAccountConfig()
+        # cloud_account.name = "cloudsim_proxy"
+        # cloud_account.account_type = "cloudsim_proxy"
+        cloud_account.name = "openstack"
+        cloud_account.account_type = "openstack"
+        cloud_account.openstack.key = 'pluto'
+        cloud_account.openstack.secret = 'mypasswd'
+        cloud_account.openstack.auth_url = 'http://10.66.4.13:5000/v3/'
+        cloud_account.openstack.tenant = 'demo'
+        cloud_account.openstack.mgmt_network = 'private'
+
+        cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+
+    def test_configure_pools(self, resource_mgr_proxy):
+        pools = RwResourceMgrYang.ResourcePools.from_dict({
+            "pools": [{ "name": "vm_pool_a",
+                        "resource_type": "compute",
+                        "pool_type" : "dynamic"},
+                      {"name": "network_pool_a",
+                       "resource_type": "network",
+                       "pool_type" : "dynamic",}]})
+
+        resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+
+    def test_configure_resource_orchestrator(self, so_proxy):
+        cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
+                                                'ro_port'      :  2022,
+                                                'ro_username'  : 'admin',
+                                                'ro_password'  : 'admin'})
+        so_proxy.merge_config('/rw-conman:cm-config', cfg)
+
+    def test_configure_service_orchestrator(self, nsm_proxy):
+        cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
+                                              'cm_port'      :  2022,
+                                              'cm_username'  : 'admin',
+                                              'cm_password'  : 'admin'})
+        nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
+
+
+    def test_onboard_ktutm_vnfd(self, logger, vnfd_proxy, kt_utm_vnfd_package_file):
+        logger.info("Onboarding kt_utm_vnfd package: %s", kt_utm_vnfd_package_file)
+        trans_id = upload_descriptor(logger, kt_utm_vnfd_package_file)
+        wait_onboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 1, "There should only be a single vnfd"
+        vnfd = vnfds[0]
+        assert vnfd.name == "kt_utm_vnfd"
+
+    def test_onboard_utm_only_nsd(self, logger, nsd_proxy, utm_only_nsd_package_file):
+          logger.info("Onboarding utm_onlynsd package: %s", utm_only_nsd_package_file)
+          trans_id = upload_descriptor(logger, utm_only_nsd_package_file)
+          wait_unboard_transaction_finished(logger, trans_id)
+  
+          catalog = nsd_proxy.get_config('/nsd-catalog')
+          nsds = catalog.nsd
+          assert len(nsds) == 1, "There should only be a single nsd"
+          nsd = nsds[0]
+  
+    def test_instantiate_utm_only_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        nsr = create_nsr_from_nsd_id(nsd.id)
+        nsr_proxy.merge_config('/ns-instance-config', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+        assert len(nsrs) == 1
+        assert nsrs[0].ns_instance_config_ref == nsr.id
diff --git a/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py b/rwlaunchpad/test/pytest/lp_kt_utm_wims_test.py
new file mode 100644 (file)
index 0000000..705565b
--- /dev/null
@@ -0,0 +1,333 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file lp_kt_utm_wims_test.py
+@author Austin Cormier (Austin.Cormier@riftio.com)
+@date 10/15/2015
+@brief Launchpad Module Test
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import requests
+import subprocess
+import time
+import uuid
+import gi
+
+gi.require_version('RwlogMgmtYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwIwpYang', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+
+from gi.repository import (
+        NsdYang,
+        NsrYang,
+        RwBaseYang,
+        RwCloudYang,
+        RwIwpYang,
+        RwlogMgmtYang,
+        RwNsmYang,
+        RwNsrYang,
+        RwResourceMgrYang,
+        RwConmanYang,
+        RwVnfdYang,
+        VldYang,
+        )
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+RW_KT_UTM_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/kt_utm"
+    )
+
+RW_KT_WIMS_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/kt_wims"
+    )
+
+RW_KT_UTM_WIMS_NSD_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/nsds/utm_wims"
+    )
+
+
+class PackageError(Exception):
+    pass
+
+
+def raise_package_error():
+    raise PackageError("Could not find ns packages")
+
+
+@pytest.fixture(scope='module')
+def iwp_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwIwpYang)
+
+
+@pytest.fixture(scope='module')
+def rwlog_mgmt_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwlogMgmtYang)
+
+
+@pytest.fixture(scope='module')
+def resource_mgr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwResourceMgrYang)
+
+
+@pytest.fixture(scope='module')
+def cloud_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwCloudYang)
+
+
+@pytest.fixture(scope='module')
+def vnfd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfdYang)
+
+
+@pytest.fixture(scope='module')
+def vld_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VldYang)
+
+
+@pytest.fixture(scope='module')
+def nsd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsdYang)
+
+
+@pytest.fixture(scope='module')
+def nsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsrYang)
+
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+
+@pytest.fixture(scope='module')
+def base_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwBaseYang)
+
+
+@pytest.fixture(scope='module')
+def so_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwConmanYang)
+
+
+@pytest.fixture(scope='module')
+def nsm_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsmYang)
+
+
+@pytest.fixture(scope='session')
+def kt_utm_vnfd_package_file():
+    ktutm_pkg_file = os.path.join(
+            RW_KT_UTM_PKG_INSTALL_DIR,
+            "kt_utm_vnfd.tar.gz",
+            )
+    if not os.path.exists(ktutm_pkg_file):
+        raise_package_error()
+
+    return ktutm_pkg_file
+
+@pytest.fixture(scope='session')
+def kt_wims_vnfd_package_file():
+    ktwims_pkg_file = os.path.join(
+            RW_KT_WIMS_PKG_INSTALL_DIR,
+            "kt_wims_vnfd.tar.gz",
+            )
+    if not os.path.exists(ktwims_pkg_file):
+        raise_package_error()
+
+    return ktwims_pkg_file
+
+@pytest.fixture(scope='session')
+def utm_wims_nsd_package_file():
+    ktutm_wims_nsd_pkg_file = os.path.join(
+            RW_KT_UTM_WIMS_NSD_PKG_INSTALL_DIR,
+            "utm_wims_nsd.tar.gz",
+            )
+    if not os.path.exists(ktutm_wims_nsd_pkg_file):
+        raise_package_error()
+
+    return ktutm_wims_nsd_pkg_file
+
+def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
+    curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format(
+            file=descriptor_file,
+            host=host,
+            )
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    pass
+
+
+def wait_onboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+    logger.info("Waiting for onboard trans_id %s to complete",
+                transaction_id)
+    start_time = time.time()
+    while (time.time() - start_time) < timeout_secs:
+        r = requests.get(
+                'http://{host}:4567/api/upload/{t_id}/state'.format(
+                    host=host, t_id=transaction_id
+                    )
+                )
+        state = r.json()
+        if state["status"] == "pending":
+            time.sleep(1)
+            continue
+
+        elif state["status"] == "success":
+            logger.info("Descriptor onboard was successful")
+            return
+
+        else:
+            raise DescriptorOnboardError(state)
+
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+
+def create_nsr_from_nsd_id(nsd_id):
+    nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+    nsr.id = str(uuid.uuid4())
+    nsr.name = "UTM-WIMS"
+    nsr.short_name = "UTM-WIMS"
+    nsr.description = "2 VNFs with 4 VLs"
+    nsr.nsd_ref = nsd_id
+    nsr.admin_status = "ENABLED"
+
+    return nsr
+
+@pytest.mark.incremental
+class TestLaunchpadStartStop(object):
+    def test_configure_logging(self, rwlog_mgmt_proxy):
+        logging = RwlogMgmtYang.Logging.from_dict({
+                "console": {
+                    "on": True,
+                    "filter": {
+                        "category": [{
+                            "name": "rw-generic",
+                            "severity": "error"
+                            }],
+                        }
+                    }
+                })
+        rwlog_mgmt_proxy.merge_config("/rwlog-mgmt:logging", logging)
+
+    def test_configure_cloud_account(self, cloud_proxy, logger):
+        cloud_account = RwCloudYang.CloudAccountConfig()
+        # cloud_account.name = "cloudsim_proxy"
+        # cloud_account.account_type = "cloudsim_proxy"
+        cloud_account.name = "openstack"
+        cloud_account.account_type = "openstack"
+        cloud_account.openstack.key = 'pluto'
+        cloud_account.openstack.secret = 'mypasswd'
+        cloud_account.openstack.auth_url = 'http://10.66.4.xx:5000/v3/'
+        cloud_account.openstack.tenant = 'demo'
+        cloud_account.openstack.mgmt_network = 'private'
+
+        cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+
+    def test_configure_pools(self, resource_mgr_proxy):
+        pools = RwResourceMgrYang.ResourcePools.from_dict({
+            "pools": [{ "name": "vm_pool_a",
+                        "resource_type": "compute",
+                        "pool_type" : "dynamic"},
+                      {"name": "network_pool_a",
+                       "resource_type": "network",
+                       "pool_type" : "dynamic",}]})
+
+        resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+
+    def test_configure_resource_orchestrator(self, so_proxy):
+        cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
+                                                'ro_port'      :  2022,
+                                                'ro_username'  : 'admin',
+                                                'ro_password'  : 'admin'})
+        so_proxy.merge_config('/rw-conman:cm-config', cfg)
+
+    def test_configure_service_orchestrator(self, nsm_proxy):
+        cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
+                                              'cm_port'      :  2022,
+                                              'cm_username'  : 'admin',
+                                              'cm_password'  : 'admin'})
+        nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
+
+
+    def test_onboard_ktutm_vnfd(self, logger, vnfd_proxy, kt_utm_vnfd_package_file):
+        logger.info("Onboarding kt_utm_vnfd package: %s", kt_utm_vnfd_package_file)
+        trans_id = upload_descriptor(logger, kt_utm_vnfd_package_file)
+        wait_onboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 1, "There should only be a single vnfd"
+        vnfd = vnfds[0]
+        assert vnfd.name == "kt_utm_vnfd"
+
+    def test_onboard_ktwims_vnfd(self, logger, vnfd_proxy, kt_wims_vnfd_package_file):
+        logger.info("Onboarding kt_wims_vnfd package: %s", kt_wims_vnfd_package_file)
+        trans_id = upload_descriptor(logger, kt_wims_vnfd_package_file)
+        wait_onboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 2, "There should be exactly two vnfds"
+        assert "kt_wims_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+    def test_onboard_utm_wims_nsd(self, logger, nsd_proxy, utm_wims_nsd_package_file):
+        logger.info("Onboarding utm_wims_nsd package: %s", utm_wims_nsd_package_file)
+        trans_id = upload_descriptor(logger, utm_wims_nsd_package_file)
+        wait_onboard_transaction_finished(logger, trans_id)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        nsd = nsds[0]
+
+    def test_instantiate_utm_wims_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+        nsr = create_nsr_from_nsd_id(nsd.id)
+        nsr_proxy.merge_config('/ns-instance-config', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+        assert len(nsrs) == 1
+        assert nsrs[0].ns_instance_config_ref == nsr.id
diff --git a/rwlaunchpad/test/pytest/lp_test.py b/rwlaunchpad/test/pytest/lp_test.py
new file mode 100644 (file)
index 0000000..b987b35
--- /dev/null
@@ -0,0 +1,390 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file lp_test.py
+@author Austin Cormier (Austin.Cormier@riftio.com)
+@date 10/15/2015
+@brief Launchpad Module Test
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import requests
+import subprocess
+import time
+import uuid
+import datetime
+
+import gi
+gi.require_version('RwBaseYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwIwpYang', '1.0')
+gi.require_version('RwlogMgmtYang', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+
+from gi.repository import (
+        NsdYang,
+        NsrYang,
+        RwBaseYang,
+        RwCloudYang,
+        RwIwpYang,
+        RwlogMgmtYang,
+        RwNsmYang,
+        RwNsrYang,
+        RwResourceMgrYang,
+        RwConmanYang,
+        RwVnfdYang,
+        VldYang,
+        )
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+RW_PING_PONG_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_ROOT"],
+    "images"
+    )
+
+class PackageError(Exception):
+    pass
+
+
+def raise_package_error():
+    raise PackageError("Could not find ns packages")
+
+
+@pytest.fixture(scope='module')
+def iwp_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwIwpYang)
+
+
+@pytest.fixture(scope='module')
+def rwlog_mgmt_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwlogMgmtYang)
+
+
+@pytest.fixture(scope='module')
+def resource_mgr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwResourceMgrYang)
+
+
+@pytest.fixture(scope='module')
+def cloud_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwCloudYang)
+
+
+@pytest.fixture(scope='module')
+def vnfd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfdYang)
+
+
+@pytest.fixture(scope='module')
+def vld_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VldYang)
+
+
+@pytest.fixture(scope='module')
+def nsd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsdYang)
+
+
+@pytest.fixture(scope='module')
+def nsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsrYang)
+
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+
+@pytest.fixture(scope='module')
+def base_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwBaseYang)
+
+
+@pytest.fixture(scope='module')
+def so_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwConmanYang)
+
+
+@pytest.fixture(scope='module')
+def nsm_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsmYang)
+
+
+@pytest.fixture(scope='session')
+def ping_vnfd_package_file():
+    ping_pkg_file = os.path.join(
+            RW_PING_PONG_PKG_INSTALL_DIR,
+            "ping_vnfd_with_image.tar.gz",
+            )
+    if not os.path.exists(ping_pkg_file):
+        raise_package_error()
+
+    return ping_pkg_file
+
+
+@pytest.fixture(scope='session')
+def pong_vnfd_package_file():
+    pong_pkg_file = os.path.join(
+            RW_PING_PONG_PKG_INSTALL_DIR,
+            "pong_vnfd_with_image.tar.gz",
+            )
+    if not os.path.exists(pong_pkg_file):
+        raise_package_error()
+
+    return pong_pkg_file
+
+
+@pytest.fixture(scope='session')
+def ping_pong_nsd_package_file():
+    ping_pong_pkg_file = os.path.join(
+            RW_PING_PONG_PKG_INSTALL_DIR,
+            "ping_pong_nsd.tar.gz",
+            )
+    if not os.path.exists(ping_pong_pkg_file):
+        raise_package_error()
+
+    return ping_pong_pkg_file
+
+
+def create_nsr_from_nsd_id(nsd_id):
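+    """Build an NSR config message referencing the given NSD id.
+
+    The NSR is named with a timestamp, targets the "openstack" cloud
+    account, and overrides the NSD vendor leaf via an input parameter.
+    """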
+    nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+    nsr.id = str(uuid.uuid4())
+    nsr.name = "pingpong_{}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
+    nsr.short_name = "nsr_short_name"
+    nsr.description = "This is a description"
+    nsr.nsd_ref = nsd_id
+    nsr.admin_status = "ENABLED"
+    nsr.cloud_account = "openstack"
+
+    param = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter()
+    param.xpath = '/nsd:nsd-catalog/nsd:nsd/nsd:vendor'
+    param.value = "rift-o-matic"
+
+    nsr.input_parameter.append(param)
+
+    return nsr
+
+
+def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
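+    """Upload a descriptor package to the launchpad composer API.
+
+    Shells out to curl to POST the package file to the upload endpoint
+    and returns the transaction id parsed from the JSON response.
+    """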
+    curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format(
+            file=descriptor_file,
+            host=host,
+            )
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    pass
+
+
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
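+    """Poll the upload state endpoint until onboarding completes.
+
+    Checks the transaction state once per second; returns on "success"
+    and raises DescriptorOnboardError on any failure state or timeout.
+    """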
+    logger.info("Waiting for onboard trans_id %s to complete",
+                transaction_id)
+    start_time = time.time()
+    while (time.time() - start_time) < timeout_secs:
+        r = requests.get(
+                'http://{host}:4567/api/upload/{t_id}/state'.format(
+                    host=host, t_id=transaction_id
+                    )
+                )
+        state = r.json()
+        if state["status"] == "pending":
+            time.sleep(1)
+            continue
+
+        elif state["status"] == "success":
+            logger.info("Descriptor onboard was successful")
+            return
+
+        else:
+            raise DescriptorOnboardError(state)
+
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+
+
+@pytest.mark.incremental
+class TestLaunchpadStartStop(object):
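+    """Incremental launchpad module test.
+
+    Each test depends on state left by the previous one: configure logging
+    and a cloud account, onboard the ping and pong VNFD packages and the
+    ping_pong NSD, then instantiate the network service.
+    """
+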
+    def test_configure_logging(self, rwlog_mgmt_proxy):
+        logging_cfg = RwlogMgmtYang.Logging.from_dict({
+                "console": {
+                    "on": True,
+                    "filter": {
+                        "category": [{
+                            "name": "rw-generic",
+                            "severity": "error"
+                            }],
+                        }
+                    }
+                })
+        rwlog_mgmt_proxy.merge_config("/rwlog-mgmt:logging", logging_cfg)
+
+    def test_configure_cloud_account(self, cloud_proxy, logger):
+        cloud_account = RwCloudYang.CloudAccount()
+        # cloud_account.name = "cloudsim_proxy"
+        # cloud_account.account_type = "cloudsim_proxy"
+        cloud_account.name = "openstack"
+        cloud_account.account_type = "openstack"
+        cloud_account.openstack.key = 'pluto'
+        cloud_account.openstack.secret = 'mypasswd'
+        cloud_account.openstack.auth_url = 'http://10.96.4.2:5000/v3/'
+        cloud_account.openstack.tenant = 'mano1'
+        cloud_account.openstack.mgmt_network = 'private1'
+
+        cloud_proxy.merge_config("/rw-cloud:cloud/account", cloud_account)
+
+    def test_onboard_ping_vnfd(self, logger, vnfd_proxy, ping_vnfd_package_file):
+        logger.info("Onboarding ping_vnfd package: %s", ping_vnfd_package_file)
+        trans_id = upload_descriptor(logger, ping_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 1, "There should only be a single vnfd"
+        vnfd = vnfds[0]
+        assert vnfd.name == "ping_vnfd"
+
+    def test_onboard_pong_vnfd(self, logger, vnfd_proxy, pong_vnfd_package_file):
+        logger.info("Onboarding pong_vnfd package: %s", pong_vnfd_package_file)
+        trans_id = upload_descriptor(logger, pong_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 2, "There should be two vnfds"
+        assert "pong_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+    def test_onboard_ping_pong_nsd(self, logger, nsd_proxy, ping_pong_nsd_package_file):
+        logger.info("Onboarding ping_pong_nsd package: %s", ping_pong_nsd_package_file)
+        trans_id = upload_descriptor(logger, ping_pong_nsd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        nsd = nsds[0]
+        assert nsd.name == "ping_pong_nsd"
+
+    def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        nsr = create_nsr_from_nsd_id(nsd.id)
+        rwnsr_proxy.merge_config('/ns-instance-config', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+        assert len(nsrs) == 1
+        assert nsrs[0].ns_instance_config_ref == nsr.id
+
+        # logger.info("Waiting up to 30 seconds for ping and pong components to show "
+        #          "up in show tasklet info")
+
+        # start_time = time.time()
+        # while (time.time() - start_time) < 30:
+        #     vcs_info = base_proxy.get('/vcs/info')
+        #     components = vcs_info.components.component_info
+
+        #     def find_component_by_name(name):
+        #         for component in components:
+        #             if name in component.component_name:
+        #                 return component
+
+        #         logger.warning("Did not find %s component name in show tasklet info",
+        #                     name)
+
+        #         return None
+
+        #     """
+        #     ping_cluster_component = find_component_by_name(
+        #             "rw_ping_vnfd:rwping_cluster"
+        #             )
+        #     if ping_cluster_component is None:
+        #         continue
+
+        #     pong_cluster_component = find_component_by_name(
+        #             "rw_pong_vnfd:rwpong_cluster"
+        #             )
+        #     if pong_cluster_component is None:
+        #         continue
+        #     """
+
+        #     ping_vm_component = find_component_by_name(
+        #             "rw_ping_vnfd:rwping_vm"
+        #             )
+        #     if ping_vm_component is None:
+        #         continue
+
+        #     pong_vm_component = find_component_by_name(
+        #             "rw_pong_vnfd:rwpong_vm"
+        #             )
+        #     if pong_vm_component is None:
+        #         continue
+
+        #     ping_proc_component = find_component_by_name(
+        #             "rw_ping_vnfd:rwping_proc"
+        #             )
+        #     if ping_proc_component is None:
+        #         continue
+
+        #     pong_proc_component = find_component_by_name(
+        #             "rw_pong_vnfd:rwpong_proc"
+        #             )
+        #     if pong_proc_component is None:
+        #         continue
+
+        #     ping_tasklet_component = find_component_by_name(
+        #             "rw_ping_vnfd:rwping_tasklet"
+        #             )
+        #     if ping_tasklet_component is None:
+        #         continue
+
+        #     pong_tasklet_component = find_component_by_name(
+        #             "rw_pong_vnfd:rwpong_tasklet"
+        #             )
+        #     if pong_tasklet_component is None:
+        #         continue
+
+        #     logger.info("TEST SUCCESSFUL: All ping and pong components were found in show tasklet info")
+        #     break
+
+        # else:
+        #     assert False, "Did not find all ping and pong component in time"
+
+    #def test_terminate_ping_pong_ns(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
+    #    nsr_configs = nsr_proxy.get_config('/ns-instance-config')
+    #    nsr = nsr_configs.nsr[0]
+    #    nsr_id = nsr.id
+
+    #    nsr_configs = nsr_proxy.delete_config("/ns-instance-config/nsr[id='{}']".format(nsr_id))
diff --git a/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py b/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_epa_test.py
new file mode 100644 (file)
index 0000000..16a8990
--- /dev/null
@@ -0,0 +1,325 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file lp_tg_2vrouter_ts_epa_test.py
+@author Austin Cormier (Austin.Cormier@riftio.com)
+@date 10/15/2015
+@brief Launchpad Module Test TG-2Vrouter-TS EPA
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import requests
+import subprocess
+import time
+import uuid
+
+import gi
+gi.require_version('RwIwpYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+
+
+
+from gi.repository import (
+        RwIwpYang,
+        NsdYang,
+        NsrYang,
+        RwNsrYang,
+        VldYang,
+        RwVnfdYang,
+        RwCloudYang,
+        RwBaseYang,
+        RwResourceMgrYang,
+        RwConmanYang,
+        RwNsmYang,
+        )
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+RW_VROUTER_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/vrouter"
+    )
+RW_TRAFGEN_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/trafgen"
+    )
+RW_TRAFSINK_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/trafsink"
+    )
+RW_TG_2VROUTER_TS_NSD_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/nsds/tg_2vrouter_ts"
+    )
+
+
+class PackageError(Exception):
+    pass
+
+
+def raise_package_error():
+    raise PackageError("Could not find ns packages")
+
+
+@pytest.fixture(scope='module')
+def iwp_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwIwpYang)
+
+@pytest.fixture(scope='module')
+def resource_mgr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwResourceMgrYang)
+
+
+@pytest.fixture(scope='module')
+def cloud_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwCloudYang)
+
+
+@pytest.fixture(scope='module')
+def vnfd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfdYang)
+
+
+@pytest.fixture(scope='module')
+def vld_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VldYang)
+
+
+@pytest.fixture(scope='module')
+def nsd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsdYang)
+
+
+@pytest.fixture(scope='module')
+def nsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsrYang)
+
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+
+@pytest.fixture(scope='module')
+def base_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwBaseYang)
+
+@pytest.fixture(scope='module')
+def so_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwConmanYang)
+
+@pytest.fixture(scope='module')
+def nsm_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsmYang)
+
+@pytest.fixture(scope='session')
+def vrouter_vnfd_package_file():
+    vrouter_pkg_file = os.path.join(
+            RW_VROUTER_PKG_INSTALL_DIR,
+            "vrouter_vnfd_with_epa.tar.gz",
+            )
+    if not os.path.exists(vrouter_pkg_file):
+        raise_package_error()
+
+    return vrouter_pkg_file
+
+@pytest.fixture(scope='session')
+def tg_vnfd_package_file():
+    tg_pkg_file = os.path.join(
+            RW_TRAFGEN_PKG_INSTALL_DIR,
+            "trafgen_vnfd_with_epa.tar.gz",
+            )
+    if not os.path.exists(tg_pkg_file):
+        raise_package_error()
+
+    return tg_pkg_file
+
+@pytest.fixture(scope='session')
+def ts_vnfd_package_file():
+    ts_pkg_file = os.path.join(
+            RW_TRAFSINK_PKG_INSTALL_DIR,
+            "trafsink_vnfd_with_epa.tar.gz",
+            )
+    if not os.path.exists(ts_pkg_file):
+        raise_package_error()
+
+    return ts_pkg_file
+
+@pytest.fixture(scope='session')
+def tg_2vrouter_ts_nsd_package_file():
+    tg_2vrouter_ts_nsd_pkg_file = os.path.join(
+            RW_TG_2VROUTER_TS_NSD_PKG_INSTALL_DIR,
+            "tg_2vrouter_ts_nsd_with_epa.tar.gz",
+            )
+    if not os.path.exists(tg_2vrouter_ts_nsd_pkg_file):
+        raise_package_error()
+
+    return tg_2vrouter_ts_nsd_pkg_file
+
+
+def create_nsr_from_nsd_id(nsd_id):
+    nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+    nsr.id = str(uuid.uuid4())
+    nsr.name = "TG-2Vrouter-TS EPA"
+    nsr.short_name = "TG-2Vrouter-TS EPA"
+    nsr.description = "4 VNFs with Trafgen, 2 Vrouters and Trafsink EPA"
+    nsr.nsd_ref = nsd_id
+    nsr.admin_status = "ENABLED"
+
+    return nsr
+
+
+def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
+    curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format(
+            file=descriptor_file,
+            host=host,
+            )
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    pass
+
+
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+    logger.info("Waiting for onboard trans_id %s to complete",
+             transaction_id)
+    start_time = time.time()
+    while (time.time() - start_time) < timeout_secs:
+        r = requests.get(
+                'http://{host}:4567/api/upload/{t_id}/state'.format(
+                    host=host, t_id=transaction_id
+                    )
+                )
+        state = r.json()
+        if state["status"] == "pending":
+            time.sleep(1)
+            continue
+
+        elif state["status"] == "success":
+            logger.info("Descriptor onboard was successful")
+            return
+
+        else:
+            raise DescriptorOnboardError(state)
+
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+
+@pytest.mark.incremental
+class TestLaunchpadStartStop(object):
+    def test_configure_cloud_account(self, cloud_proxy, logger):
+        cloud_account = RwCloudYang.CloudAccountConfig()
+        #cloud_account.name = "cloudsim_proxy"
+        #cloud_account.account_type = "cloudsim_proxy"
+        cloud_account.name = "riftuser1"
+        cloud_account.account_type = "openstack"
+        cloud_account.openstack.key = 'pluto'
+        cloud_account.openstack.secret = 'mypasswd'
+        cloud_account.openstack.auth_url = 'http://10.66.4.xx:5000/v3/'
+        cloud_account.openstack.tenant = 'demo'
+        cloud_account.openstack.mgmt_network = 'private'
+
+        cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+
+    def test_configure_pools(self, resource_mgr_proxy):
+        pools = RwResourceMgrYang.ResourcePools.from_dict({
+            "pools": [{ "name": "vm_pool_a",
+                        "resource_type": "compute",
+                        "pool_type" : "dynamic"},
+                      {"name": "network_pool_a",
+                       "resource_type": "network",
+                       "pool_type" : "dynamic",}]})
+
+        resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+
+    def test_configure_resource_orchestrator(self, so_proxy):
+        cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
+                                                'ro_port'      :  2022,
+                                                'ro_username'  : 'admin',
+                                                'ro_password'  : 'admin'})
+        so_proxy.merge_config('/rw-conman:cm-config', cfg)
+
+    def test_configure_service_orchestrator(self, nsm_proxy):
+        cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
+                                              'cm_port'      :  2022,
+                                              'cm_username'  : 'admin',
+                                              'cm_password'  : 'admin'})
+        nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
+
+    def test_onboard_tg_vnfd(self, logger, vnfd_proxy, tg_vnfd_package_file):
+        logger.info("Onboarding trafgen_vnfd package: %s", tg_vnfd_package_file)
+        trans_id = upload_descriptor(logger, tg_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 1, "There should be one vnfd"
+        assert vnfds[0].name == "trafgen_vnfd"
+
+    def test_onboard_vrouter_vnfd(self, logger, vnfd_proxy, vrouter_vnfd_package_file):
+        logger.info("Onboarding vrouter_vnfd package: %s", vrouter_vnfd_package_file)
+        trans_id = upload_descriptor(logger, vrouter_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 2, "There should be two vnfds"
+        assert "vrouter_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+    def test_onboard_ts_vnfd(self, logger, vnfd_proxy, ts_vnfd_package_file):
+        logger.info("Onboarding trafsink_vnfd package: %s", ts_vnfd_package_file)
+        trans_id = upload_descriptor(logger, ts_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 3, "There should be three vnfds"
+        assert "trafsink_vnfd" in [vnfds[0].name, vnfds[1].name, vnfds[2].name]
+
+    def test_onboard_tg_2vrouter_ts_nsd(self, logger, nsd_proxy, tg_2vrouter_ts_nsd_package_file):
+        logger.info("Onboarding tg_2vrouter_ts nsd package: %s", tg_2vrouter_ts_nsd_package_file)
+        trans_id = upload_descriptor(logger, tg_2vrouter_ts_nsd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        nsd = nsds[0]
+        assert nsd.name == "tg_vrouter_ts_nsd"
+        assert nsd.short_name == "tg_2vrouter_ts_nsd"
+
+    def test_instantiate_tg_2vrouter_ts_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        nsr = create_nsr_from_nsd_id(nsd.id)
+        nsr_proxy.merge_config('/ns-instance-config', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+        assert len(nsrs) == 1
+        assert nsrs[0].ns_instance_config_ref == nsr.id
+
+
diff --git a/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py b/rwlaunchpad/test/pytest/lp_tg_2vrouter_ts_test.py
new file mode 100644 (file)
index 0000000..ed00a25
--- /dev/null
@@ -0,0 +1,325 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file lp_tg_2vrouter_ts_test.py
+@author Austin Cormier (Austin.Cormier@riftio.com)
+@date 10/15/2015
+@brief Launchpad Module Test TG-2Vrouter-TS
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import requests
+import subprocess
+import time
+import uuid
+
+import gi
+gi.require_version('RwIwpYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+
+
+
+from gi.repository import (
+        RwIwpYang,
+        NsdYang,
+        NsrYang,
+        RwNsrYang,
+        VldYang,
+        RwVnfdYang,
+        RwCloudYang,
+        RwBaseYang,
+        RwResourceMgrYang,
+        RwConmanYang,
+        RwNsmYang,
+        )
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+RW_VROUTER_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/vrouter"
+    )
+RW_TRAFGEN_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/trafgen"
+    )
+RW_TRAFSINK_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/trafsink"
+    )
+RW_TG_2VROUTER_TS_NSD_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/nsds/tg_2vrouter_ts"
+    )
+
+
+class PackageError(Exception):
+    pass
+
+
+def raise_package_error():
+    raise PackageError("Could not find ns packages")
+
+
+@pytest.fixture(scope='module')
+def iwp_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwIwpYang)
+
+@pytest.fixture(scope='module')
+def resource_mgr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwResourceMgrYang)
+
+
+@pytest.fixture(scope='module')
+def cloud_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwCloudYang)
+
+
+@pytest.fixture(scope='module')
+def vnfd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfdYang)
+
+
+@pytest.fixture(scope='module')
+def vld_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VldYang)
+
+
+@pytest.fixture(scope='module')
+def nsd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsdYang)
+
+
+@pytest.fixture(scope='module')
+def nsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsrYang)
+
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+
+@pytest.fixture(scope='module')
+def base_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwBaseYang)
+
+@pytest.fixture(scope='module')
+def so_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwConmanYang)
+
+@pytest.fixture(scope='module')
+def nsm_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsmYang)
+
+@pytest.fixture(scope='session')
+def vrouter_vnfd_package_file():
+    vrouter_pkg_file = os.path.join(
+            RW_VROUTER_PKG_INSTALL_DIR,
+            "vrouter_vnfd.tar.gz",
+            )
+    if not os.path.exists(vrouter_pkg_file):
+        raise_package_error()
+
+    return vrouter_pkg_file
+
+@pytest.fixture(scope='session')
+def tg_vnfd_package_file():
+    tg_pkg_file = os.path.join(
+            RW_TRAFGEN_PKG_INSTALL_DIR,
+            "trafgen_vnfd.tar.gz",
+            )
+    if not os.path.exists(tg_pkg_file):
+        raise_package_error()
+
+    return tg_pkg_file
+
+@pytest.fixture(scope='session')
+def ts_vnfd_package_file():
+    ts_pkg_file = os.path.join(
+            RW_TRAFSINK_PKG_INSTALL_DIR,
+            "trafsink_vnfd.tar.gz",
+            )
+    if not os.path.exists(ts_pkg_file):
+        raise_package_error()
+
+    return ts_pkg_file
+
+@pytest.fixture(scope='session')
+def tg_2vrouter_ts_nsd_package_file():
+    tg_2vrouter_ts_nsd_pkg_file = os.path.join(
+            RW_TG_2VROUTER_TS_NSD_PKG_INSTALL_DIR,
+            "tg_2vrouter_ts_nsd.tar.gz",
+            )
+    if not os.path.exists(tg_2vrouter_ts_nsd_pkg_file):
+        raise_package_error()
+
+    return tg_2vrouter_ts_nsd_pkg_file
+
+
+def create_nsr_from_nsd_id(nsd_id):
+    nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+    nsr.id = str(uuid.uuid4())
+    nsr.name = "TG-2Vrouter-TS EPA"
+    nsr.short_name = "TG-2Vrouter-TS EPA"
+    nsr.description = "4 VNFs with Trafgen, 2 Vrouters and Trafsink EPA"
+    nsr.nsd_ref = nsd_id
+    nsr.admin_status = "ENABLED"
+
+    return nsr
+
+
+def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
+    curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format(
+            file=descriptor_file,
+            host=host,
+            )
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    pass
+
+
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+    logger.info("Waiting for onboard trans_id %s to complete",
+             transaction_id)
+    start_time = time.time()
+    while (time.time() - start_time) < timeout_secs:
+        r = requests.get(
+                'http://{host}:4567/api/upload/{t_id}/state'.format(
+                    host=host, t_id=transaction_id
+                    )
+                )
+        state = r.json()
+        if state["status"] == "pending":
+            time.sleep(1)
+            continue
+
+        elif state["status"] == "success":
+            logger.info("Descriptor onboard was successful")
+            return
+
+        else:
+            raise DescriptorOnboardError(state)
+
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+
+@pytest.mark.incremental
+class TestLaunchpadStartStop(object):
+    def test_configure_cloud_account(self, cloud_proxy, logger):
+        cloud_account = RwCloudYang.CloudAccountConfig()
+        #cloud_account.name = "cloudsim_proxy"
+        #cloud_account.account_type = "cloudsim_proxy"
+        cloud_account.name = "riftuser1"
+        cloud_account.account_type = "openstack"
+        cloud_account.openstack.key = 'pluto'
+        cloud_account.openstack.secret = 'mypasswd'
+        cloud_account.openstack.auth_url = 'http://10.66.4.xx:5000/v3/'
+        cloud_account.openstack.tenant = 'demo'
+        cloud_account.openstack.mgmt_network = 'private'
+
+        cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+
+    def test_configure_pools(self, resource_mgr_proxy):
+        pools = RwResourceMgrYang.ResourcePools.from_dict({
+            "pools": [{ "name": "vm_pool_a",
+                        "resource_type": "compute",
+                        "pool_type" : "dynamic"},
+                      {"name": "network_pool_a",
+                       "resource_type": "network",
+                       "pool_type" : "dynamic",}]})
+
+        resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+
+    def test_configure_resource_orchestrator(self, so_proxy):
+        cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
+                                                'ro_port'      :  2022,
+                                                'ro_username'  : 'admin',
+                                                'ro_password'  : 'admin'})
+        so_proxy.merge_config('/rw-conman:cm-config', cfg)
+
+    def test_configure_service_orchestrator(self, nsm_proxy):
+        cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
+                                              'cm_port'      :  2022,
+                                              'cm_username'  : 'admin',
+                                              'cm_password'  : 'admin'})
+        nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
+
+    def test_onboard_tg_vnfd(self, logger, vnfd_proxy, tg_vnfd_package_file):
+        logger.info("Onboarding trafgen_vnfd package: %s", tg_vnfd_package_file)
+        trans_id = upload_descriptor(logger, tg_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 1, "There should be one vnfd"
+        assert vnfds[0].name == "trafgen_vnfd"
+
+    def test_onboard_vrouter_vnfd(self, logger, vnfd_proxy, vrouter_vnfd_package_file):
+        logger.info("Onboarding vrouter_vnfd package: %s", vrouter_vnfd_package_file)
+        trans_id = upload_descriptor(logger, vrouter_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 2, "There should be two vnfds"
+        assert "vrouter_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+    def test_onboard_ts_vnfd(self, logger, vnfd_proxy, ts_vnfd_package_file):
+        logger.info("Onboarding trafsink_vnfd package: %s", ts_vnfd_package_file)
+        trans_id = upload_descriptor(logger, ts_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 3, "There should be three vnfds"
+        assert "trafsink_vnfd" in [vnfds[0].name, vnfds[1].name, vnfds[2].name]
+
+    def test_onboard_tg_2vrouter_ts_nsd(self, logger, nsd_proxy, tg_2vrouter_ts_nsd_package_file):
+        logger.info("Onboarding tg_2vrouter_ts nsd package: %s", tg_2vrouter_ts_nsd_package_file)
+        trans_id = upload_descriptor(logger, tg_2vrouter_ts_nsd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        nsd = nsds[0]
+        assert nsd.name == "tg_vrouter_ts_nsd"
+        assert nsd.short_name == "tg_2vrouter_ts_nsd"
+
+    def test_instantiate_tg_2vrouter_ts_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        nsr = create_nsr_from_nsd_id(nsd.id)
+        nsr_proxy.merge_config('/ns-instance-config', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+        assert len(nsrs) == 1
+        assert nsrs[0].ns_instance_config_ref == nsr.id
+
+
diff --git a/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py b/rwlaunchpad/test/pytest/lp_tg_vrouter_ts_epa_sriov_test.py
new file mode 100644 (file)
index 0000000..4d6e345
--- /dev/null
@@ -0,0 +1,323 @@
+#!/usr/bin/env python
+"""
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+@file lp_tg_vrouter_ts_epa_sriov_test.py
+@author Austin Cormier (Austin.Cormier@riftio.com)
+@date 10/15/2015
+@brief Launchpad Module Test TG-Vrouter-TS EPA SRIOV
+"""
+
+import json
+import logging
+import os
+import pytest
+import shlex
+import requests
+import subprocess
+import time
+import uuid
+
+import gi
+gi.require_version('RwIwpYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwBaseYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+
+
+from gi.repository import (
+        RwIwpYang,
+        NsdYang,
+        NsrYang,
+        RwNsrYang,
+        VldYang,
+        RwVnfdYang,
+        RwCloudYang,
+        RwBaseYang,
+        RwResourceMgrYang,
+        RwConmanYang,
+        RwNsmYang,
+        )
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+RW_VROUTER_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/vrouter"
+    )
+RW_TRAFGEN_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/trafgen"
+    )
+RW_TRAFSINK_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/vnfds/trafsink"
+    )
+RW_TG_VROUTER_TS_NSD_PKG_INSTALL_DIR = os.path.join(
+    os.environ["RIFT_INSTALL"],
+    "usr/rift/mano/nsds/tg_vrouter_ts"
+    )
+
+
+class PackageError(Exception):
+    pass
+
+
+def raise_package_error():
+    raise PackageError("Could not find ns packages")
+
+
+@pytest.fixture(scope='module')
+def iwp_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwIwpYang)
+
+@pytest.fixture(scope='module')
+def resource_mgr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwResourceMgrYang)
+
+
+@pytest.fixture(scope='module')
+def cloud_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwCloudYang)
+
+
+@pytest.fixture(scope='module')
+def vnfd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwVnfdYang)
+
+
+@pytest.fixture(scope='module')
+def vld_proxy(request, mgmt_session):
+    return mgmt_session.proxy(VldYang)
+
+
+@pytest.fixture(scope='module')
+def nsd_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsdYang)
+
+
+@pytest.fixture(scope='module')
+def nsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(NsrYang)
+
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsrYang)
+
+
+@pytest.fixture(scope='module')
+def base_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwBaseYang)
+
+@pytest.fixture(scope='module')
+def so_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwConmanYang)
+
+@pytest.fixture(scope='module')
+def nsm_proxy(request, mgmt_session):
+    return mgmt_session.proxy(RwNsmYang)
+
+@pytest.fixture(scope='session')
+def vrouter_vnfd_package_file():
+    vrouter_pkg_file = os.path.join(
+            RW_VROUTER_PKG_INSTALL_DIR,
+            "vrouter_vnfd_with_epa_sriov.tar.gz",
+            )
+    if not os.path.exists(vrouter_pkg_file):
+        raise_package_error()
+
+    return vrouter_pkg_file
+
+@pytest.fixture(scope='session')
+def tg_vnfd_package_file():
+    tg_pkg_file = os.path.join(
+            RW_TRAFGEN_PKG_INSTALL_DIR,
+            "trafgen_vnfd_with_epa_sriov.tar.gz",
+            )
+    if not os.path.exists(tg_pkg_file):
+        raise_package_error()
+
+    return tg_pkg_file
+
+@pytest.fixture(scope='session')
+def ts_vnfd_package_file():
+    ts_pkg_file = os.path.join(
+            RW_TRAFSINK_PKG_INSTALL_DIR,
+            "trafsink_vnfd_with_epa_sriov.tar.gz",
+            )
+    if not os.path.exists(ts_pkg_file):
+        raise_package_error()
+
+    return ts_pkg_file
+
+@pytest.fixture(scope='session')
+def tg_vrouter_ts_nsd_package_file():
+    tg_vrouter_ts_nsd_pkg_file = os.path.join(
+            RW_TG_VROUTER_TS_NSD_PKG_INSTALL_DIR,
+            "tg_vrouter_ts_nsd_with_epa_sriov.tar.gz",
+            )
+    if not os.path.exists(tg_vrouter_ts_nsd_pkg_file):
+        raise_package_error()
+
+    return tg_vrouter_ts_nsd_pkg_file
+
+
+def create_nsr_from_nsd_id(nsd_id):
+    nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+    nsr.id = str(uuid.uuid4())
+    nsr.name = "TG-Vrouter-TS-EPA-SRIOV"
+    nsr.short_name = "TG-Vrouter-TS-EPA-SRIOV"
+    nsr.description = "3 VNFs with Trafgen, Vrouter and Trafsink EPA SRIOV"
+    nsr.nsd_ref = nsd_id
+    nsr.admin_status = "ENABLED"
+
+    return nsr
+
+
+def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
+    curl_cmd = 'curl -F "descriptor=@{file}" http://{host}:4567/api/upload'.format(
+            file=descriptor_file,
+            host=host,
+            )
+    logger.debug("Uploading descriptor %s using cmd: %s", descriptor_file, curl_cmd)
+    stdout = subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
+
+    json_out = json.loads(stdout)
+    transaction_id = json_out["transaction_id"]
+
+    return transaction_id
+
+
+class DescriptorOnboardError(Exception):
+    pass
+
+
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+    logger.info("Waiting for onboard trans_id %s to complete",
+             transaction_id)
+    start_time = time.time()
+    while (time.time() - start_time) < timeout_secs:
+        r = requests.get(
+                'http://{host}:4567/api/upload/{t_id}/state'.format(
+                    host=host, t_id=transaction_id
+                    )
+                )
+        state = r.json()
+        if state["status"] == "pending":
+            time.sleep(1)
+            continue
+
+        elif state["status"] == "success":
+            logger.info("Descriptor onboard was successful")
+            return
+
+        else:
+            raise DescriptorOnboardError(state)
+
+    if state["status"] != "success":
+        raise DescriptorOnboardError(state)
+
+@pytest.mark.incremental
+class TestLaunchpadStartStop(object):
+    def test_configure_cloud_account(self, cloud_proxy, logger):
+        cloud_account = RwCloudYang.CloudAccountConfig()
+        #cloud_account.name = "cloudsim_proxy"
+        #cloud_account.account_type = "cloudsim_proxy"
+        cloud_account.name = "riftuser1"
+        cloud_account.account_type = "openstack"
+        cloud_account.openstack.key = 'pluto'
+        cloud_account.openstack.secret = 'mypasswd'
+        cloud_account.openstack.auth_url = 'http://10.66.4.xx:5000/v3/'
+        cloud_account.openstack.tenant = 'demo'
+        cloud_account.openstack.mgmt_network = 'private'
+
+        cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+
+    def test_configure_pools(self, resource_mgr_proxy):
+        pools = RwResourceMgrYang.ResourcePools.from_dict({
+            "pools": [{ "name": "vm_pool_a",
+                        "resource_type": "compute",
+                        "pool_type" : "dynamic"},
+                      {"name": "network_pool_a",
+                       "resource_type": "network",
+                       "pool_type" : "dynamic",}]})
+
+        resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+
+    def test_configure_resource_orchestrator(self, so_proxy):
+        cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
+                                                'ro_port'      :  2022,
+                                                'ro_username'  : 'admin',
+                                                'ro_password'  : 'admin'})
+        so_proxy.merge_config('/rw-conman:cm-config', cfg)
+
+    def test_configure_service_orchestrator(self, nsm_proxy):
+        cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
+                                              'cm_port'      :  2022,
+                                              'cm_username'  : 'admin',
+                                              'cm_password'  : 'admin'})
+        nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
+
+    def test_onboard_tg_vnfd(self, logger, vnfd_proxy, tg_vnfd_package_file):
+        logger.info("Onboarding trafgen_vnfd package: %s", tg_vnfd_package_file)
+        trans_id = upload_descriptor(logger, tg_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 1, "There should be one vnfd"
+        assert vnfds[0].name == "trafgen_vnfd"
+
+    def test_onboard_vrouter_vnfd(self, logger, vnfd_proxy, vrouter_vnfd_package_file):
+        logger.info("Onboarding vrouter_vnfd package: %s", vrouter_vnfd_package_file)
+        trans_id = upload_descriptor(logger, vrouter_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 2, "There should be two vnfds"
+        assert "vrouter_vnfd" in [vnfds[0].name, vnfds[1].name]
+
+    def test_onboard_ts_vnfd(self, logger, vnfd_proxy, ts_vnfd_package_file):
+        logger.info("Onboarding trafsink_vnfd package: %s", ts_vnfd_package_file)
+        trans_id = upload_descriptor(logger, ts_vnfd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = vnfd_proxy.get_config('/vnfd-catalog')
+        vnfds = catalog.vnfd
+        assert len(vnfds) == 3, "There should be three vnfds"
+        assert "trafsink_vnfd" in [vnfds[0].name, vnfds[1].name, vnfds[2].name]
+
+    def test_onboard_tg_vrouter_ts_nsd(self, logger, nsd_proxy, tg_vrouter_ts_nsd_package_file):
+        logger.info("Onboarding tg_vrouter_ts nsd package: %s", tg_vrouter_ts_nsd_package_file)
+        trans_id = upload_descriptor(logger, tg_vrouter_ts_nsd_package_file)
+        wait_unboard_transaction_finished(logger, trans_id)
+
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsds = catalog.nsd
+        assert len(nsds) == 1, "There should only be a single nsd"
+        nsd = nsds[0]
+        assert nsd.name == "tg_vrouter_ts_nsd"
+
+    def test_instantiate_tg_vrouter_ts_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
+        catalog = nsd_proxy.get_config('/nsd-catalog')
+        nsd = catalog.nsd[0]
+
+        nsr = create_nsr_from_nsd_id(nsd.id)
+        nsr_proxy.merge_config('/ns-instance-config', nsr)
+
+        nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+        nsrs = nsr_opdata.nsr
+        assert len(nsrs) == 1
+        assert nsrs[0].ns_instance_config_ref == nsr.id
+
+
diff --git a/rwlaunchpad/test/racfg/lprecovery_test.racfg b/rwlaunchpad/test/racfg/lprecovery_test.racfg
new file mode 100644 (file)
index 0000000..43e07aa
--- /dev/null
@@ -0,0 +1,19 @@
+{
+  "test_name":"TC_LPRECOVERY_TEST",
+  "commandline":"./launchpad_recovery",
+  "target_vm":"VM",
+  "test_description":"Test targeting launchpad recovery feature",
+  "run_as_root": true,
+  "status":"broken",
+  "keywords":["nightly","smoke"],
+  "timelimit": 4800,
+  "networks":[],
+  "vms":[
+    {
+      "name": "VM",
+      "memory": 8192,
+      "cpus": 2
+    }
+  ]
+}
+
diff --git a/rwlaunchpad/test/tosca_ut.py b/rwlaunchpad/test/tosca_ut.py
new file mode 100755 (executable)
index 0000000..40efe41
--- /dev/null
@@ -0,0 +1,183 @@
+#!/usr/bin/env python3
+
+############################################################################
+# Copyright 2016 RIFT.io Inc                                               #
+#                                                                          #
+# Licensed under the Apache License, Version 2.0 (the "License");          #
+# you may not use this file except in compliance with the License.         #
+# You may obtain a copy of the License at                                  #
+#                                                                          #
+#     http://www.apache.org/licenses/LICENSE-2.0                           #
+#                                                                          #
+# Unless required by applicable law or agreed to in writing, software      #
+# distributed under the License is distributed on an "AS IS" BASIS,        #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and      #
+# limitations under the License.                                           #
+############################################################################
+
+import argparse
+import logging
+import os
+import shutil
+import sys
+import tarfile
+import tempfile
+import unittest
+import xmlrunner
+
+import rift.mano.examples.ping_pong_nsd as ping_pong_nsd
+
+from rift.mano.utils.compare_desc import CompareDescShell
+
+from rift.tasklets.rwlaunchpad.tosca import ExportTosca
+from rift.tasklets.rwlaunchpad.tosca import ImportTosca
+
+from rift.package.package import TarPackageArchive
+
+class PingPongDescriptors(object):
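+    """Generate the ping/pong VNFDs and NSD used as translation input."""
+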
+    def __init__(self):
+        ping_vnfd, pong_vnfd, nsd = \
+                ping_pong_nsd.generate_ping_pong_descriptors(
+                    pingcount=1,
+                    external_vlr_count=1,
+                    internal_vlr_count=0,
+                    num_vnf_vms=1,
+                    ping_md5sum='1234567890abcdefg',
+                    pong_md5sum='1234567890abcdefg',
+                    mano_ut=False,
+                    use_scale_group=True,
+                    use_mon_params=True,
+                    use_placement_group=False,
+                    use_ns_init_conf=False,
+                )
+        self.ping_pong_nsd = nsd.descriptor.nsd[0]
+        self.ping_vnfd = ping_vnfd.descriptor.vnfd[0]
+        self.pong_vnfd = pong_vnfd.descriptor.vnfd[0]
+
+
+class ToscaTestCase(unittest.TestCase):
+    """ Unittest for YANG to TOSCA and back translations
+
+    This generates the Ping Pong descriptors using the script
+    in examples and then converts them to TOSCA and back to YANG.
+    """
+    default_timeout = 0
+    top_dir = __file__[:__file__.find('/modules/core/')]
+    log_level = logging.WARN
+    log = None
+
+    @classmethod
+    def setUpClass(cls):
+        fmt = logging.Formatter(
+                '%(asctime)-23s %(levelname)-5s  (%(name)s@%(process)d:%(filename)s:%(lineno)d) - %(message)s')
+        stderr_handler = logging.StreamHandler(stream=sys.stderr)
+        stderr_handler.setFormatter(fmt)
+        logging.basicConfig(level=cls.log_level)
+        cls.log = logging.getLogger('tosca-ut')
+        cls.log.addHandler(stderr_handler)
+
+    def setUp(self):
+        """Run before each test method to initialize test environment."""
+
+        super(ToscaTestCase, self).setUp()
+        self.output_dir = tempfile.mkdtemp()
+
+    def compare_dict(self, gen_d, exp_d):
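+        # CompareDescShell.compare_dicts expects CLI-style argument strings,
+        # so wrap each dict in a "--generated="/"--expected=" prefix first.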
+        gen = "--generated="+str(gen_d)
+        exp = "--expected="+str(exp_d)
+        CompareDescShell.compare_dicts(gen, exp, log=self.log)
+
+    def yang_to_tosca(self, descs):
+        """Convert YANG model to TOSCA model"""
+        pkg = ExportTosca(self.log)
+        nsd_id = pkg.add_nsd(descs.ping_pong_nsd)
+        pkg.add_vnfd(nsd_id, descs.ping_vnfd)
+        pkg.add_vnfd(nsd_id, descs.pong_vnfd)
+
+        return pkg.create_archive('ping_pong_nsd', self.output_dir)
+
+    def tosca_to_yang(self, tosca_file):
+        """Convert TOSCA model to YANG model"""
+        if ImportTosca.is_tosca_package(tosca_file):
+            # This could be a tosca package, try processing
+            tosca = ImportTosca(self.log, tosca_file, out_dir=self.output_dir)
+            files = tosca.translate()
+            if files is None or len(files) < 3:
+                raise ValueError("Could not process as a "
+                                 "TOSCA package {}: {}".format(tosca_file, files))
+            else:
+                self.log.info("Tosca package was translated successfully")
+                return files
+        else:
+            raise ValueError("Not a valid TOSCA archive: {}".
+                             format(tosca_file))
+
+    def compare_descs(self, descs, yang_files):
+        """Compare the sescriptors generated with original"""
+        for yang_file in yang_files:
+            if tarfile.is_tarfile(yang_file):
+                with open(yang_file, "r+b") as tar:
+                    archive = TarPackageArchive(self.log, tar)
+                    pkg = archive.create_package()
+                    desc_type = pkg.descriptor_type
+                    if desc_type == 'nsd':
+                        nsd_yang = pkg.descriptor_msg.as_dict()
+                        self.compare_dict(nsd_yang,
+                                          descs.ping_pong_nsd.as_dict())
+                    elif desc_type == 'vnfd':
+                        vnfd_yang = pkg.descriptor_msg.as_dict()
+                        if 'ping_vnfd' == vnfd_yang['name']:
+                            self.compare_dict(vnfd_yang,
+                                              descs.ping_vnfd.as_dict())
+                        elif 'pong_vnfd' == vnfd_yang['name']:
+                            self.compare_dict(vnfd_yang,
+                                              descs.pong_vnfd.as_dict())
+                        else:
+                            raise Exception("Unknown descriptor type {} found: {}".
+                                            format(desc_type, pkg.files))
+            else:
+                raise Exception("Did not find a valid tar file for yang model: {}".
+                                format(yang_file))
+
+    def test_output(self):
+        try:
+            # Generate the Ping Pong descriptors
+            descs = PingPongDescriptors()
+
+            # Translate the descriptors to TOSCA
+            tosca_file = self.yang_to_tosca(descs)
+
+            # Now translate back to YANG
+            yang_files = self.tosca_to_yang(tosca_file)
+
+            # Compare the generated YANG to original
+            self.compare_descs(descs, yang_files)
+
+            # Removing temp dir only on success to allow debug in case of failures
+            if self.output_dir is not None:
+                shutil.rmtree(self.output_dir)
+                self.output_dir = None
+
+        except Exception as e:
+            self.log.exception(e)
+            self.fail("Exception {}".format(e))
+
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unittest_args = parser.parse_known_args()
+    if args.no_runner:
+        runner = None
+    else:
+        runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    ToscaTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/test/utest_nsr_handler.py b/rwlaunchpad/test/utest_nsr_handler.py
new file mode 100755 (executable)
index 0000000..ffab929
--- /dev/null
@@ -0,0 +1,485 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import argparse
+import asyncio
+import logging
+import os
+import sys
+import time
+import unittest
+import uuid
+
+import xmlrunner
+
+import gi.repository.RwDts as rwdts
+import gi.repository.RwNsmYang as rwnsmyang
+import gi.repository.NsrYang as NsrYang
+import gi.repository.RwNsrYang as RwNsrYang
+import gi.repository.RwTypes as RwTypes
+import gi.repository.ProtobufC as ProtobufC
+import gi.repository.RwResourceMgrYang as RwResourceMgrYang
+import gi.repository.RwLaunchpadYang as launchpadyang
+import rift.tasklets
+import rift.test.dts
+
+import mano_ut
+
+
+if sys.version_info < (3, 4, 4):
+    asyncio.ensure_future = asyncio.async
+
+
+class NsrDtsHandler(object):
+    """ The network service DTS handler """
+    NSR_XPATH = "C,/nsr:ns-instance-config/nsr:nsr"
+    SCALE_INSTANCE_XPATH = "C,/nsr:ns-instance-config/nsr:nsr/nsr:scaling-group/nsr:instance"
+
+    def __init__(self, dts, log, loop, nsm):
+        self._dts = dts
+        self._log = log
+        self._loop = loop
+        self._nsm = nsm
+
+        self._nsr_regh = None
+        self._scale_regh = None
+
+    @property
+    def nsm(self):
+        """ Return the NS manager instance """
+        return self._nsm
+
+    def get_scale_group_instances(self, nsr_id, group_name):
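+        """Return the set of configured instance ids for a scaling group.
+
+        Walks the committed scaling-group instance elements and collects
+        the ids whose keyspec matches both the NSR id and the group name.
+        """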
+        def nsr_id_from_keyspec(ks):
+            nsr_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
+            nsr_id = nsr_path_entry.key00.id
+            return nsr_id
+
+        def group_name_from_keyspec(ks):
+            group_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
+            group_name = group_path_entry.key00.scaling_group_name_ref
+            return group_name
+
+
+        xact_ids = set()
+        for instance_cfg, keyspec in self._scale_regh.get_xact_elements(include_keyspec=True):
+            elem_nsr_id = nsr_id_from_keyspec(keyspec)
+            if elem_nsr_id != nsr_id:
+                continue
+
+            elem_group_name = group_name_from_keyspec(keyspec)
+            if elem_group_name != group_name:
+                continue
+
+            xact_ids.add(instance_cfg.id)
+
+        return xact_ids
+
+    @asyncio.coroutine
+    def register(self):
+        """ Register for Nsr create/update/delete/read requests from dts """
+
+        def nsr_id_from_keyspec(ks):
+            nsr_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
+            nsr_id = nsr_path_entry.key00.id
+            return nsr_id
+
+        def group_name_from_keyspec(ks):
+            group_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
+            group_name = group_path_entry.key00.scaling_group_name_ref
+            return group_name
+
+        def is_instance_in_reg_elements(nsr_id, group_name, instance_id):
+            """ Return boolean indicating if scaling group instance was already commited previously.
+
+            By looking at the existing elements in this registration handle (elements not part
+            of this current xact), we can tell if the instance was configured previously without
+            keeping any application state.
+            """
+            for instance_cfg, keyspec in self._nsr_regh.get_xact_elements(include_keyspec=True):
+                elem_nsr_id = nsr_id_from_keyspec(keyspec)
+                elem_group_name = group_name_from_keyspec(keyspec)
+
+                if elem_nsr_id != nsr_id or group_name != elem_group_name:
+                    continue
+
+                if instance_cfg.id == instance_id:
+                    return True
+
+            return False
+
+        def get_scale_group_instance_delta(nsr_id, group_name, xact):
+
+            # 1. Find all elements in the transaction and add them to "added".
+            # 2. Find matching elements among the current elements and remove them from "added".
+            # 3. Find elements present only among the current elements and add them to "deleted".
+
+            xact_ids = set()
+            for instance_cfg, keyspec in self._scale_regh.get_xact_elements(xact, include_keyspec=True):
+                elem_nsr_id = nsr_id_from_keyspec(keyspec)
+                if elem_nsr_id != nsr_id:
+                    continue
+
+                elem_group_name = group_name_from_keyspec(keyspec)
+                if elem_group_name != group_name:
+                    continue
+
+                xact_ids.add(instance_cfg.id)
+
+            current_ids = set()
+            for instance_cfg, keyspec in self._scale_regh.get_xact_elements(include_keyspec=True):
+                elem_nsr_id = nsr_id_from_keyspec(keyspec)
+                if elem_nsr_id != nsr_id:
+                    continue
+
+                elem_group_name = group_name_from_keyspec(keyspec)
+                if elem_group_name != group_name:
+                    continue
+
+                current_ids.add(instance_cfg.id)
+
+            delta = {
+                    "added": xact_ids - current_ids,
+                    "deleted": current_ids - xact_ids
+                    }
+            return delta
+
+        def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
+            # Unfortunately, it is currently difficult to figure out what exactly
+            # has changed in this xact without Pbdelta support (RIFT-4916).
+            # As a workaround, we can fetch the pre and post xact elements and
+            # perform a comparison to figure out adds/deletes/updates
+            xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
+            curr_cfgs = list(dts_member_reg.elements)
+
+            xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
+            curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
+
+            # Find Adds
+            added_keys = set(xact_key_map) - set(curr_key_map)
+            added_cfgs = [xact_key_map[key] for key in added_keys]
+
+            # Find Deletes
+            deleted_keys = set(curr_key_map) - set(xact_key_map)
+            deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
+
+            # Find Updates
+            updated_keys = set(curr_key_map) & set(xact_key_map)
+            updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
+
+            return added_cfgs, deleted_cfgs, updated_cfgs
+
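+        # A minimal standalone sketch of the pre/post set comparison used
+        # above, on plain dicts rather than DTS registration handles (the
+        # sample keys and values are hypothetical, for illustration only):
+        #
+        #   xact_key_map = {"a": 1, "b": 2, "c": 3}  # elements in the xact
+        #   curr_key_map = {"b": 2, "c": 9, "d": 4}  # elements already applied
+        #
+        #   added   = set(xact_key_map) - set(curr_key_map)     # {"a"}
+        #   deleted = set(curr_key_map) - set(xact_key_map)     # {"d"}
+        #   updated = {k for k in set(curr_key_map) & set(xact_key_map)
+        #              if xact_key_map[k] != curr_key_map[k]}   # {"c"}
+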
+        def on_apply(dts, acg, xact, action, scratch):
+            """Apply the  configuration"""
+            def handle_create_nsr(msg):
+                # Handle create NSR requests
+                # Do some validations
+                if not msg.has_field("nsd_ref"):
+                    err = "NSD reference not provided"
+                    self._log.error(err)
+                    raise NetworkServiceRecordError(err)
+
+                self._log.info("Creating NetworkServiceRecord %s  from nsd_id  %s",
+                               msg.id, msg.nsd_ref)
+
+                #nsr = self.nsm.create_nsr(msg)
+                # Creation is stubbed out above, so there is no record to return;
+                # returning the undefined name would raise a NameError if called.
+                return None
+
+            def handle_delete_nsr(msg):
+                @asyncio.coroutine
+                def delete_instantiation(ns_id):
+                    """ Delete instantiation """
+                    pass
+                    #with self._dts.transaction() as xact:
+                        #yield from self._nsm.terminate_ns(ns_id, xact)
+
+                # Handle delete NSR requests
+                self._log.info("Delete req for  NSR Id: %s received", msg.id)
+                # Terminate the NSR instance
+                #nsr = self._nsm.get_ns_by_nsr_id(msg.id)
+
+                #nsr.set_state(NetworkServiceRecordState.TERMINATE_RCVD)
+                #event_descr = "Terminate rcvd for NS Id:%s" % msg.id
+                #nsr.record_event("terminate-rcvd", event_descr)
+
+                #self._loop.create_task(delete_instantiation(msg.id))
+
+            @asyncio.coroutine
+            def begin_instantiation(nsr):
+                # Begin instantiation
+                pass
+                #self._log.info("Beginning NS instantiation: %s", nsr.id)
+                #yield from self._nsm.instantiate_ns(nsr.id, xact)
+
+            self._log.debug("Got nsr apply (xact: %s) (action: %s)(scr: %s)",
+                            xact, action, scratch)
+
+            if action == rwdts.AppconfAction.INSTALL and xact.id is None:
+                self._log.debug("No xact handle.  Skipping apply config")
+                xact = None
+
+            (added_msgs, deleted_msgs, updated_msgs) = get_add_delete_update_cfgs(self._nsr_regh, xact, "id")
+
+            for msg in added_msgs:
+                self._log.info("Create NSR received in on_apply to instantiate NS:%s", msg.id)
+                #if msg.id not in self._nsm.nsrs:
+                #    self._log.info("Create NSR received in on_apply to instantiate NS:%s", msg.id)
+                #    nsr = handle_create_nsr(msg)
+                #    self._loop.create_task(begin_instantiation(nsr))
+
+            for msg in deleted_msgs:
+                self._log.info("Delete NSR received in on_apply to terminate NS:%s", msg.id)
+                try:
+                    handle_delete_nsr(msg)
+                except Exception:
+                    self._log.exception("Failed to terminate NS:%s", msg.id)
+
+            for msg in updated_msgs:
+                self._log.info("Update NSR received in on_apply to change scaling groups in NS:%s", msg.id)
+
+                for group in msg.scaling_group:
+                    instance_delta = get_scale_group_instance_delta(msg.id, group.scaling_group_name_ref, xact)
+                    self._log.debug("Got NSR:%s scale group instance delta: %s", msg.id, instance_delta)
+
+                    #for instance_id in instance_delta["added"]:
+                    #    self._nsm.scale_nsr_out(msg.id, group.scaling_group_name_ref, instance_id, xact)
+
+                    #for instance_id in instance_delta["deleted"]:
+                    #    self._nsm.scale_nsr_in(msg.id, group.scaling_group_name_ref, instance_id)
+
+
+            return RwTypes.RwStatus.SUCCESS
+
+        @asyncio.coroutine
+        def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+            """ Prepare calllback from DTS for NSR """
+
+            xpath = ks_path.to_xpath(NsrYang.get_schema())
+            action = xact_info.query_action
+            self._log.debug(
+                    "Got Nsr prepare callback (xact: %s) (action: %s) (info: %s), %s:%s)",
+                    xact, action, xact_info, xpath, msg
+                    )
+
+            fref = ProtobufC.FieldReference.alloc()
+            fref.goto_whole_message(msg.to_pbcm())
+
+            if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+                pass
+                # Ensure the Cloud account has been specified if this is an NSR create
+                #if msg.id not in self._nsm.nsrs:
+                #    if not msg.has_field("cloud_account"):
+                #        raise NsrInstantiationFailed("Cloud account not specified in NSR")
+
+                # We do not allow scaling actions to occur if the NS is not in running state
+                #elif msg.has_field("scaling_group"):
+                #    nsr = self._nsm.nsrs[msg.id]
+                #    if nsr.state != NetworkServiceRecordState.RUNNING:
+                #        raise ScalingOperationError("Unable to perform scaling action when NS is not in running state")
+
+                #    if len(msg.scaling_group) > 1:
+                #        raise ScalingOperationError("Only a single scaling group can be configured at a time")
+
+                #    for group_msg in msg.scaling_group:
+                #        num_new_group_instances = len(group_msg.instance)
+                #        if num_new_group_instances > 1:
+                #            raise ScalingOperationError("Only a single scaling instance can be created at a time")
+
+                #        elif num_new_group_instances == 1:
+                #            scale_group = nsr.scaling_groups[group_msg.scaling_group_name_ref]
+                #            if len(scale_group.instances) == scale_group.max_instance_count:
+                #                raise ScalingOperationError("Max instances for %s reached" % scale_group)
+
+
+            acg.handle.prepare_complete_ok(xact_info.handle)
+
+
+        self._log.debug("Registering for NSR config using xpath: %s",
+                        NsrDtsHandler.NSR_XPATH)
+
+        acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
+        with self._dts.appconf_group_create(handler=acg_hdl) as acg:
+            self._nsr_regh = acg.register(xpath=NsrDtsHandler.NSR_XPATH,
+                                      flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+                                      on_prepare=on_prepare)
+
+            self._scale_regh = acg.register(
+                                      xpath=NsrDtsHandler.SCALE_INSTANCE_XPATH,
+                                      flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+                                      )
+
+
+class XPaths(object):
+    @staticmethod
+    def nsr_config(nsr_id=None):
+        return ("C,/nsr:ns-instance-config/nsr:nsr" +
+                ("[nsr:id='{}']".format(nsr_id) if nsr_id is not None else ""))
+
+    @staticmethod
+    def scaling_group_instance(nsr_id, group_name, instance_id):
+        return ("C,/nsr:ns-instance-config/nsr:nsr" +
+                "[nsr:id='{}']".format(nsr_id) +
+                "/nsr:scaling-group" +
+                "[nsr:scaling-group-name-ref='{}']".format(group_name) +
+                "/nsr:instance" +
+                "[nsr:id='{}']".format(instance_id)
+                )
+
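+# For reference, a hypothetical call such as
+#   XPaths.scaling_group_instance("nsr-1", "web", 1)
+# yields the keyed xpath below (illustrative values, wrapped for readability):
+#   C,/nsr:ns-instance-config/nsr:nsr[nsr:id='nsr-1']/nsr:scaling-group
+#       [nsr:scaling-group-name-ref='web']/nsr:instance[nsr:id='1']
+# i.e. each YANG list along the path is addressed by its key predicate.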
+
+class NsrHandlerTestCase(rift.test.dts.AbstractDTSTest):
+    """
+    DTS GI interface unittests
+    """
+    @classmethod
+    def configure_schema(cls):
+        return NsrYang.get_schema()
+
+    @classmethod
+    def configure_timeout(cls):
+        return 240
+
+    def configure_test(self, loop, test_id):
+        self.log.debug("STARTING - %s", self.id())
+        self.tinfo = self.new_tinfo(self.id())
+        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+        self.handler = NsrDtsHandler(self.dts, self.log, self.loop, None)
+
+        self.tinfo_c = self.new_tinfo(self.id() + "_client")
+        self.dts_c = rift.tasklets.DTS(self.tinfo_c, self.schema, self.loop)
+
+    @rift.test.dts.async_test
+    def test_add_delete_ns(self):
+
+        nsr1_uuid = "nsr1_uuid" # str(uuid.uuid4())
+        nsr2_uuid = "nsr2_uuid" # str(uuid.uuid4())
+
+        assert nsr1_uuid != nsr2_uuid
+
+        yield from self.handler.register()
+        yield from asyncio.sleep(.5, loop=self.loop)
+
+        self.log.debug("Creating NSR")
+        with self.dts_c.transaction() as xact:
+            block = xact.block_create()
+            block.add_query_update(
+                XPaths.nsr_config(nsr1_uuid),
+                NsrYang.YangData_Nsr_NsInstanceConfig_Nsr(id=nsr1_uuid, name="fu"),
+                flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
+                )
+            yield from block.execute(now=True)
+
+        yield from asyncio.sleep(.5, loop=self.loop)
+
+        with self.dts_c.transaction() as xact:
+            block = xact.block_create()
+            block.add_query_update(
+                    XPaths.scaling_group_instance(nsr1_uuid, "group", 1234),
+                    NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=1234),
+                    flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
+                    )
+            yield from block.execute(now=True)
+
+        yield from asyncio.sleep(.5, loop=self.loop)
+
+        with self.dts_c.transaction() as xact:
+            block = xact.block_create()
+            block.add_query_delete(
+                    XPaths.scaling_group_instance(nsr1_uuid, "group", 1234),
+                    flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
+                    )
+            yield from block.execute(now=True)
+
+        yield from asyncio.sleep(.5, loop=self.loop)
+
+        with self.dts_c.transaction() as xact:
+            block = xact.block_create()
+            block.add_query_create(
+                    XPaths.scaling_group_instance(nsr1_uuid, "group", 12345),
+                    NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=12345),
+                    flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
+                    )
+            yield from block.execute(now=True)
+
+        yield from asyncio.sleep(.5, loop=self.loop)
+
+        group_ids = self.handler.get_scale_group_instances(nsr2_uuid, "group")
+        self.log.debug("Got group ids in nsr2 after adding 12345 to nsr1: %s", group_ids)
+        group_ids = self.handler.get_scale_group_instances(nsr1_uuid, "group")
+        self.log.debug("Got group ids in nsr1 after adding 12345 to nsr1: %s", group_ids)
+        assert group_ids == {12345}
+
+        self.log.debug("\n\nADD A COMPLETELY DIFFERENT NSR\n")
+        with self.dts_c.transaction() as xact:
+            block = xact.block_create()
+            block.add_query_update(
+                XPaths.nsr_config(nsr2_uuid),
+                NsrYang.YangData_Nsr_NsInstanceConfig_Nsr(id=nsr2_uuid, name="fu2"),
+                flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
+                )
+            yield from block.execute(now=True)
+
+        yield from asyncio.sleep(.5, loop=self.loop)
+        group_ids = self.handler.get_scale_group_instances(nsr2_uuid, "group")
+        self.log.debug("Got group ids in nsr2 after adding new nsr: %s", group_ids)
+        group_ids = self.handler.get_scale_group_instances(nsr1_uuid, "group")
+        self.log.debug("Got group ids in nsr1 after adding new nsr: %s", group_ids)
+        assert group_ids == {12345}
+
+        self.log.debug("\n\nDELETE A COMPLETELY DIFFERENT NSR\n")
+        with self.dts_c.transaction() as xact:
+            block = xact.block_create()
+            block.add_query_delete(
+                XPaths.nsr_config(nsr2_uuid),
+                flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
+                )
+            yield from block.execute(now=True)
+
+        yield from asyncio.sleep(.5, loop=self.loop)
+
+        group_ids = self.handler.get_scale_group_instances(nsr2_uuid, "group")
+        self.log.debug("Got group ids in nsr2 after deleting nsr2: %s", group_ids)
+        group_ids = self.handler.get_scale_group_instances(nsr1_uuid, "group")
+        self.log.debug("Got group ids in nsr1 after deleting nsr2: %s", group_ids)
+        assert group_ids == {12345}
+
+        with self.dts_c.transaction() as xact:
+            block = xact.block_create()
+            block.add_query_delete(
+                    XPaths.scaling_group_instance(nsr1_uuid, "group", 12345),
+                    flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
+                    )
+            yield from block.execute(now=True)
+
+        yield from asyncio.sleep(2, loop=self.loop)
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unittest_args = parser.parse_known_args()
+    if args.no_runner:
+        runner = None
+    else:
+        runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    NsrHandlerTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/test/utest_ro_account.py b/rwlaunchpad/test/utest_ro_account.py
new file mode 100644 (file)
index 0000000..6e480d4
--- /dev/null
@@ -0,0 +1,153 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import asyncio
+import sys
+import types
+import unittest
+import uuid
+
+import rift.test.dts
+import rift.tasklets.rwnsmtasklet.cloud as cloud
+import rift.tasklets.rwnsmtasklet.openmano_nsm as openmano_nsm
+import rw_peas
+
+import gi
+gi.require_version('RwDtsYang', '1.0')
+from gi.repository import (
+        RwLaunchpadYang as launchpadyang,
+        RwDts as rwdts,
+        RwVnfdYang,
+        RwVnfrYang,
+        RwNsrYang,
+        RwNsdYang,
+        VnfrYang
+        )
+
+
+class DescriptorPublisher(object):
+    def __init__(self, log, dts, loop):
+        self.log = log
+        self.loop = loop
+        self.dts = dts
+
+        self._registrations = []
+
+    @asyncio.coroutine
+    def publish(self, w_path, path, desc):
+        ready_event = asyncio.Event(loop=self.loop)
+
+        @asyncio.coroutine
+        def on_ready(regh, status):
+            self.log.debug("Create element: %s, obj-type:%s obj:%s",
+                           path, type(desc), desc)
+            with self.dts.transaction() as xact:
+                regh.create_element(path, desc, xact.xact)
+            self.log.debug("Created element: %s, obj:%s", path, desc)
+            ready_event.set()
+
+        handler = rift.tasklets.DTS.RegistrationHandler(
+                on_ready=on_ready
+                )
+
+        self.log.debug("Registering path: %s, obj:%s", w_path, desc)
+        reg = yield from self.dts.register(
+                w_path,
+                handler,
+                flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ
+                )
+        self._registrations.append(reg)
+        self.log.debug("Registered path : %s", w_path)
+        yield from ready_event.wait()
+
+        return reg
+
+    def unpublish_all(self):
+        self.log.debug("Deregistering all published descriptors")
+        for reg in self._registrations:
+            reg.deregister()
+
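+# DescriptorPublisher.publish() blocks until DTS reports the registration
+# ready: on_ready creates the element and then sets an asyncio.Event that the
+# publishing coroutine waits on. A minimal sketch of that synchronization
+# pattern, independent of DTS (register_cb and do_side_effect are
+# illustrative placeholders):
+#
+#   ready = asyncio.Event(loop=loop)
+#
+#   def on_ready(*args):
+#       do_side_effect()
+#       ready.set()
+#
+#   register_cb(on_ready)
+#   yield from ready.wait()   # resumes only after the callback has run
+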
+class RoAccountDtsTestCase(rift.test.dts.AbstractDTSTest):
+    @classmethod
+    def configure_schema(cls):
+        return launchpadyang.get_schema()
+
+    @classmethod
+    def configure_timeout(cls):
+        return 240
+
+    def configure_test(self, loop, test_id):
+        self.log.debug("STARTING - %s", test_id)
+        self.tinfo = self.new_tinfo(str(test_id))
+        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+
+        self.tinfo_sub = self.new_tinfo(str(test_id) + "_sub")
+        self.dts_sub = rift.tasklets.DTS(self.tinfo_sub, self.schema, self.loop)
+
+        self.publisher = DescriptorPublisher(self.log, self.dts, self.loop)
+
+    def tearDown(self):
+        super().tearDown()
+
+    @rift.test.dts.async_test
+    def test_orch_account_create(self):
+        orch = cloud.ROAccountPluginSelector(self.dts, self.log, self.loop, None)
+
+        yield from orch.register()
+
+        # Test if we have a default plugin in case no RO is specified.
+        assert type(orch.ro_plugin) is cloud.RwNsPlugin
+        mock_orch_acc = launchpadyang.ResourceOrchestrator.from_dict(
+                {'name': 'rift-ro', 'account_type': 'rift_ro', 'rift_ro': {'rift_ro': True}})
+
+        # Test rift-ro plugin
+        w_xpath = "C,/rw-launchpad:resource-orchestrator"
+        xpath = w_xpath
+        yield from self.publisher.publish(w_xpath, xpath, mock_orch_acc)
+        yield from asyncio.sleep(5, loop=self.loop)
+
+        assert type(orch.ro_plugin) is cloud.RwNsPlugin
+
+        # Test Openmano plugin
+        mock_orch_acc = launchpadyang.ResourceOrchestrator.from_dict(
+                {'name': 'openmano',
+                 'account_type': 'openmano',
+                 'openmano': {'tenant_id': "abc"}})
+        yield from self.publisher.publish(w_xpath, xpath, mock_orch_acc)
+        yield from asyncio.sleep(5, loop=self.loop)
+
+        print(type(orch.ro_plugin))
+        assert type(orch.ro_plugin) is openmano_nsm.OpenmanoNsPlugin
+
+        # Test delete
+        yield from self.dts.query_delete("C,/rw-launchpad:resource-orchestrator",
+                flags=rwdts.XactFlag.ADVISE)
+        assert orch.ro_plugin is None
+
+
+def main(argv=sys.argv[1:]):
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(
+            argv=[__file__] + argv,
+            testRunner=None  # xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+            )
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/rwlaunchpad/test/utest_rwmonitor.py b/rwlaunchpad/test/utest_rwmonitor.py
new file mode 100755 (executable)
index 0000000..46c33b3
--- /dev/null
@@ -0,0 +1,873 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import asyncio
+import concurrent.futures
+import logging
+import os
+import sys
+import time
+import unittest
+import uuid
+import xmlrunner
+
+import gi
+gi.require_version('NsrYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwmonYang', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwMon', '1.0')
+
+from gi.repository import (
+        NsrYang,
+        RwTypes,
+        RwVnfrYang,
+        RwcalYang,
+        RwmonYang,
+        VnfrYang,
+        )
+
+from rift.tasklets.rwmonitor.core import (
+        AccountAlreadyRegisteredError,
+        AccountInUseError,
+        InstanceConfiguration,
+        Monitor,
+        NfviInterface,
+        NfviMetrics,
+        NfviMetricsCache,
+        NfviMetricsPluginManager,
+        PluginFactory,
+        PluginNotSupportedError,
+        PluginUnavailableError,
+        UnknownAccountError,
+        )
+import rw_peas
+
+
+class wait_for_pending_tasks(object):
+    """
+    This class defines a decorator that can be used to ensure that any asyncio
+    tasks created as a side-effect of coroutine are allowed to come to
+    completion.
+    """
+
+    def __init__(self, loop, timeout=1):
+        self.loop = loop
+        self.timeout = timeout
+
+    def __call__(self, coro):
+        @asyncio.coroutine
+        def impl():
+            original = self.pending_tasks()
+            result = yield from coro()
+
+            current = self.pending_tasks()
+            remaining = current - original
+
+            if remaining:
+                yield from asyncio.wait(
+                        remaining,
+                        timeout=self.timeout,
+                        loop=self.loop,
+                        )
+
+            return result
+
+        return impl
+
+    def pending_tasks(self):
+        return {t for t in asyncio.Task.all_tasks(loop=self.loop) if not t.done()}
+
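+# Intended usage of the decorator above (a sketch; loop and do_work are
+# placeholders): the pending tasks are snapshotted before the coroutine runs
+# and any newly spawned tasks are awaited afterwards, so side-effect tasks
+# finish before the test asserts on their results.
+#
+#   @wait_for_pending_tasks(loop)
+#   @asyncio.coroutine
+#   def test_body():
+#       yield from do_work()
+#
+#   loop.run_until_complete(test_body())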
+
+class MockTasklet(object):
+    def __init__(self, dts, log, loop, records):
+        self.dts = dts
+        self.log = log
+        self.loop = loop
+        self.records = records
+        self.polling_period = 0
+        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=16)
+
+
+def make_nsr(ns_instance_config_ref=None):
+    # Generate defaults inside the function body: a default argument expression
+    # such as str(uuid.uuid4()) is evaluated once at definition time, so every
+    # call would silently share the same UUID.
+    nsr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr()
+    nsr.ns_instance_config_ref = ns_instance_config_ref or str(uuid.uuid4())
+    return nsr
+
+def make_vnfr(id=None):
+    vnfr = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+    vnfr.id = id or str(uuid.uuid4())
+    return vnfr
+
+def make_vdur(id=None, vim_id=None):
+    vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+    vdur.id = id or str(uuid.uuid4())
+    vdur.vim_id = vim_id or str(uuid.uuid4())
+    return vdur
+
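+# The pitfall the helpers above avoid, in miniature (illustrative only):
+#
+#   def bad(x=str(uuid.uuid4())):   # default computed once, at import time
+#       return x
+#
+#   bad() == bad()                  # True: every call shares one UUID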
+
+class TestNfviMetricsCache(unittest.TestCase):
+    class Plugin(object):
+        def nfvi_metrics_available(self, cloud_account):
+            return True
+
+        def nfvi_metrics(self, account, vim_id):
+            metrics = RwmonYang.NfviMetrics()
+            metrics.vcpu.utilization = 0.5
+            return metrics
+
+    def setUp(self):
+        self.loop = asyncio.new_event_loop()
+        self.logger = logging.getLogger('test-logger')
+
+        self.account = RwcalYang.CloudAccount(
+                name='test-cloud-account',
+                account_type="mock",
+                )
+
+        self.plugin_manager = NfviMetricsPluginManager(self.logger)
+        self.plugin_manager.register(self.account, "mock")
+
+        mock = self.plugin_manager.plugin(self.account.name)
+        mock.set_impl(TestNfviMetricsCache.Plugin())
+
+        self.vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+        self.vdur.id = "test-vdur-id"
+        self.vdur.vim_id = "test-vim-id"
+        self.vdur.vm_flavor.vcpu_count = 4
+        self.vdur.vm_flavor.memory_mb = 1
+        self.vdur.vm_flavor.storage_gb = 1
+
+    def test_create_destroy_entry(self):
+        cache = NfviMetricsCache(self.logger, self.loop, self.plugin_manager)
+        self.assertEqual(len(cache._nfvi_metrics), 0)
+
+        cache.create_entry(self.account, self.vdur)
+        self.assertEqual(len(cache._nfvi_metrics), 1)
+
+        cache.destroy_entry(self.vdur.id)
+        self.assertEqual(len(cache._nfvi_metrics), 0)
+
+    def test_retrieve(self):
+        NfviMetrics.SAMPLE_INTERVAL = 1
+
+        cache = NfviMetricsCache(self.logger, self.loop, self.plugin_manager)
+        cache.create_entry(self.account, self.vdur)
+
+        @wait_for_pending_tasks(self.loop)
+        @asyncio.coroutine
+        def retrieve_metrics():
+            metrics = cache.retrieve("test-vim-id")
+            self.assertEqual(metrics.vcpu.utilization, 0.0)
+
+            yield from asyncio.sleep(NfviMetrics.SAMPLE_INTERVAL, loop=self.loop)
+
+            metrics = cache.retrieve("test-vim-id")
+            self.assertEqual(metrics.vcpu.utilization, 0.5)
+
+        self.loop.run_until_complete(retrieve_metrics())
+
+    def test_id_mapping(self):
+        cache = NfviMetricsCache(self.logger, self.loop, self.plugin_manager)
+
+        cache.create_entry(self.account, self.vdur)
+
+        self.assertEqual(cache.to_vim_id(self.vdur.id), self.vdur.vim_id)
+        self.assertEqual(cache.to_vdur_id(self.vdur.vim_id), self.vdur.id)
+        self.assertTrue(cache.contains_vdur_id(self.vdur.id))
+        self.assertTrue(cache.contains_vim_id(self.vdur.vim_id))
+
+        cache.destroy_entry(self.vdur.id)
+
+        self.assertFalse(cache.contains_vdur_id(self.vdur.id))
+        self.assertFalse(cache.contains_vim_id(self.vdur.vim_id))
+
+
+class TestNfviMetrics(unittest.TestCase):
+    class Plugin(object):
+        def nfvi_metrics_available(self, cloud_account):
+            return True
+
+        def nfvi_metrics(self, account, vim_id):
+            metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
+            metrics.vcpu.utilization = 0.5
+            return None, metrics
+
+    def setUp(self):
+        self.loop = asyncio.new_event_loop()
+        self.account = RwcalYang.CloudAccount(
+                name='test-cloud-account',
+                account_type="mock",
+                )
+
+        self.plugin = TestNfviMetrics.Plugin()
+        self.logger = logging.getLogger('test-logger')
+
+        self.vdur = make_vdur()
+        self.vdur.vm_flavor.vcpu_count = 4
+        self.vdur.vm_flavor.memory_mb = 100
+        self.vdur.vm_flavor.storage_gb = 2
+        self.vdur.vim_id = 'test-vim-id'
+
+    def test_update(self):
+        nfvi_metrics = NfviMetrics(
+                self.logger,
+                self.loop,
+                self.account,
+                self.plugin,
+                self.vdur,
+                )
+
+        # Reduce the SAMPLE_INTERVAL so that the test does not take a long time
+        nfvi_metrics.SAMPLE_INTERVAL = 1
+
+        # The metrics have never been retrieved so they should be updated
+        self.assertTrue(nfvi_metrics.should_update())
+
+        # The metrics returned will be empty because the cached version is empty.
+        # However, this triggers an update to retrieve metrics from the plugin.
+        metrics = nfvi_metrics.retrieve()
+        self.assertEqual(metrics.vcpu.utilization, 0.0)
+
+        # An update has been triggered by the retrieve call, so additional
+        # updates should not happen
+        self.assertFalse(nfvi_metrics.should_update())
+        self.assertFalse(nfvi_metrics._updating.done())
+
+        # Allow the event loop to run until the update is complete
+        @asyncio.coroutine
+        @wait_for_pending_tasks(self.loop)
+        def wait_for_update():
+            yield from asyncio.wait_for(
+                    nfvi_metrics._updating,
+                    timeout=2,
+                    loop=self.loop,
+                    )
+
+        self.loop.run_until_complete(wait_for_update())
+
+        # Check that we have a new metrics object
+        metrics = nfvi_metrics.retrieve()
+        self.assertEqual(metrics.vcpu.utilization, 0.5)
+
+        # We have just updated the metrics so it should be unnecessary to update
+        # right now
+        self.assertFalse(nfvi_metrics.should_update())
+        self.assertTrue(nfvi_metrics._updating.done())
+
+        # Wait an amount of time equal to the SAMPLE_INTERVAL. This ensures
+        # that the metrics that were just retrieved become stale...
+        time.sleep(NfviMetrics.SAMPLE_INTERVAL)
+
+        # ...now it is time to update again
+        self.assertTrue(nfvi_metrics.should_update())
+
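+    # The behaviour exercised above follows a common sample-interval caching
+    # pattern. A minimal sketch of that pattern (not the actual NfviMetrics
+    # implementation; names are illustrative):
+    #
+    #   class SampledValue:
+    #       SAMPLE_INTERVAL = 1
+    #
+    #       def __init__(self):
+    #           self._value, self._timestamp = None, 0
+    #
+    #       def should_update(self):
+    #           return time.time() - self._timestamp >= self.SAMPLE_INTERVAL
+    #
+    #       def retrieve(self):
+    #           if self.should_update():
+    #               schedule_async_refresh()  # updates _value/_timestamp later
+    #           return self._value            # cached value returned immediately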
+
+class TestNfviInterface(unittest.TestCase):
+    class NfviPluginImpl(object):
+        def __init__(self):
+            self._alarms = set()
+
+        def nfvi_metrics(self, account, vm_id):
+            return RwmonYang.NfviMetrics()
+
+        def nfvi_metrics_available(self, account):
+            return True
+
+        def alarm_create(self, account, vim_id, alarm):
+            alarm.alarm_id = str(uuid.uuid4())
+            self._alarms.add(alarm.alarm_id)
+            return RwTypes.RwStatus.SUCCESS
+
+        def alarm_delete(self, account, alarm_id):
+            self._alarms.remove(alarm_id)
+            return RwTypes.RwStatus.SUCCESS
+
+    def setUp(self):
+        self.loop = asyncio.new_event_loop()
+        self.logger = logging.getLogger('test-logger')
+
+        self.account = RwcalYang.CloudAccount(
+                name='test-cloud-account',
+                account_type="mock",
+                )
+
+        # Define the VDUR to avoid division by zero
+        self.vdur = make_vdur()
+        self.vdur.vm_flavor.vcpu_count = 4
+        self.vdur.vm_flavor.memory_mb = 100
+        self.vdur.vm_flavor.storage_gb = 2
+        self.vdur.vim_id = 'test-vim-id'
+
+        self.plugin_manager = NfviMetricsPluginManager(self.logger)
+        self.plugin_manager.register(self.account, "mock")
+
+        self.cache = NfviMetricsCache(
+                self.logger,
+                self.loop,
+                self.plugin_manager,
+                )
+
+        self.nfvi_interface = NfviInterface(
+                self.loop,
+                self.logger,
+                self.plugin_manager,
+                self.cache
+                )
+
+    def test_nfvi_metrics_available(self):
+        self.assertTrue(self.nfvi_interface.nfvi_metrics_available(self.account))
+
+    def test_retrieve(self):
+        pass
+
+    def test_alarm_create_and_destroy(self):
+        alarm = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_Alarms()
+        alarm.name = "test-alarm"
+        alarm.description = "test-description"
+        alarm.vdur_id = "test-vdur-id"
+        alarm.metric = "CPU_UTILIZATION"
+        alarm.statistic = "MINIMUM"
+        alarm.operation = "GT"
+        alarm.value = 0.1
+        alarm.period = 10
+        alarm.evaluations = 1
+
+        plugin_impl = TestNfviInterface.NfviPluginImpl()
+        plugin = self.plugin_manager.plugin(self.account.name)
+        plugin.set_impl(plugin_impl)
+
+        self.assertEqual(len(plugin_impl._alarms), 0)
+
+        @asyncio.coroutine
+        @wait_for_pending_tasks(self.loop)
+        def wait_for_create():
+            coro = self.nfvi_interface.alarm_create(
+                    self.account,
+                    "test-vim-id",
+                    alarm,
+                    )
+            yield from asyncio.wait_for(
+                    coro,
+                    timeout=2,
+                    loop=self.loop,
+                    )
+
+        self.loop.run_until_complete(wait_for_create())
+        self.assertEqual(len(plugin_impl._alarms), 1)
+        self.assertTrue(alarm.alarm_id is not None)
+
+        @asyncio.coroutine
+        @wait_for_pending_tasks(self.loop)
+        def wait_for_destroy():
+            coro = self.nfvi_interface.alarm_destroy(
+                    self.account,
+                    alarm.alarm_id,
+                    )
+            yield from asyncio.wait_for(
+                    coro,
+                    timeout=2,
+                    loop=self.loop,
+                    )
+
+        self.loop.run_until_complete(wait_for_destroy())
+        self.assertEqual(len(plugin_impl._alarms), 0)
+
+
+class TestVdurNfviMetrics(unittest.TestCase):
+    def setUp(self):
+        # Reduce the sample interval so that tests run quickly
+        NfviMetrics.SAMPLE_INTERVAL = 0.1
+
+        # Create a mock plugin to define the metrics retrieved. The plugin will
+        # return a VCPU utilization of 0.5.
+        class MockPlugin(object):
+            def __init__(self):
+                self.metrics = RwmonYang.NfviMetrics()
+
+            def nfvi_metrics(self, account, vim_id):
+                self.metrics.vcpu.utilization = 0.5
+                return self.metrics
+
+        self.loop = asyncio.get_event_loop()
+        self.logger = logging.getLogger('test-logger')
+
+        self.account = RwcalYang.CloudAccount(
+                name='test-cloud-account',
+                account_type="mock",
+                )
+
+        # Define the VDUR to avoid division by zero
+        vdur = make_vdur()
+        vdur.vm_flavor.vcpu_count = 4
+        vdur.vm_flavor.memory_mb = 100
+        vdur.vm_flavor.storage_gb = 2
+        vdur.vim_id = 'test-vim-id'
+
+        # Instantiate the mock plugin
+        self.plugin_manager = NfviMetricsPluginManager(self.logger)
+        self.plugin_manager.register(self.account, "mock")
+
+        self.plugin = self.plugin_manager.plugin(self.account.name)
+        self.plugin.set_impl(MockPlugin())
+
+        self.cache = NfviMetricsCache(
+                self.logger,
+                self.loop,
+                self.plugin_manager,
+                )
+
+        self.manager = NfviInterface(
+                self.loop,
+                self.logger,
+                self.plugin_manager,
+                self.cache,
+                )
+
+        self.metrics = NfviMetrics(
+                self.logger,
+                self.loop,
+                self.account,
+                self.plugin,
+                vdur,
+                )
+
+    def test_retrieval(self):
+        metrics_a = None
+        metrics_b = None
+
+        # Define a coroutine that can be added to the asyncio event loop
+        @asyncio.coroutine
+        def update():
+            # Output from the metrics calls will be written to these nonlocal
+            # variables
+            nonlocal metrics_a
+            nonlocal metrics_b
+
+            # This first call will return the current metrics values and
+            # schedule a request to the NFVI to retrieve metrics from the data
+            # source. All metrics will be zero at this point.
+            metrics_a = self.metrics.retrieve()
+
+            # Wait for the scheduled update to take effect
+            yield from asyncio.sleep(0.2, loop=self.loop)
+
+            # Retrieve the updated metrics
+            metrics_b = self.metrics.retrieve()
+
+        self.loop.run_until_complete(update())
+
+        # Check that the metrics returned indicate that the plugin was queried
+        # and returned the appropriate value, i.e. 0.5 utilization
+        self.assertEqual(0.0, metrics_a.vcpu.utilization)
+        self.assertEqual(0.5, metrics_b.vcpu.utilization)
+
+
+class TestNfviMetricsPluginManager(unittest.TestCase):
+    def setUp(self):
+        self.logger = logging.getLogger('test-logger')
+        self.plugins = NfviMetricsPluginManager(self.logger)
+        self.account = RwcalYang.CloudAccount(
+                name='test-cloud-account',
+                account_type="mock",
+                )
+
+    def test_mock_plugin(self):
+        # Register an account name with a mock plugin. If successful, the
+        # plugin manager should return a non-None object.
+        self.plugins.register(self.account, 'mock')
+        self.assertIsNotNone(self.plugins.plugin(self.account.name))
+
+        # Now unregister the cloud account
+        self.plugins.unregister(self.account.name)
+
+        # Trying to retrieve a plugin for a cloud account that has not been
+        # registered with the manager is expected to raise an exception.
+        with self.assertRaises(KeyError):
+            self.plugins.plugin(self.account.name)
+
+    def test_multiple_registration(self):
+        self.plugins.register(self.account, 'mock')
+
+        # Attempting to register the account with the same type of plugin
+        # again will cause an exception to be raised.
+        with self.assertRaises(AccountAlreadyRegisteredError):
+            self.plugins.register(self.account, 'mock')
+
+        # Attempting to register the account with a different plugin type
+        # ('openstack') will also cause an exception to be raised.
+        with self.assertRaises(AccountAlreadyRegisteredError):
+            self.plugins.register(self.account, 'openstack')
+
+    def test_unsupported_plugin(self):
+        # If an attempt is made to register a cloud account with an unknown
+        # type of plugin, a PluginNotSupportedError should be raised.
+        with self.assertRaises(PluginNotSupportedError):
+            self.plugins.register(self.account, 'unsupported-plugin')
+
+    def test_unavailable_plugin(self):
+        # Create a factory that always raises PluginUnavailableError
+        class UnavailablePluginFactory(PluginFactory):
+            PLUGIN_NAME = "unavailable-plugin"
+
+            def create(self, cloud_account):
+                raise PluginUnavailableError()
+
+        # Register the factory
+        self.plugins.register_plugin_factory(UnavailablePluginFactory())
+
+        # Ensure that the correct exception propagates when the cloud account
+        # is registered.
+        with self.assertRaises(PluginUnavailableError):
+            self.plugins.register(self.account, "unavailable-plugin")
+
+
+class TestMonitor(unittest.TestCase):
+    """
+    The Monitor class is the implementation that is called by the
+    MonitorTasklet. It provides the unified interface for controlling and
+    querying the monitoring functionality.
+    """
+
+    def setUp(self):
+        # Reduce the sample interval so that tests run quickly
+        NfviMetrics.SAMPLE_INTERVAL = 0.1
+
+        self.loop = asyncio.get_event_loop()
+        self.logger = logging.getLogger('test-logger')
+        self.config = InstanceConfiguration()
+        self.monitor = Monitor(self.loop, self.logger, self.config)
+
+        self.account = RwcalYang.CloudAccount(
+                name='test-cloud-account',
+                account_type="mock",
+                )
+
+    def test_instance_config(self):
+        """
+        Configuration data for an instance is passed to the Monitor when it is
+        created. The data is passed in the InstanceConfiguration object. This
+        object is typically shared between the tasklet and the monitor, and
+        provides a way for the tasklet to update the configuration of the
+        monitor.
+        """
+        self.assertTrue(hasattr(self.monitor._config, "polling_period"))
+        self.assertTrue(hasattr(self.monitor._config, "min_cache_lifetime"))
+        self.assertTrue(hasattr(self.monitor._config, "max_polling_frequency"))
+
+    def test_monitor_cloud_accounts(self):
+        """
+        This test checks the cloud accounts are correctly added and deleted,
+        and that the correct exceptions are raised on duplicate adds or
+        deletes.
+
+        """
+        # Add the cloud account to the monitor
+        self.monitor.add_cloud_account(self.account)
+        self.assertIn(self.account.name, self.monitor._cloud_accounts)
+
+        # Add the cloud account to the monitor again
+        with self.assertRaises(AccountAlreadyRegisteredError):
+            self.monitor.add_cloud_account(self.account)
+
+        # Delete the cloud account
+        self.monitor.remove_cloud_account(self.account.name)
+        self.assertNotIn(self.account.name, self.monitor._cloud_accounts)
+
+        # Delete the cloud account again
+        with self.assertRaises(UnknownAccountError):
+            self.monitor.remove_cloud_account(self.account.name)
+
+    def test_monitor_cloud_accounts_illegal_removal(self):
+        """
+        A cloud account may not be removed while there are plugins or records
+        that are associated with it. Attempting to delete such a cloud account
+        will raise an exception.
+        """
+        # Add the cloud account to the monitor
+        self.monitor.add_cloud_account(self.account)
+
+        # Create a VNFR associated with the cloud account
+        vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+        vnfr.cloud_account = self.account.name
+        vnfr.id = 'test-vnfr-id'
+
+        # Add a VDUR to the VNFR
+        vdur = vnfr.vdur.add()
+        vdur.vim_id = 'test-vim-id-1'
+        vdur.id = 'test-vdur-id-1'
+
+        # Now add the VNFR to the monitor
+        self.monitor.add_vnfr(vnfr)
+
+        # Check that the monitor contains the VNFR, VDUR, and metrics
+        self.assertTrue(self.monitor.is_registered_vdur(vdur.id))
+        self.assertTrue(self.monitor.is_registered_vnfr(vnfr.id))
+        self.assertEqual(1, len(self.monitor.metrics))
+
+        # Deleting the cloud account now should raise an exception because the
+        # VNFR and VDUR are associated with the cloud account.
+        with self.assertRaises(AccountInUseError):
+            self.monitor.remove_cloud_account(self.account.name)
+
+        # Now remove the VNFR from the monitor
+        self.monitor.remove_vnfr(vnfr.id)
+        self.assertFalse(self.monitor.is_registered_vdur(vdur.id))
+        self.assertFalse(self.monitor.is_registered_vnfr(vnfr.id))
+        self.assertEqual(0, len(self.monitor.metrics))
+
+        # Safely delete the cloud account
+        self.monitor.remove_cloud_account(self.account.name)
+
+    def test_vdur_registration(self):
+        """
+        When a VDUR is registered with the Monitor it is registered with the
+        VdurNfviMetricsManager. Thus it is assigned a plugin that can be used
+        to retrieve the NFVI metrics associated with the VDU.
+        """
+        # Define the VDUR to be registered
+        vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+        vdur.vm_flavor.vcpu_count = 4
+        vdur.vm_flavor.memory_mb = 100
+        vdur.vm_flavor.storage_gb = 2
+        vdur.vim_id = 'test-vim-id'
+        vdur.id = 'test-vdur-id'
+
+        # Before registering the VDUR, the cloud account needs to be added to
+        # the monitor.
+        self.monitor.add_cloud_account(self.account)
+
+        # Register the VDUR with the monitor
+        self.monitor.add_vdur(self.account, vdur)
+        self.assertTrue(self.monitor.is_registered_vdur(vdur.id))
+
+        # Check that the VDUR has been added to the metrics cache
+        self.assertTrue(self.monitor.cache.contains_vdur_id(vdur.id))
+
+        # Unregister the VDUR
+        self.monitor.remove_vdur(vdur.id)
+        self.assertFalse(self.monitor.is_registered_vdur(vdur.id))
+
+        # Check that the VDUR has been removed from the metrics cache
+        self.assertFalse(self.monitor.cache.contains_vdur_id(vdur.id))
+
+    def test_vnfr_add_update_delete(self):
+        """
+        When a VNFR is added to the Monitor a record is created of the
+        relationship between the VNFR and any VDURs that it contains. Each VDUR
+        is then registered with the VdurNfviMetricsManager. A VNFR can also be
+        updated so that it contains more or fewer VDURs. Any VDURs that are
+        added to the VNFR are registered with the VdurNfviMetricsManager, and
+        any that are removed are unregistered. When a VNFR is deleted, all of
+        the VDURs contained in the VNFR are unregistered.
+        """
+        # Define the VDUR to be registered
+        vdur = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+        vdur.vim_id = 'test-vim-id-1'
+        vdur.id = 'test-vdur-id-1'
+
+        vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+        vnfr.cloud_account = self.account.name
+        vnfr.id = 'test-vnfr-id'
+
+        vnfr.vdur.append(vdur)
+
+        self.monitor.add_cloud_account(self.account)
+
+        # Add the VNFR to the monitor. This will also register VDURs contained
+        # in the VNFR with the monitor.
+        self.monitor.add_vnfr(vnfr)
+        self.assertTrue(self.monitor.is_registered_vdur('test-vdur-id-1'))
+
+        # Add another VDUR to the VNFR and update the monitor. Both VDURs
+        # should now be registered
+        vdur = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+        vdur.vim_id = 'test-vim-id-2'
+        vdur.id = 'test-vdur-id-2'
+
+        vnfr.vdur.append(vdur)
+
+        self.monitor.update_vnfr(vnfr)
+        self.assertTrue(self.monitor.is_registered_vdur('test-vdur-id-1'))
+        self.assertTrue(self.monitor.is_registered_vdur('test-vdur-id-2'))
+
+        # Delete the VNFR from the monitor. This should remove the VNFR and all
+        # of the associated VDURs from the monitor.
+        self.monitor.remove_vnfr(vnfr.id)
+        self.assertFalse(self.monitor.is_registered_vnfr('test-vnfr-id'))
+        self.assertFalse(self.monitor.is_registered_vdur('test-vdur-id-1'))
+        self.assertFalse(self.monitor.is_registered_vdur('test-vdur-id-2'))
+
+        with self.assertRaises(KeyError):
+            self.monitor.retrieve_nfvi_metrics('test-vdur-id-1')
+
+        with self.assertRaises(KeyError):
+            self.monitor.retrieve_nfvi_metrics('test-vdur-id-2')
+
+    def test_complete(self):
+        """
+        This test simulates the addition of a VNFR to the Monitor (along with
+        updates), and retrieves NFVI metrics from the VDUR. The VNFR is then
+        deleted, which should result in a cleanup of all the data in the
+        Monitor.
+        """
+        # Create the VNFR
+        vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+        vnfr.cloud_account = self.account.name
+        vnfr.id = 'test-vnfr-id'
+
+        # Create 2 VDURs
+        vdur = vnfr.vdur.add()
+        vdur.id = 'test-vdur-id-1'
+        vdur.vim_id = 'test-vim-id-1'
+        vdur.vm_flavor.vcpu_count = 4
+        vdur.vm_flavor.memory_mb = 100
+        vdur.vm_flavor.storage_gb = 2
+
+        vdur = vnfr.vdur.add()
+        vdur.id = 'test-vdur-id-2'
+        vdur.vim_id = 'test-vim-id-2'
+        vdur.vm_flavor.vcpu_count = 4
+        vdur.vm_flavor.memory_mb = 100
+        vdur.vm_flavor.storage_gb = 2
+
+        class MockPlugin(object):
+            def __init__(self):
+                self._metrics = dict()
+                self._metrics['test-vim-id-1'] = RwmonYang.NfviMetrics()
+                self._metrics['test-vim-id-2'] = RwmonYang.NfviMetrics()
+
+            def nfvi_metrics(self, account, vim_id):
+                metrics = self._metrics[vim_id]
+
+                if vim_id == 'test-vim-id-1':
+                    metrics.memory.used += 1000
+                else:
+                    metrics.memory.used += 2000
+
+                return metrics
+
+        class MockFactory(PluginFactory):
+            PLUGIN_NAME = "mock"
+
+            def create(self, cloud_account):
+                plugin = rw_peas.PeasPlugin("rwmon_mock", 'RwMon-1.0')
+                impl = plugin.get_interface("Monitoring")
+                impl.set_impl(MockPlugin())
+                return impl
+
+        # Modify the mock plugin factory
+        self.monitor._nfvi_plugins._factories["mock"] = MockFactory()
+
+        # Add the cloud account the monitor
+        self.monitor.add_cloud_account(self.account)
+
+        # Add the VNFR to the monitor.
+        self.monitor.add_vnfr(vnfr)
+
+        @wait_for_pending_tasks(self.loop)
+        @asyncio.coroutine
+        def call1():
+            # call #1 (time = 0.00s)
+            # The metrics for these VDURs have not been populated yet so a
+            # default metrics object (all zeros) is returned, and a request is
+            # scheduled with the data source to retrieve the metrics.
+            metrics1 = self.monitor.retrieve_nfvi_metrics('test-vdur-id-1')
+            metrics2 = self.monitor.retrieve_nfvi_metrics('test-vdur-id-2')
+
+            self.assertEqual(0, metrics1.memory.used)
+            self.assertEqual(0, metrics2.memory.used)
+
+        self.loop.run_until_complete(call1())
+
+        @wait_for_pending_tasks(self.loop)
+        @asyncio.coroutine
+        def call2():
+            # call #2 (wait 0.05s)
+            # The metrics have been populated with data from the data source
+            # due to the request made during call #1.
+            yield from asyncio.sleep(0.05)
+
+            metrics1 = self.monitor.retrieve_nfvi_metrics('test-vdur-id-1')
+            metrics2 = self.monitor.retrieve_nfvi_metrics('test-vdur-id-2')
+
+            self.assertEqual(1000, metrics1.memory.used)
+            self.assertEqual(2000, metrics2.memory.used)
+
+        self.loop.run_until_complete(call2())
+
+        @wait_for_pending_tasks(self.loop)
+        @asyncio.coroutine
+        def call3():
+            # call #3 (wait 0.10s)
+            # This call exceeds 0.1s (the sample interval of the plugin)
+            # from when the data was retrieved. The cached metrics are
+            # immediately returned, but a request is made to the data source to
+            # refresh these metrics.
+            yield from asyncio.sleep(0.10)
+
+            metrics1 = self.monitor.retrieve_nfvi_metrics('test-vdur-id-1')
+            metrics2 = self.monitor.retrieve_nfvi_metrics('test-vdur-id-2')
+
+            self.assertEqual(1000, metrics1.memory.used)
+            self.assertEqual(2000, metrics2.memory.used)
+
+        self.loop.run_until_complete(call3())
+
+        @wait_for_pending_tasks(self.loop)
+        @asyncio.coroutine
+        def call4():
+            # call #4 (wait a further 0.10s)
+            # The metrics retrieved differ from those in call #3 because the
+            # cached metrics have been updated.
+            yield from asyncio.sleep(0.10)
+            metrics1 = self.monitor.retrieve_nfvi_metrics('test-vdur-id-1')
+            metrics2 = self.monitor.retrieve_nfvi_metrics('test-vdur-id-2')
+
+            self.assertEqual(2000, metrics1.memory.used)
+            self.assertEqual(4000, metrics2.memory.used)
+
+        self.loop.run_until_complete(call4())
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='TEST %(message)s')
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+
+    args = parser.parse_args(argv)
+
+    # Set the global logging level
+    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
+    # Set the logger in this test to use a null handler
+    logging.getLogger('test-logger').addHandler(logging.NullHandler())
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + argv,
+            testRunner=xmlrunner.XMLTestRunner(
+                output=os.environ["RIFT_MODULE_TEST"]))
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/test/utest_rwnsm.py b/rwlaunchpad/test/utest_rwnsm.py
new file mode 100755 (executable)
index 0000000..e125739
--- /dev/null
@@ -0,0 +1,215 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import argparse
+import logging
+import os
+import sys
+import unittest
+import uuid
+import xmlrunner
+
+from gi.repository import (
+        NsdYang,
+        NsrYang,
+        )
+
+logger = logging.getLogger('test-rwnsmtasklet')
+
+import rift.tasklets.rwnsmtasklet.rwnsmtasklet as rwnsmtasklet
+import rift.tasklets.rwnsmtasklet.xpath as rwxpath
+
+class TestGiXpath(unittest.TestCase):
+    def setUp(self):
+        rwxpath.reset_cache()
+
+    def test_nsd_elements(self):
+        """
+        Test that a particular element in a list is correctly retrieved. In
+        this case, we are trying to retrieve an NSD from the NSD catalog.
+
+        """
+        # Create the initial NSD catalog
+        nsd_catalog = NsdYang.YangData_Nsd_NsdCatalog()
+
+        # Create an NSD, set its 'id', and add it to the catalog
+        nsd_id = str(uuid.uuid4())
+        nsd_catalog.nsd.append(
+                NsdYang.YangData_Nsd_NsdCatalog_Nsd(
+                    id=nsd_id,
+                    )
+                )
+
+        # Retrieve the NSD using an xpath expression
+        xpath = '/nsd:nsd-catalog/nsd:nsd[nsd:id={}]'.format(nsd_id)
+        nsd = rwxpath.getxattr(nsd_catalog, xpath)
+
+        self.assertEqual(nsd_id, nsd.id)
+
+        # Modify the name of the NSD using an xpath expression
+        rwxpath.setxattr(nsd_catalog, xpath + "/nsd:name", "test-name")
+
+        name = rwxpath.getxattr(nsd_catalog, xpath + "/nsd:name")
+        self.assertEqual("test-name", name)
+
+    def test_nsd_scalar_fields(self):
+        """
+        Test that setxattr correctly sets the value specified by an xpath.
+
+        """
+        # Define a simple NSD
+        nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+
+        # Check that the unset fields are in fact set to None
+        self.assertEqual(None, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name"))
+        self.assertEqual(None, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name"))
+
+        # Set the values of the 'name' and 'short-name' fields
+        rwxpath.setxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name", "test-name")
+        rwxpath.setxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name", "test-short-name")
+
+        # Check that the 'name' and 'short-name' fields are correctly set
+        self.assertEqual(nsd.name, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name"))
+        self.assertEqual(nsd.short_name, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name"))
+
+
+class TestInputParameterSubstitution(unittest.TestCase):
+    def setUp(self):
+        self.substitute_input_parameters = rwnsmtasklet.InputParameterSubstitution(logger)
+
+    def test_null_arguments(self):
+        """
+        If None is passed to the substitutor for either the NSD or the NSR
+        config, no exception should be raised.
+
+        """
+        nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+        nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+
+        self.substitute_input_parameters(None, None)
+        self.substitute_input_parameters(nsd, None)
+        self.substitute_input_parameters(None, nsr_config)
+
+    def test_illegal_input_parameter(self):
+        """
+        In the NSD there is a list of the parameters that are allowed to be
+        substituted by input parameters. This test checks that when an input
+        parameter is provided in the NSR config that is not in the NSD, it is
+        not applied.
+
+        """
+        # Define the original NSD
+        nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+        nsd.name = "robert"
+        nsd.short_name = "bob"
+
+        # Define which parameters may be modified
+        nsd.input_parameter_xpath.append(
+                NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+                    xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
+                    label="NSD Name",
+                    )
+                )
+
+        # Define the input parameters that are intended to be modified
+        nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+        nsr_config.input_parameter.extend([
+            NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
+                value="alice",
+                ),
+            NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
+                value="alice",
+                ),
+            ])
+
+        self.substitute_input_parameters(nsd, nsr_config)
+
+        # Verify that only the parameter in the input_parameter_xpath list is
+        # modified after the input parameters have been applied.
+        self.assertEqual("alice", nsd.name)
+        self.assertEqual("bob", nsd.short_name)
+
+    def test_substitution(self):
+        """
+        Test that substitution of input parameters occurs as expected.
+
+        """
+        # Define the original NSD
+        nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+        nsd.name = "robert"
+        nsd.short_name = "bob"
+
+        # Define which parameters may be modified
+        nsd.input_parameter_xpath.extend([
+                NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+                    xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
+                    label="NSD Name",
+                    ),
+                NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+                    xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
+                    label="NSD Short Name",
+                    ),
+                ])
+
+        # Define the input parameters that are intended to be modified
+        nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+        nsr_config.input_parameter.extend([
+            NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
+                value="robert",
+                ),
+            NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+                xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
+                value="bob",
+                ),
+            ])
+
+        self.substitute_input_parameters(nsd, nsr_config)
+
+        # Verify that both the 'name' and 'short-name' fields are correctly
+        # replaced.
+        self.assertEqual("robert", nsd.name)
+        self.assertEqual("bob", nsd.short_name)
+
+
+def main(argv=sys.argv[1:]):
+    logging.basicConfig(format='TEST %(message)s')
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+
+    args = parser.parse_args(argv)
+
+    # Set the global logging level
+    logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.FATAL)
+
+    # Make the test logger very quiet
+    logger.addHandler(logging.NullHandler())
+
+    # The unittest framework requires a program name, so use the name of this
+    # file instead (we do not want to have to pass a fake program name to main
+    # when this is called from the interpreter).
+    unittest.main(argv=[__file__] + argv,
+            testRunner=xmlrunner.XMLTestRunner(
+                output=os.environ["RIFT_MODULE_TEST"]))
+
+if __name__ == '__main__':
+    main()
diff --git a/rwlaunchpad/test/utest_scaling_rpc.py b/rwlaunchpad/test/utest_scaling_rpc.py
new file mode 100644 (file)
index 0000000..b2290af
--- /dev/null
@@ -0,0 +1,177 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+
+import asyncio
+import os
+import sys
+import unittest
+import uuid
+import xmlrunner
+import argparse
+import logging
+import time
+import types
+
+import gi
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwNsmYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwResourceMgrYang', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('NsrYang', '1.0')
+gi.require_version('RwlogMgmtYang', '1.0')
+gi.require_version('RwConfigAgentYang', '1.0')
+
+from gi.repository import (
+    RwCloudYang as rwcloudyang,
+    RwDts as rwdts,
+    RwLaunchpadYang as launchpadyang,
+    RwNsmYang as rwnsmyang,
+    RwNsrYang as rwnsryang,
+    NsrYang as nsryang,
+    RwResourceMgrYang as rmgryang,
+    RwcalYang as rwcalyang,
+    RwConfigAgentYang as rwcfg_agent,
+    RwlogMgmtYang
+)
+
+from gi.repository.RwTypes import RwStatus
+import rift.mano.examples.ping_pong_nsd as ping_pong_nsd
+import rift.tasklets
+import rift.test.dts
+import rw_peas
+
+
+
+class ManoTestCase(rift.test.dts.AbstractDTSTest):
+    """
+    DTS GI interface unittests
+
+    Note:  Each test uses a list of asyncio.Events for staging through the
+    test.  These are required here because we bring up each coroutine
+    ("tasklet") at the same time and do not implement any re-try
+    mechanisms.  For instance, this is used in numerous tests to make sure that
+    a publisher is up and ready before the subscriber sends queries.  Such
+    event lists should not be used in production software.
+    """
+
+    @classmethod
+    def configure_suite(cls, rwmain):
+        nsm_dir = os.environ.get('NSM_DIR')
+
+        rwmain.add_tasklet(nsm_dir, 'rwnsmtasklet')
+
+    @classmethod
+    def configure_schema(cls):
+        return rwnsmyang.get_schema()
+
+    @classmethod
+    def configure_timeout(cls):
+        return 240
+
+    @staticmethod
+    def get_cal_account(account_type, account_name):
+        """
+        Creates and returns a CloudAccount object for the given account type.
+        """
+        account = rwcloudyang.CloudAccount()
+        if account_type == 'mock':
+            account.name          = account_name
+            account.account_type  = "mock"
+            account.mock.username = "mock_user"
+        elif account_type in ('openstack_static', 'openstack_dynamic'):
+            # Note: assumes a module-level 'openstack_info' dict supplying the
+            # OpenStack credentials; only the 'mock' account type is exercised
+            # by this test.
+            account.name = account_name
+            account.account_type = 'openstack'
+            account.openstack.key = openstack_info['username']
+            account.openstack.secret       = openstack_info['password']
+            account.openstack.auth_url     = openstack_info['auth_url']
+            account.openstack.tenant       = openstack_info['project_name']
+            account.openstack.mgmt_network = openstack_info['mgmt_network']
+        return account
+
+    @asyncio.coroutine
+    def configure_cloud_account(self, dts, cloud_type, cloud_name="cloud1"):
+        account = self.get_cal_account(cloud_type, cloud_name)
+        account_xpath = "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='{}']".format(cloud_name)
+        self.log.info("Configuring cloud-account: %s", account)
+        yield from dts.query_create(account_xpath,
+                                    rwdts.XactFlag.ADVISE,
+                                    account)
+
+    @asyncio.coroutine
+    def wait_tasklets(self):
+        yield from asyncio.sleep(5, loop=self.loop)
+
+    def configure_test(self, loop, test_id):
+        self.log.debug("STARTING - %s", self.id())
+        self.tinfo = self.new_tinfo(self.id())
+        self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+
+    def test_create_nsr_record(self):
+
+        @asyncio.coroutine
+        def run_test():
+            yield from self.wait_tasklets()
+
+            cloud_type = "mock"
+            yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account")
+
+
+            # Trigger an rpc
+            rpc_ip = nsryang.YangInput_Nsr_ExecScaleIn.from_dict({
+                'nsr_id_ref': '1',
+                'instance_id': "1",
+                'scaling_group_name_ref': "foo"})
+
+            yield from self.dts.query_rpc("/nsr:exec-scale-in", 0, rpc_ip)
+
+        future = asyncio.ensure_future(run_test(), loop=self.loop)
+        self.run_until(future.done)
+        if future.exception() is not None:
+            self.log.error("Caught exception during test")
+            raise future.exception()
+
+
+def main():
+    top_dir = __file__[:__file__.find('/modules/core/')]
+    build_dir = os.path.join(top_dir, '.build/modules/core/rwvx/src/core_rwvx-build')
+    launchpad_build_dir = os.path.join(top_dir, '.build/modules/core/mc/core_mc-build/rwlaunchpad')
+
+    if 'NSM_DIR' not in os.environ:
+        os.environ['NSM_DIR'] = os.path.join(launchpad_build_dir, 'plugins/rwnsm')
+
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument('-n', '--no-runner', action='store_true')
+    args, unittest_args = parser.parse_known_args()
+    if args.no_runner:
+        runner = None
+
+    ManoTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+    unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
+
+if __name__ == '__main__':
+    main()
+
+# vim: sw=4
diff --git a/rwmon/CMakeLists.txt b/rwmon/CMakeLists.txt
new file mode 100644 (file)
index 0000000..d10930b
--- /dev/null
@@ -0,0 +1,28 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Joshua Downer
+# Creation Date: 2015/10/27
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(PKG_NAME rwmon)
+set(PKG_VERSION 1.0)
+set(PKG_RELEASE 1)
+set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+
+set(subdirs plugins test)
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwmon/Makefile b/rwmon/Makefile
new file mode 100644 (file)
index 0000000..14f3400
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 05/22/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwmon/plugins/CMakeLists.txt b/rwmon/plugins/CMakeLists.txt
new file mode 100644 (file)
index 0000000..9a0722a
--- /dev/null
@@ -0,0 +1,23 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Joshua Downer
+# Creation Date: 2015/10/27
+# 
+
+cmake_minimum_required(VERSION 2.8)
+
+set(subdirs vala yang)
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwmon/plugins/vala/CMakeLists.txt b/rwmon/plugins/vala/CMakeLists.txt
new file mode 100644 (file)
index 0000000..aa900de
--- /dev/null
@@ -0,0 +1,70 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Joshua Downer
+# Creation Date: 2015/10/27
+# 
+
+##
+# Allow specific compiler warnings
+##
+rift_allow_compiler_warning(unused-but-set-variable)
+
+set(VALA_NAME rwmon)
+set(VALA_FILES ${VALA_NAME}.vala)
+set(VALA_VERSION 1.0)
+set(VALA_RELEASE 1)
+set(VALA_LONG_NAME ${VALA_NAME}-${VALA_VERSION})
+set(VALA_TYPELIB_PREFIX RwMon-${VALA_VERSION})
+
+rift_add_vala(
+  ${VALA_LONG_NAME}
+  VALA_FILES ${VALA_FILES}
+  VALA_PACKAGES
+    rw_types-1.0 rw_yang-1.0 rw_keyspec-1.0 rw_yang_pb-1.0 rw_schema_proto-1.0
+    rw_log_yang-1.0 rw_base_yang-1.0 rwmon_yang-1.0 rw_manifest_yang-1.0 protobuf_c-1.0 ietf_netconf_yang-1.0
+    rw_log-1.0 rwcal_yang-1.0
+  VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwmon/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwvcs/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwlog/src
+  GIR_PATHS ${RIFT_SUBMODULE_BINARY_ROOT}/rwmon/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwvcs/plugins/yang
+            ${RIFT_SUBMODULE_BINARY_ROOT}/rwlog/src
+  GENERATE_HEADER_FILE ${VALA_NAME}.h
+  GENERATE_SO_FILE lib${VALA_LONG_NAME}.so
+  GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
+  GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
+  GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
+  DEPENDS rwmon_yang rwcal_yang rwlog_gi rwschema_yang
+  )
+
+rift_install_vala_artifacts(
+  SO_FILES lib${VALA_LONG_NAME}.so
+  VAPI_FILES ${VALA_LONG_NAME}.vapi
+  GIR_FILES ${VALA_TYPELIB_PREFIX}.gir
+  TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib
+  COMPONENT ${PKG_LONG_NAME}
+  DEST_PREFIX .
+  )
+
+set(subdirs
+  rwmon_ceilometer
+  rwmon_mock
+  )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
diff --git a/rwmon/plugins/vala/Makefile b/rwmon/plugins/vala/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwmon/plugins/vala/rwmon.vala b/rwmon/plugins/vala/rwmon.vala
new file mode 100644 (file)
index 0000000..560a5ec
--- /dev/null
@@ -0,0 +1,149 @@
+namespace RwMon {
+  public interface Monitoring: GLib.Object {
+    /**
+     * Init routine
+     *
+     * @param log_ctx - [in] the log context to use
+     *
+     * @return RwStatus
+     */
+    public abstract RwTypes.RwStatus init(RwLog.Ctx log_ctx);
+
+    /**
+     * nfvi_metrics
+     *
+     * Returns the NFVI metrics for a particular VM
+     *
+     * @param account - [in] the account details of the owner of the VM
+     * @param vm_id   - [in] the ID of the VM to retrieve the metrics from
+     * @param metrics - [out] An NfviMetrics object
+     *
+     * @return RwStatus
+     */
+    public abstract RwTypes.RwStatus nfvi_metrics(
+      Rwcal.CloudAccount account,
+      string vm_id,
+      out Rwmon.NfviMetrics metrics);
+
+    /**
+     * nfvi_vcpu_metrics
+     *
+     * Returns the VCPU metrics for a particular VM
+     *
+     * @param account - [in] the account details of the owner of the VM
+     * @param vm_id   - [in] the ID of the VM to retrieve the metrics from
+     * @param metrics - [out] An NfviMetrics_Vcpu object
+     *
+     * @return RwStatus
+     */
+    public abstract RwTypes.RwStatus nfvi_vcpu_metrics(
+      Rwcal.CloudAccount account,
+      string vm_id,
+      out Rwmon.NfviMetrics_Vcpu metrics);
+
+    /**
+     * nfvi_memory_metrics
+     *
+     * Returns the memory metrics for a particular VM
+     *
+     * @param account - [in] the account details of the owner of the VM
+     * @param vm_id   - [in] the ID of the VM to retrieve the metrics from
+     * @param metrics - [out] An NfviMetrics_Memory object
+     *
+     * @return RwStatus
+     */
+    public abstract RwTypes.RwStatus nfvi_memory_metrics(
+      Rwcal.CloudAccount account,
+      string vm_id,
+      out Rwmon.NfviMetrics_Memory metrics);
+
+    /**
+     * nfvi_storage_metrics
+     *
+     * Returns the storage metrics for a particular VM
+     *
+     * @param account - [in] the account details of the owner of the VM
+     * @param vm_id   - [in] the ID of the VM to retrieve the metrics from
+     * @param metrics - [out] An NfviMetrics_Storage object
+     *
+     * @return RwStatus
+     */
+    public abstract RwTypes.RwStatus nfvi_storage_metrics(
+      Rwcal.CloudAccount account,
+      string vm_id,
+      out Rwmon.NfviMetrics_Storage metrics);
+
+    /**
+     * nfvi_metrics_available
+     *
+     * Checks whether ceilometer exists for this account and is 
+     * providing NFVI metrics
+     *
+     * @param account - [in] the account details of the owner of the VM
+     * @param present - [out] True if ceilometer exists, False otherwise
+     *
+     * @return RwStatus
+     */
+    public abstract RwTypes.RwStatus nfvi_metrics_available(
+      Rwcal.CloudAccount account,
+      out bool present);
+
+    /**
+     * alarm_create
+     *
+     * @param account - [in] the credentials required to update the alarm.
+     * @param vim_id  - [in] the identifier assigned by a VIM to the VDU that
+     *                  the alarm is associated with.
+     * @param alarm   - [ref] an alarm structure defining the alarm. Note that
+     *                  the underlying implementation will fill in the alarm_id
+     *                  attribute, which is required for modifying or deleting
+     *                  an alarm.
+     *
+     * @return RwStatus
+     */
+    public abstract RwTypes.RwStatus alarm_create(
+      Rwcal.CloudAccount account,
+      string vim_id,
+      ref Rwmon.Alarm alarm);
+
+    /**
+     * alarm_update
+     *
+     * @param account - [in] the credentials required to update the alarm.
+     * @param alarm   - [in] the complete alarm structure defining the alarm.
+     *                  This means that the alarm structure must contain a
+     *                  valid alarm_id, and any attribute of the structure that
+     *                  differs from the actual alarm will be updated to match
+     *                  the provided data.
+     *
+     * @return RwStatus
+     */
+    public abstract RwTypes.RwStatus alarm_update(
+      Rwcal.CloudAccount account,
+      Rwmon.Alarm alarm);
+
+    /**
+     * alarm_delete
+     *
+     * @param account  - [in] the credentials required to delete the alarm.
+     * @param alarm_id - [in] the identifier of the alarm to delete
+     *
+     * @return RwStatus
+     */
+    public abstract RwTypes.RwStatus alarm_delete(
+      Rwcal.CloudAccount account,
+      string alarm_id);
+
+    /**
+     * alarm_list
+     *
+     * @param account  - [in] the credentials required to list the alarms.
+     * @param alarms   - [out] a list of alarms
+     *
+     * @return RwStatus
+     */
+    public abstract RwTypes.RwStatus alarm_list(
+      Rwcal.CloudAccount account,
+      out Rwmon.Alarm[] alarms);
+  }
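+
+  /*
+   * Typical alarm lifecycle (an illustrative sketch only; 'mon' is assumed
+   * to be a concrete Monitoring implementation and 'account' a populated
+   * Rwcal.CloudAccount):
+   *
+   *   var alarm = new Rwmon.Alarm();
+   *   mon.alarm_create(account, vim_id, ref alarm);  // fills alarm.alarm_id
+   *   mon.alarm_update(account, alarm);
+   *   mon.alarm_delete(account, alarm.alarm_id);
+   */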
+}
diff --git a/rwmon/plugins/vala/rwmon_ceilometer/CMakeLists.txt b/rwmon/plugins/vala/rwmon_ceilometer/CMakeLists.txt
new file mode 100644 (file)
index 0000000..8a8e353
--- /dev/null
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwmon_ceilometer rwmon_ceilometer.py)
diff --git a/rwmon/plugins/vala/rwmon_ceilometer/Makefile b/rwmon/plugins/vala/rwmon_ceilometer/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwmon/plugins/vala/rwmon_ceilometer/rwmon_ceilometer.py b/rwmon/plugins/vala/rwmon_ceilometer/rwmon_ceilometer.py
new file mode 100644 (file)
index 0000000..bc6506c
--- /dev/null
@@ -0,0 +1,536 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import collections
+import dateutil.parser
+import json
+import logging
+import urllib.parse
+
+import requests
+
+import gi
+gi.require_version('RwTypes', '1.0')
+gi.require_version('RwcalYang', '1.0')
+gi.require_version('RwmonYang', '1.0')
+
+from gi.repository import (
+    GObject,
+    RwMon,
+    RwTypes,
+    RwmonYang,
+    )
+
+import rift.rwcal.openstack as openstack_drv
+import rw_status
+import rwlogger
+
+logger = logging.getLogger('rwmon.ceilometer')
+
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+
+class UnknownService(Exception):
+    pass
+
+
+class CeilometerMonitoringPlugin(GObject.Object, RwMon.Monitoring):
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self._driver_class = openstack_drv.OpenstackDriver
+
+    def _get_driver(self, account):
+        return self._driver_class(username = account.openstack.key,
+                                  password = account.openstack.secret,
+                                  auth_url = account.openstack.auth_url,
+                                  tenant_name = account.openstack.tenant,
+                                  mgmt_network = account.openstack.mgmt_network)
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    category="rw-monitor-log",
+                    subcategory="ceilometer",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
+    @rwstatus(ret_on_failure=[None])
+    def do_nfvi_metrics(self, account, vm_id):
+        try:
+            samples = self._get_driver(account).ceilo_nfvi_metrics(vm_id)
+
+            metrics = RwmonYang.NfviMetrics()
+
+            vcpu = samples.get("cpu_util", {})
+            memory = samples.get("memory_usage", {})
+            storage = samples.get("disk_usage", {})
+
+            metrics.vcpu.utilization = vcpu.get("volume", 0)
+            metrics.memory.used = memory.get("volume", 0)
+            metrics.storage.used = storage.get("volume", 0)
+
+            def convert_timestamp(t):
+                return dateutil.parser.parse(t).timestamp()
+
+            timestamps = []
+            if 'timestamp' in vcpu:
+                timestamps.append(convert_timestamp(vcpu['timestamp']))
+            if 'timestamp' in memory:
+                timestamps.append(convert_timestamp(memory['timestamp']))
+            if 'timestamp' in storage:
+                timestamps.append(convert_timestamp(storage['timestamp']))
+
+            metrics.timestamp = max(timestamps) if timestamps else 0.0
+
+            return metrics
+
+        except Exception as e:
+            logger.exception(e)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_nfvi_vcpu_metrics(self, account, vm_id):
+        try:
+            samples = self._get_driver(account).ceilo_nfvi_metrics(vm_id)
+
+            metrics = RwmonYang.NfviMetrics_Vcpu()
+            metrics.utilization = samples.get("cpu_util", {}).get("volume", 0)
+
+            return metrics
+
+        except Exception as e:
+            logger.exception(e)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_nfvi_memory_metrics(self, account, vm_id):
+        try:
+            samples = self._get_driver(account).ceilo_nfvi_metrics(vm_id)
+
+            metrics = RwmonYang.NfviMetrics_Memory()
+            metrics.used = samples.get("memory_usage", 0)
+
+            return metrics
+
+        except Exception as e:
+            logger.exception(e)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_nfvi_storage_metrics(self, account, vm_id):
+        try:
+            samples = self._get_driver(account).ceilo_nfvi_metrics(vm_id)
+
+            metrics = RwmonYang.NfviMetrics_Storage()
+            metrics.used = samples.get("disk_usage", 0)
+
+            return metrics
+
+        except Exception as e:
+            logger.exception(e)
+
+    @rwstatus(ret_on_failure=[False])
+    def do_nfvi_metrics_available(self, account):
+        try:
+            endpoint = self._get_driver(account).ceilo_meter_endpoint()
+        except Exception:
+            return False
+
+        return endpoint is not None
+
+    @rwstatus(ret_on_failure=[None])
+    def do_alarm_create(self, account, vim_id, alarm):
+        # Retrieve a token using account information
+        token = openstack_auth_token(account)
+        service = token.service("ceilometer")
+        headers = {"content-type": "application/json", "x-auth-token": token.id}
+
+        # Convert the alarm from its YANG representation into something that
+        # can be passed to the openstack interface
+        ceilometer_alarm = CeilometerAlarm.from_gi_obj(alarm, vim_id).to_dict()
+
+        # POST the data to ceilometer
+        response = requests.post(
+                service.url.public + "/v2/alarms",
+                headers=headers,
+                data=json.dumps(ceilometer_alarm),
+                timeout=5,
+                )
+
+        # Return the parsed response and update the alarm ID
+        obj = response.json()
+        alarm.alarm_id = obj['alarm_id']
+        return obj
+
+    @rwstatus(ret_on_failure=[None])
+    def do_alarm_update(self, account, alarm):
+        # Retrieve a token using account information
+        token = openstack_auth_token(account)
+        service = token.service("ceilometer")
+        headers = {"content-type": "application/json", "x-auth-token": token.id}
+
+        # Convert the alarm from its YANG representation into something that
+        # can be passed to the openstack interface
+        ceilometer_alarm = CeilometerAlarm.from_gi_obj(alarm).to_dict()
+
+        # PUT the data to ceilometer
+        response = requests.put(
+                service.url.public + "/v2/alarms/{}".format(alarm.alarm_id),
+                headers=headers,
+                data=json.dumps(ceilometer_alarm),
+                timeout=5,
+                )
+
+        return response.json()
+
+    @rwstatus(ret_on_failure=[None])
+    def do_alarm_delete(self, account, alarm_id):
+        # Retrieve a token using account information
+        token = openstack_auth_token(account)
+        service = token.service("ceilometer")
+        headers = {"content-type": "application/json", "x-auth-token": token.id}
+
+        # DELETE the alarm
+        _ = requests.delete(
+                service.url.public + "/v2/alarms/{}".format(alarm_id),
+                headers=headers,
+                timeout=5,
+                )
+
+    @rwstatus(ret_on_failure=[None])
+    def do_alarm_list(self, account):
+        # Retrieve a token using account information
+        token = openstack_auth_token(account)
+        service = token.service("ceilometer")
+        headers = {"x-auth-token": token.id}
+
+        # GET a list of alarms
+        response = requests.get(
+                service.url.public + "/v2/alarms",
+                headers=headers,
+                timeout=5,
+                )
+
+        return response.json()
+
+
+class OpenstackAuthTokenV2(object):
+    def __init__(self, data):
+        self._data = data
+
+    @classmethod
+    def request(cls, account):
+        """Create an OpenstackAuthTokenV2 using account information
+
+        Arguments:
+            account - an RwcalYang.CloudAccount object
+
+        Returns:
+            an openstack token
+
+        """
+        headers = {"content-type": "application/json"}
+        data = json.dumps({
+            "auth": {
+                "tenantName": account.openstack.tenant,
+                "passwordCredentials": {
+                    "username": account.openstack.key,
+                    "password": account.openstack.secret,
+                    }
+                }
+            })
+
+        url = "{}/tokens".format(account.openstack.auth_url)
+        response = requests.post(url, headers=headers, data=data, timeout=5)
+        response.raise_for_status()
+
+        return cls(response.json())
+
+    @property
+    def id(self):
+        """The token identifier"""
+        return self._data["access"]["token"]["id"]
+
+    def service(self, name):
+        """Returns information about the specified service
+
+        Arguments:
+            name - the name of the service to return
+
+        Raises:
+            If the requested service cannot be found, an UnknownService
+            exception is raised.
+
+        Returns:
+            an OpenstackService object
+
+        """
+        for s in self._data["access"]["serviceCatalog"]:
+            if s["name"] == name:
+                return OpenstackService(
+                        name=name,
+                        url=OpenstackServiceURLs(
+                            public=s["endpoints"][0]["publicURL"],
+                            internal=s["endpoints"][0]["internalURL"],
+                            admin=s["endpoints"][0]["adminURL"],
+                            )
+                        )
+
+        raise UnknownService(name)
+
+
+class OpenstackAuthTokenV3(object):
+    def __init__(self, token, data):
+        self._data = data
+        self._token = token
+
+    @classmethod
+    def request(cls, account):
+        """Create an OpenstackAuthTokenV3 using account information
+
+        Arguments:
+            account - an RwcalYang.CloudAccount object
+
+        Returns:
+            an openstack token
+
+        """
+        headers = {"content-type": "application/json"}
+        data = json.dumps({
+            "auth": {
+                "identity": {
+                    "methods": ["password"],
+                    "password": {
+                        "user": {
+                            "name": account.openstack.key,
+                            "password": account.openstack.secret,
+                            "domain": {"id": "default"},
+                            }
+                        }
+                    },
+                "scope": {
+                    "project": {
+                        "name": account.openstack.tenant,
+                        "domain": {"id": "default"},
+                        }
+                    }
+                }
+            })
+
+        url = account.openstack.auth_url + "/auth/tokens"
+
+        response = requests.post(url, headers=headers, data=data, timeout=5)
+        response.raise_for_status()
+
+        return cls(response.headers['x-subject-token'], response.json())
+
+    @property
+    def id(self):
+        """The token identifier"""
+        return self._token
+
+    def service(self, name):
+        """Returns information about the specified service
+
+        Arguments:
+            name - the name of the service to return
+
+        Raises:
+            If the requested service cannot be found, an UnknownService
+            exception is raised.
+
+        Returns:
+            an OpenstackService object
+
+        """
+        for s in self._data["token"]["catalog"]:
+            if s["name"] == name:
+                endpoints = {e["interface"]:e["url"] for e in s["endpoints"]}
+                return OpenstackService(
+                        name=name,
+                        url=OpenstackServiceURLs(
+                            public=endpoints["public"],
+                            internal=endpoints["internal"],
+                            admin=endpoints["admin"],
+                            )
+                        )
+
+        raise UnknownService(name)
+
+
+def openstack_auth_token(account):
+    url = urllib.parse.urlparse(account.openstack.auth_url)
+
+    if url.path in ('/v3',):
+        return OpenstackAuthTokenV3.request(account)
+
+    if url.path in ('/v2.0', '/v2.1'):
+        return OpenstackAuthTokenV2.request(account)
+
+    raise ValueError("Unrecognized keystone version")
+
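+# Example (illustrative): the keystone version is selected from the path
+# component of account.openstack.auth_url, e.g.
+#
+#   http://keystone:5000/v3   -> OpenstackAuthTokenV3 (POST .../auth/tokens)
+#   http://keystone:5000/v2.0 -> OpenstackAuthTokenV2 (POST .../tokens)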
+
+class OpenstackService(collections.namedtuple(
+    "OpenstackServer",
+    "name url")):
+    pass
+
+
+class OpenstackServiceURLs(collections.namedtuple(
+    "OpenstackServiceURLs",
+    "public internal admin")):
+    pass
+
+
+class CeilometerAlarm(collections.namedtuple(
+    "CeilometerAlarm",
+    "name type description severity repeat_actions enabled alarm_actions ok_actions insufficient_data_actions threshold_rule")):
+    @classmethod
+    def from_gi_obj(cls, alarm, vim_id=None):
+        severity = CeilometerAlarmSeverity.from_gi_obj(alarm.severity).severity
+        actions = CeilometerAlarmActions.from_gi_obj(alarm.actions)
+
+        alarm_id = alarm.alarm_id if vim_id is None else vim_id
+        threshold_rule = CeilometerThresholdRule.from_gi_obj(alarm_id, alarm)
+
+        return cls(
+                type="threshold",
+                name=alarm.name,
+                description=alarm.description,
+                severity=severity,
+                repeat_actions=alarm.repeat,
+                enabled=alarm.enabled,
+                threshold_rule=threshold_rule,
+                ok_actions=actions.ok,
+                alarm_actions=actions.alarm,
+                insufficient_data_actions=actions.insufficient_data,
+                )
+
+    def to_dict(self):
+        """Returns a dictionary containing the tuple data"""
+        def recursive_to_dict(obj):
+            if not hasattr(obj, '_fields'):
+                return obj
+
+            return {k: recursive_to_dict(getattr(obj, k)) for k in obj._fields}
+
+        return recursive_to_dict(self)
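+
+    # Example (illustrative): for a GI alarm with metric "CPU_UTILIZATION",
+    # operation "GT" and value 80, from_gi_obj(alarm, vim_id).to_dict()
+    # yields the JSON body expected by ceilometer's 'POST /v2/alarms', e.g.
+    # {"type": "threshold", "threshold_rule": {"meter_name": "cpu_util",
+    # "comparison_operator": "gt", "threshold": 80, "query": [...]}, ...}.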
+
+
+class CeilometerThresholdRule(collections.namedtuple(
+    "CeilometerThresholdRule",
+    "evaluation_periods threshold statistic meter_name comparison_operator period query")):
+    @classmethod
+    def from_gi_obj(cls, vim_id, alarm):
+        meter = CeilometerAlarmMeter.from_gi_obj(alarm.metric).meter
+        statistic = CeilometerAlarmStatistic.from_gi_obj(alarm.statistic).statistic
+        operation = CeilometerAlarmOperation.from_gi_obj(alarm.operation).operation
+
+        return cls(
+                evaluation_periods=alarm.evaluations,
+                threshold=alarm.value,
+                statistic=statistic,
+                meter_name=meter,
+                comparison_operator=operation,
+                period=alarm.period,
+                query=[{
+                    "op": "eq",
+                    "field": "resource_id",
+                    "value": vim_id,
+                    }]
+                )
+
+    def to_dict(self):
+        """Returns a dictionary containing the tuple data"""
+        def recursive_to_dict(obj):
+            if not hasattr(obj, '_fields'):
+                return obj
+
+            return {k: recursive_to_dict(getattr(obj, k)) for k in obj._fields}
+
+        return recursive_to_dict(self)
+
+
+class CeilometerAlarmMeter(collections.namedtuple(
+    "CeiloemterAlarmMeter",
+    "meter")):
+    __mapping__ = {
+            "CPU_UTILIZATION" : "cpu_util",
+            "MEMORY_UTILIZATION" : "memory_usage",
+            "STORAGE_UTILIZATION" : "disk_usage",
+            }
+    @classmethod
+    def from_gi_obj(cls, meter):
+        return cls(meter=cls.__mapping__[meter])
+
+
+class CeilometerAlarmStatistic(collections.namedtuple(
+    "CeilometerAlarmStatistic",
+    "statistic")):
+    __mapping__ = {
+            "AVERAGE": "avg",
+            "MINIMUM": "min",
+            "MAXIMUM": "max",
+            "COUNT": "count",
+            "SUM": "sum",
+            }
+    @classmethod
+    def from_gi_obj(cls, statistic):
+        return cls(statistic=cls.__mapping__[statistic])
+
+
+class CeilometerAlarmOperation(collections.namedtuple(
+    "CeilometerAlarmOperation",
+    "operation")):
+    __mapping__ = {
+            "LT": "lt",
+            "LE": "le",
+            "EQ": "eq",
+            "GE": "ge",
+            "GT": "gt",
+            }
+    @classmethod
+    def from_gi_obj(cls, operation):
+        return cls(operation=cls.__mapping__[operation])
+
+
+class CeilometerAlarmSeverity(collections.namedtuple(
+    "CeilometerAlarmSeverity",
+    "severity")):
+    __mapping__ = {
+            "LOW": "low",
+            "MODERATE": "moderate",
+            "CRITICAL": "critical",
+            }
+    @classmethod
+    def from_gi_obj(cls, severity):
+        return cls(severity=cls.__mapping__[severity])
+
+
+class CeilometerAlarmActions(collections.namedtuple(
+    "CeilometerAlarmActions",
+    "ok alarm insufficient_data")):
+    @classmethod
+    def from_gi_obj(cls, actions):
+        return cls(
+            ok=[obj.url for obj in actions.ok],
+            alarm=[obj.url for obj in actions.alarm],
+            insufficient_data=[obj.url for obj in actions.insufficient_data],
+            )
diff --git a/rwmon/plugins/vala/rwmon_mock/CMakeLists.txt b/rwmon/plugins/vala/rwmon_mock/CMakeLists.txt
new file mode 100644 (file)
index 0000000..b619aa8
--- /dev/null
@@ -0,0 +1,20 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+include(rift_plugin)
+
+rift_install_python_plugin(rwmon_mock rwmon_mock.py)
diff --git a/rwmon/plugins/vala/rwmon_mock/Makefile b/rwmon/plugins/vala/rwmon_mock/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwmon/plugins/vala/rwmon_mock/rwmon_mock.py b/rwmon/plugins/vala/rwmon_mock/rwmon_mock.py
new file mode 100644 (file)
index 0000000..2b24981
--- /dev/null
@@ -0,0 +1,121 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import logging
+
+from gi.repository import (
+    GObject,
+    RwMon,
+    RwTypes,
+    RwmonYang as rwmon,
+    )
+
+import rw_status
+import rwlogger
+
+logger = logging.getLogger('rwmon.mock')
+
+
+rwstatus = rw_status.rwstatus_from_exc_map({
+    IndexError: RwTypes.RwStatus.NOTFOUND,
+    KeyError: RwTypes.RwStatus.NOTFOUND,
+    })
+
+
+class NullImpl(object):
+    def nfvi_metrics(self, account, vm_id):
+        return rwmon.NfviMetrics()
+
+    def nfvi_vcpu_metrics(self, account, vm_id):
+        return rwmon.NfviMetrics_Vcpu()
+
+    def nfvi_memory_metrics(self, account, vm_id):
+        return rwmon.NfviMetrics_Memory()
+
+    def nfvi_storage_metrics(self, account, vm_id):
+        return rwmon.NfviMetrics_Storage()
+
+    def nfvi_metrics_available(self, account):
+        return True
+
+    def alarm_create(self, account, vim_id, alarm):
+        pass
+
+    def alarm_update(self, account, alarm):
+        pass
+
+    def alarm_delete(self, account, alarm_id):
+        pass
+
+    def alarm_list(self, account):
+        return list()
+
+
+class MockMonitoringPlugin(GObject.Object, RwMon.Monitoring):
+    def __init__(self):
+        GObject.Object.__init__(self)
+        self._impl = NullImpl()
+
+    @rwstatus
+    def do_init(self, rwlog_ctx):
+        if not any(isinstance(h, rwlogger.RwLogger) for h in logger.handlers):
+            logger.addHandler(
+                rwlogger.RwLogger(
+                    category="rw-monitor-log",
+                    subcategory="mock",
+                    log_hdl=rwlog_ctx,
+                )
+            )
+
+    @rwstatus
+    def do_nfvi_metrics(self, account, vm_id):
+        return self._impl.nfvi_metrics(account, vm_id)
+
+    @rwstatus
+    def do_nfvi_vcpu_metrics(self, account, vm_id):
+        return self._impl.nfvi_vcpu_metrics(account, vm_id)
+
+    @rwstatus
+    def do_nfvi_memory_metrics(self, account, vm_id):
+        return self._impl.nfvi_memory_metrics(account, vm_id)
+
+    @rwstatus
+    def do_nfvi_storage_metrics(self, account, vm_id):
+        return self._impl.nfvi_storage_metrics(account, vm_id)
+
+    @rwstatus
+    def do_nfvi_metrics_available(self, account):
+        return self._impl.nfvi_metrics_available(account)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_alarm_create(self, account, vim_id, alarm):
+        return self._impl.alarm_create(account, vim_id, alarm)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_alarm_update(self, account, alarm):
+        return self._impl.alarm_update(account, alarm)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_alarm_delete(self, account, alarm_id):
+        return self._impl.alarm_delete(account, alarm_id)
+
+    @rwstatus(ret_on_failure=[None])
+    def do_alarm_list(self, account):
+        return self._impl.alarm_list(account)
+
+    def set_impl(self, impl):
+        self._impl = impl
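+
+
+# Example (illustrative): tests can swap in their own implementation to
+# control what the mock returns. 'FakeImpl' is hypothetical; any object
+# providing the NullImpl methods will do:
+#
+#     plugin = rw_peas.PeasPlugin("rwmon_mock", "RwMon-1.0")
+#     impl = plugin.get_interface("Monitoring")
+#     impl.set_impl(FakeImpl())
+#     status, metrics = impl.nfvi_metrics(account, vm_id)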
diff --git a/rwmon/plugins/yang/CMakeLists.txt b/rwmon/plugins/yang/CMakeLists.txt
new file mode 100644 (file)
index 0000000..717417b
--- /dev/null
@@ -0,0 +1,39 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# 
+
+##
+# Parse the yang files
+##
+
+include(rift_yang)
+
+set(source_yang_files rwmon.yang)
+
+rift_add_yang_target(
+  TARGET rwmon_yang
+  YANG_FILES ${source_yang_files}
+  COMPONENT ${PKG_LONG_NAME}
+  DEPENDS
+    mano-types_yang
+  LIBRARIES
+    rwschema_yang_gen
+    rwyang
+    rwlog
+    rwlog-mgmt_yang_gen
+    mano-types_yang_gen
+)
+
diff --git a/rwmon/plugins/yang/Makefile b/rwmon/plugins/yang/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwmon/plugins/yang/rwmon.yang b/rwmon/plugins/yang/rwmon.yang
new file mode 100644 (file)
index 0000000..20c364d
--- /dev/null
@@ -0,0 +1,76 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ *
+ */
+
+module rwmon
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rwmon";
+  prefix "rwmon";
+
+  import rw-base {
+    prefix rwbase;
+  }
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rw-log {
+    prefix "rwlog";
+  }
+
+  import mano-types {
+    prefix "manotypes";
+  }
+
+  revision 2015-10-28 {
+    description
+        "Initial revision.";
+    reference
+        "RIFT monitoring";
+  }
+
+  container nfvi-metrics {
+    rwpb:msg-new NfviMetrics;
+
+    leaf timestamp {
+      description
+          "This is the time when the metric was captured. The timestamp is
+          represented as the number of seconds since the beginning of the Unix
+          epoch.";
+      type decimal64 {
+        fraction-digits 3;
+      }
+    }
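+    // Example (illustrative): a timestamp of 1456763999.125 represents
+    // 2016-02-29T16:39:59.125Z.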
+
+    uses manotypes:nfvi-metrics;
+  }
+
+  container alarm {
+    rwpb:msg-new Alarm;
+
+    uses manotypes:alarm;
+  }
+}
+
+/* vim: set ts=2:sw=2: */
diff --git a/rwmon/test/CMakeLists.txt b/rwmon/test/CMakeLists.txt
new file mode 100644 (file)
index 0000000..0351ed5
--- /dev/null
@@ -0,0 +1,21 @@
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+rift_unittest(utest_rwmon.py
+  TEST_ARGS python3 ${CMAKE_CURRENT_SOURCE_DIR}/utest_rwmon.py)
+
+
diff --git a/rwmon/test/Makefile b/rwmon/test/Makefile
new file mode 100644 (file)
index 0000000..2b691a8
--- /dev/null
@@ -0,0 +1,36 @@
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+# 
+
+##
+# Define a Makefile function: find_upward(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest Makefile.top in this directory or an ancestor
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
diff --git a/rwmon/test/utest_rwmon.py b/rwmon/test/utest_rwmon.py
new file mode 100644 (file)
index 0000000..83356c0
--- /dev/null
@@ -0,0 +1,258 @@
+#!/usr/bin/env python3
+
+# 
+#   Copyright 2016 RIFT.IO Inc
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+import os
+import unittest
+import xmlrunner
+
+import rw_peas
+
+import gi
+gi.require_version("RwcalYang", "1.0")
+gi.require_version("RwMon", "1.0")
+gi.require_version("RwmonYang", "1.0")
+gi.require_version("RwTypes", "1.0")
+
+
+from gi.repository import (
+        RwmonYang,
+        RwcalYang,
+        RwTypes,
+        )
+
+
+class TestNullDataSource(unittest.TestCase):
+    def setUp(self):
+        plugin = rw_peas.PeasPlugin("rwmon_mock", 'RwMon-1.0')
+        self.plugin = plugin.get_interface("Monitoring")
+
+        self.account = RwcalYang.CloudAccount()
+        self.vim_id = "test-vim-id"
+
+    def test_null_data_source(self):
+        """
+        By default, the NFVI metrics plugin mock installs a 'null'
+        implementation that simply returns empty NFVI structures.
+
+        """
+        status, metrics = self.plugin.nfvi_metrics(self.account, self.vim_id)
+        self.assertEqual(status, RwTypes.RwStatus.SUCCESS)
+        self.assertEqual(metrics, RwmonYang.NfviMetrics())
+
+        status, metrics = self.plugin.nfvi_vcpu_metrics(self.account, self.vim_id)
+        self.assertEqual(status, RwTypes.RwStatus.SUCCESS)
+        self.assertEqual(metrics, RwmonYang.NfviMetrics_Vcpu())
+
+        status, metrics = self.plugin.nfvi_memory_metrics(self.account, self.vim_id)
+        self.assertEqual(status, RwTypes.RwStatus.SUCCESS)
+        self.assertEqual(metrics, RwmonYang.NfviMetrics_Memory())
+
+        status, metrics = self.plugin.nfvi_storage_metrics(self.account, self.vim_id)
+        self.assertEqual(status, RwTypes.RwStatus.SUCCESS)
+        self.assertEqual(metrics, RwmonYang.NfviMetrics_Storage())
+
+        status, result = self.plugin.nfvi_metrics_available(self.account)
+        self.assertEqual(status, RwTypes.RwStatus.SUCCESS)
+        self.assertTrue(result)
+
+
+class TestMockDataSource(unittest.TestCase):
+    def setUp(self):
+        plugin = rw_peas.PeasPlugin("rwmon_mock", 'RwMon-1.0')
+        self.plugin = plugin.get_interface("Monitoring")
+        self.plugin.set_impl(MockDataSource())
+
+        self.account = RwcalYang.CloudAccount()
+        self.vim_id = "test-vim-id"
+
+    def test_mock_data_source(self):
+        """
+        This test installs a mock data source implementation in the plugin,
+        which returns known values, and checks that those values are indeed
+        returned.
+
+        """
+        expected_vcpu_metrics = RwmonYang.NfviMetrics_Vcpu()
+        expected_vcpu_metrics.utilization = 50.0
+        expected_vcpu_metrics.total = 100
+
+        status, metrics = self.plugin.nfvi_vcpu_metrics(self.account, self.vim_id)
+        self.assertEqual(status, RwTypes.RwStatus.SUCCESS)
+        self.assertEqual(metrics.total, expected_vcpu_metrics.total)
+        self.assertEqual(metrics.utilization, expected_vcpu_metrics.utilization)
+
+        expected_memory_metrics = RwmonYang.NfviMetrics_Memory()
+        expected_memory_metrics.used = 90
+        expected_memory_metrics.total = 100
+        expected_memory_metrics.utilization = 90/100
+
+        status, metrics = self.plugin.nfvi_memory_metrics(self.account, self.vim_id)
+        self.assertEqual(status, RwTypes.RwStatus.SUCCESS)
+        self.assertEqual(metrics.used, expected_memory_metrics.used)
+        self.assertEqual(metrics.total, expected_memory_metrics.total)
+        self.assertEqual(metrics.utilization, expected_memory_metrics.utilization)
+
+        expected_storage_metrics = RwmonYang.NfviMetrics_Storage()
+        expected_storage_metrics.used = 300
+        expected_storage_metrics.total = 500
+        expected_storage_metrics.utilization = 300/500
+
+        status, metrics = self.plugin.nfvi_storage_metrics(self.account, self.vim_id)
+        self.assertEqual(status, RwTypes.RwStatus.SUCCESS)
+        self.assertEqual(metrics.used, expected_storage_metrics.used)
+        self.assertEqual(metrics.total, expected_storage_metrics.total)
+        self.assertEqual(metrics.utilization, expected_storage_metrics.utilization)
+
+        status, metrics = self.plugin.nfvi_metrics(self.account, self.vim_id)
+        self.assertEqual(status, RwTypes.RwStatus.SUCCESS)
+        self.assertEqual(metrics.vcpu.total, expected_vcpu_metrics.total)
+        self.assertEqual(metrics.vcpu.utilization, expected_vcpu_metrics.utilization)
+        self.assertEqual(metrics.storage.used, expected_storage_metrics.used)
+        self.assertEqual(metrics.storage.total, expected_storage_metrics.total)
+        self.assertEqual(metrics.storage.utilization, expected_storage_metrics.utilization)
+        self.assertEqual(metrics.memory.used, expected_memory_metrics.used)
+        self.assertEqual(metrics.memory.total, expected_memory_metrics.total)
+        self.assertEqual(metrics.memory.utilization, expected_memory_metrics.utilization)
+
+        status, result = self.plugin.nfvi_metrics_available(self.account)
+        self.assertEqual(status, RwTypes.RwStatus.SUCCESS)
+        self.assertTrue(result)
+
+
+class TestMockAlarms(unittest.TestCase):
+    def setUp(self):
+        plugin = rw_peas.PeasPlugin("rwmon_mock", 'RwMon-1.0')
+
+        self.mock = MockAlarmInterface()
+        self.plugin = plugin.get_interface("Monitoring")
+        self.plugin.set_impl(self.mock)
+
+        self.account = RwcalYang.CloudAccount()
+        self.alarm = RwmonYang.Alarm(name='test-alarm')
+        self.vim_id = 'test-vim-id'
+
+    def test(self):
+        """
+        This test uses a simple mock implementation of the alarm interface to
+        check that create, update, delete, and list work correctly.
+
+        """
+        # In the beginning, there were no alarms
+        _, alarms = self.plugin.do_alarm_list(self.account)
+        self.assertEqual(0, len(alarms))
+
+        # Create two alarms
+        self.plugin.do_alarm_create(self.account, self.vim_id, RwmonYang.Alarm())
+        self.plugin.do_alarm_create(self.account, self.vim_id, RwmonYang.Alarm())
+
+        _, alarms = self.plugin.do_alarm_list(self.account)
+        self.assertEqual(2, len(alarms))
+
+        # The alarms should have sequential IDs but no names yet
+        alarms.sort(key=lambda a: a.alarm_id)
+        self.assertEqual('test-alarm-id-1', alarms[0].alarm_id)
+        self.assertEqual('test-alarm-id-2', alarms[1].alarm_id)
+        self.assertTrue(all(a.name is None for a in alarms))
+
+        # Give names to the alarms
+        alarms[0].name = 'test-alarm'
+        alarms[1].name = 'test-alarm'
+        self.plugin.do_alarm_update(self.account, alarms[0])
+        self.plugin.do_alarm_update(self.account, alarms[1])
+        self.assertTrue(all(a.name == 'test-alarm' for a in alarms))
+
+        # Delete the alarms
+        self.plugin.do_alarm_delete(self.account, alarms[0].alarm_id)
+        self.plugin.do_alarm_delete(self.account, alarms[1].alarm_id)
+        _, alarms = self.plugin.do_alarm_list(self.account)
+        self.assertEqual(0, len(alarms))
+
+
+class MockAlarmInterface(object):
+    """
+    This class is a mock implementation of the alarm interface used by the
+    monitoring plugin.
+    """
+
+    def __init__(self):
+        self.count = 0
+        self.alarms = dict()
+
+    def alarm_create(self, account, vim_id, alarm):
+        self.count += 1
+        alarm_id = 'test-alarm-id-{}'.format(self.count)
+        alarm.alarm_id = alarm_id
+        self.alarms[alarm_id] = alarm
+
+    def alarm_update(self, account, alarm):
+        assert alarm.alarm_id is not None
+        self.alarms[alarm.alarm_id] = alarm
+
+    def alarm_delete(self, account, alarm_id):
+        del self.alarms[alarm_id]
+
+    def alarm_list(self, account):
+        return list(self.alarms.values())
+
+
+class MockDataSource(object):
+    """
+    This class implements the data source interface used by the monitoring
+    plugin and provides mock data for testing.
+    """
+
+    def nfvi_metrics(self, account, vm_id):
+        metrics = RwmonYang.NfviMetrics()
+        metrics.vcpu = self.nfvi_vcpu_metrics(account, vm_id)
+        metrics.memory = self.nfvi_memory_metrics(account, vm_id)
+        metrics.storage = self.nfvi_storage_metrics(account, vm_id)
+        return metrics
+
+    def nfvi_vcpu_metrics(self, account, vm_id):
+        metrics = RwmonYang.NfviMetrics_Vcpu()
+        metrics.total = 100
+        metrics.utilization = 50.0
+        return metrics
+
+    def nfvi_memory_metrics(self, account, vm_id):
+        metrics = RwmonYang.NfviMetrics_Memory()
+        metrics.used = 90
+        metrics.total = 100
+        metrics.utilization = 90/100
+        return metrics
+
+    def nfvi_storage_metrics(self, account, vm_id):
+        metrics = RwmonYang.NfviMetrics_Storage()
+        metrics.used = 300
+        metrics.total = 500
+        metrics.utilization = 300/500
+        return metrics
+
+    def nfvi_metrics_available(self, account):
+        return True
+
+
+def main():
+    runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+    unittest.main(testRunner=runner)
+
+
+if __name__ == '__main__':
+    main()
+
+# vim: sw=4
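
Because the mock plugin delegates every call to the object handed to set_impl(),
the data source interface is duck-typed; any object with the same methods can
stand in. A sketch of swapping in a variant source, reusing only names defined in
the test above (the constant values are illustrative):

    class ConstantVcpuSource(MockDataSource):
        """Overrides the vCPU metrics; all other MockDataSource methods are inherited."""

        def nfvi_vcpu_metrics(self, account, vm_id):
            metrics = RwmonYang.NfviMetrics_Vcpu()
            metrics.total = 8
            metrics.utilization = 25.0
            return metrics

    # plugin = rw_peas.PeasPlugin("rwmon_mock", "RwMon-1.0").get_interface("Monitoring")
    # plugin.set_impl(ConstantVcpuSource())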
diff --git a/rwso/plugins/cli/cli_so_schema_listing.txt b/rwso/plugins/cli/cli_so_schema_listing.txt
new file mode 100644 (file)
index 0000000..3031b19
--- /dev/null
@@ -0,0 +1,31 @@
+rw-base
+rw-mgmtagt
+rw-manifest
+rw-vcs
+rwlog-mgmt
+rw-dts
+rwmsg-data
+rw-dtsperf
+rwshell-mgmt
+rw-debug
+rw-dtsperfmgr
+rw-memlog
+mano-base
+rw-sorch
+rw-restportforward
+mano-types
+rw-yang-types
+rw-log
+rwvcs-types
+rw-netconf
+rwcal
+rw-pb-ext
+rw-notify-ext
+rw-mgmt-schema
+rw-cli-ext
+ietf-inet-types
+ietf-yang-types
+vnfr
+nsr
+ietf-restconf-monitoring
+ietf-netconf-notifications
diff --git a/rwso/plugins/yang/rw-sorch-log.yang b/rwso/plugins/yang/rw-sorch-log.yang
new file mode 100644 (file)
index 0000000..57ffae6
--- /dev/null
@@ -0,0 +1,136 @@
+
+/*
+ * 
+ *   Copyright 2016 RIFT.IO Inc
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ *
+ */
+
+
+/**
+ * @file rw-sorch-log.yang
+ * @author Rift.IO
+ * @date 03/02/2015
+ * @brief RiftWare Log Event Definitions for rw-sorch logging
+ */
+
+module rw-sorch-log
+{
+  namespace "http://riftio.com/ns/riftware-1.0/rw-sorch-log";
+  prefix "rw-sorch-log";
+
+  import rw-base {
+    prefix rwbase;
+  }
+
+  import rw-pb-ext {
+    prefix "rwpb";
+  }
+
+  import rw-yang-types {
+    prefix "rwt";
+  }
+
+  import rw-notify-ext {
+    prefix "rwnotify";
+  }
+
+  import rw-log {
+    prefix "rwlog";
+  }
+
+  revision 2014-12-30 {
+    description
+      "Initial revision.";
+    reference
+      "RIFT Launchpad Logging";
+  }
+
+  /*
+   * Generic Logger Log Events - ID space 160000 - 160099
+   */
+  notification debug {
+    rwpb:msg-new Debug;
+    rwnotify:log-event-id 160000;
+    description
+        "Generic Debug Log";
+    uses rwlog:severity-debug;
+    leaf category {
+      type string;
+    }
+    leaf log {
+      type string;
+    }
+  }
+
+  notification info {
+    rwpb:msg-new Info;
+    rwnotify:log-event-id 160001;
+    description
+        "Generic Info Log";
+    uses rwlog:severity-info;
+    leaf category {
+      type string;
+    }
+    leaf log {
+      type string;
+    }
+  }
+
+  notification warn {
+    rwpb:msg-new Warn;
+    rwnotify:log-event-id 160002;
+    description
+        "Generic Warning Log";
+    uses rwlog:severity-warning;
+    leaf category {
+      type string;
+    }
+    leaf log {
+      type string;
+    }
+  }
+
+  notification error {
+    rwpb:msg-new Error;
+    rwnotify:log-event-id 160003;
+    description
+        "Generic Error Log";
+    uses rwlog:severity-error;
+    leaf category {
+      type string;
+    }
+    leaf log {
+      type string;
+    }
+  }
+
+  notification critical {
+    rwpb:msg-new Critical;
+    rwnotify:log-event-id 160004;
+    description
+        "Generic Critical Log";
+    uses rwlog:severity-critical;
+    leaf category {
+      type string;
+    }
+    leaf log {
+      type string;
+    }
+  }
+
+  /*
+   * END - generic log events
+   */
+}
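
Each notification above carries the same free-form category/log leaves plus a
severity grouping, and each rwpb:msg-new statement should yield a generated
message class just as in rwmon.yang. A hedged sketch of building one such event;
the RwSorchLogYang binding name is an assumption inferred from the RwmonYang
naming pattern, not confirmed by this tree:

    import gi
    gi.require_version("RwSorchLogYang", "1.0")  # assumed binding name
    from gi.repository import RwSorchLogYang

    # log-event-id 160001: generic info event
    event = RwSorchLogYang.Info()
    event.category = "rw-sorch"
    event.log = "service orchestration started"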