#!/usr/bin/env bash
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016,2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# BUILD.sh
#
-# This is a top-level build script for RIFT.io
+# This is a top-level build script for OSM SO or UI
#
# Arguments and options: use -h or --help
#
# dependencies -- requires sudo rights
+MODULE=SO
+
# Defensive bash programming flags
set -o errexit # Exit on any error
trap 'echo ERROR: Command failed: \"$BASH_COMMAND\"' ERR
# Options and arguments
# There
-params="$(getopt -o suhb: -l install-so,install-ui,no-mkcontainer,build-ui:,help --name "$0" -- "$@")"
+params="$(getopt -o h -l install,help --name "$0" -- "$@")"
if [ $? != 0 ] ; then echo "Failed parsing options." >&2 ; exit 1 ; fi
eval set -- $params
-installSO=false
-installUI=false
-runMkcontainer=true
-UIPathToBuild=
+installFromPackages=false
while true; do
case "$1" in
- -s|--install-so) installSO=true; shift;;
- -u|--install-ui) installUI=true; shift;;
- -b|--build-ui) shift; UIPathToBuild=$1; shift;;
- --no-mkcontainer) runMkcontainer=false; shift;;
+ --install) installFromPackages=true; shift;;
-h|--help)
echo
echo "NAME:"
echo
echo "SYNOPSIS:"
echo " $0 -h|--help"
- echo " $0 [-s] [-u|-b PATH-TO-UI-REPO] [PLATFORM_REPOSITORY] [PLATFORM_VERSION]"
+ echo " $0 [--install] [PLATFORM_REPOSITORY] [PLATFORM_VERSION]"
echo
echo "DESCRIPTION:"
- echo " Prepare current system to run SO and UI. By default, the system"
- echo " is set up to support building SO and UI; optionally, either or"
- echo " both SO and UI can be installed from a Debian package repository."
+ echo " Prepare current system to run $MODULE. By default, the system"
+ echo " is set up to support building $MODULE; optionally, "
+ echo " $MODULE can be installed from a Debian package repository."
echo
- echo " -s|--install-so: install SO from package"
- echo " -u|--install-ui: install UI from package"
- echo " -b|--build-ui PATH-TO-UI-REPO: build the UI in the specified repo"
- echo " --no-mkcontainer: do not run mkcontainer, used for debugging script"
+ echo " --install: install $MODULE from package"
echo " PLATFORM_REPOSITORY (optional): name of the RIFT.ware repository."
echo " PLATFORM_VERSION (optional): version of the platform packages to be installed."
echo
esac
done
-if $installUI && [[ $UIPathToBuild ]]; then
- echo "Cannot both install and build the UI!"
- exit 1
-fi
-
-if [[ $UIPathToBuild && ! -d $UIPathToBuild ]]; then
- echo "Not a directory: $UIPathToBuild"
- exit 1
-fi
-
# Turn this on after handling options, so the output doesn't get cluttered.
set -x # Print commands before executing them
-###############################################################################
-# Find the platform
-PYTHON=python
-if [[ ! -f /usr/bin/python ]]; then
- PYTHON=python3
-fi
-
-if $PYTHON -mplatform | grep -qi fedora; then
- PLATFORM=fc20
-elif $PYTHON -mplatform | grep -qi ubuntu; then
- PLATFORM=ub16
-else
- echo "Unknown platform"
- exit 1
-fi
-
###############################################################################
# Set up repo and version
-if [[ $PLATFORM == ub16 ]]; then
- PLATFORM_REPOSITORY=${1:-OSM}
- PLATFORM_VERSION=${2:-4.4.2.1.62754}
-elif [[ $PLATFORM == fc20 ]]; then
- PLATFORM_REPOSITORY=${1:-OSM} # change to OSM when published
- PLATFORM_VERSION=${2:-4.4.2.1.62754}
-else
- echo "Internal error: unknown platform $PLATFORM"
- exit 1
-fi
+PLATFORM_REPOSITORY=${1:-osm-rbac}
+PLATFORM_VERSION=${2:-5.1.3.9999.70283}
###############################################################################
# Main block
-# Disable apt-daily.service and apt-daily.timer
-
-DAILY_TIMER='apt-daily.timer'
-DAILY_SERVICE='apt-daily.service'
-if [ $(systemctl is-active $DAILY_TIMER) = "active" ]
-then
- systemctl stop $DAILY_TIMER
- systemctl disable $DAILY_TIMER
- systemctl disable $DAILY_SERVICE
-fi
-
# must be run from the top of a workspace
cd $(dirname $0)
-# inside RIFT.io this is an NFS mount
-# so just to be safe
-test -h /usr/rift && sudo rm -f /usr/rift
-
-if [[ $PLATFORM == ub16 ]]; then
- # enable the right repos
- curl http://repos.riftio.com/public/xenial-riftware-public-key | sudo apt-key add -
- # the old mkcontainer always enabled release which can be bad
- # so remove it
- sudo rm -f /etc/apt/sources.list.d/release.list /etc/apt/sources.list.d/rbac.list /etc/apt/sources.list.d/OSM.list
- # always use the same file name so that updates will overwrite rather than enable a second repo
- sudo curl -o /etc/apt/sources.list.d/RIFT.list http://buildtracker.riftio.com/repo_file/ub16/${PLATFORM_REPOSITORY}/
- sudo apt-get update
-
- # and install the tools
- sudo apt remove -y rw.toolchain-rwbase tcpdump
- sudo apt-get install -y --allow-downgrades rw.tools-container-tools=${PLATFORM_VERSION} rw.tools-scripts=${PLATFORM_VERSION} python
-elif [[ $PLATFORM == fc20 ]]; then
- # get the container tools from the correct repository
- sudo rm -f /etc/yum.repos.d/private.repo
- sudo curl -o /etc/yum.repos.d/${PLATFORM_REPOSITORY}.repo \
- http://buildtracker.riftio.com/repo_file/fc20/${PLATFORM_REPOSITORY}/
- sudo yum install --assumeyes rw.tools-container-tools rw.tools-scripts
-else
- echo "Internal error: unknown platform $PLATFORM"
- exit 1
-fi
-
-# enable the OSM repository hosted by RIFT.io
-# this contains the RIFT platform code and tools
-# and install of the packages required to build and run
-# this module
-if $runMkcontainer; then
- sudo apt-get install -y libxml2-dev libxslt-dev
- sudo /usr/rift/container_tools/mkcontainer --modes build --modes ext --repo ${PLATFORM_REPOSITORY}
- sudo pip3 install lxml==3.4.0
-fi
-
+# enable the right repos
+curl http://repos.riftio.com/public/xenial-riftware-public-key | sudo apt-key add -
-if [[ $PLATFORM == ub16 ]]; then
- # install the RIFT platform code:
- # remove these packages since some files moved from one to the other, and one was obsoleted
- # ignore failures
+# always use the same file name so that updates will overwrite rather than enable a second repo
+sudo curl -o /etc/apt/sources.list.d/rift.list http://buildtracker.riftio.com/repo_file/ub16/${PLATFORM_REPOSITORY}/
+sudo apt-get update
- PACKAGES="rw.toolchain-rwbase rw.toolchain-rwtoolchain rw.core.mgmt-mgmt rw.core.util-util \
- rw.core.rwvx-rwvx rw.core.rwvx-rwdts rw.automation.core-RWAUTO"
- # this package is obsolete.
- OLD_PACKAGES="rw.core.rwvx-rwha-1.0"
- for package in $PACKAGES $OLD_PACKAGES; do
- sudo apt remove -y $package || true
- done
+sudo apt install -y --allow-downgrades rw.tools-container-tools=${PLATFORM_VERSION} rw.tools-scripts=${PLATFORM_VERSION}
- packages=""
- for package in $PACKAGES; do
- packages="$packages $package=${PLATFORM_VERSION}"
- done
- sudo apt-get install -y --allow-downgrades $packages
-
- sudo apt-get install python-cinderclient
-
- sudo chmod 777 /usr/rift /usr/rift/usr/share
-
- if $installSO; then
- sudo apt-get install -y \
- rw.core.mano-rwcal_yang_ylib-1.0 \
- rw.core.mano-rwconfig_agent_yang_ylib-1.0 \
- rw.core.mano-rwlaunchpad_yang_ylib-1.0 \
- rw.core.mano-mano_yang_ylib-1.0 \
- rw.core.mano-common-1.0 \
- rw.core.mano-rwsdn_yang_ylib-1.0 \
- rw.core.mano-rwsdnal_yang_ylib-1.0 \
- rw.core.mano-rwsdn-1.0 \
- rw.core.mano-mano-types_yang_ylib-1.0 \
- rw.core.mano-rwcal-cloudsim-1.0 \
- rw.core.mano-rwcal-1.0 \
- rw.core.mano-rw_conman_yang_ylib-1.0 \
- rw.core.mano-rwcalproxytasklet-1.0 \
- rw.core.mano-rwlaunchpad-1.0 \
- rw.core.mano-rwcal-openmano-vimconnector-1.0 \
- rw.core.mano-rwcal-propcloud1-1.0 \
- rw.core.mano-lpmocklet_yang_ylib-1.0 \
- rw.core.mano-rwmon-1.0 \
- rw.core.mano-rwcloud_yang_ylib-1.0 \
- rw.core.mano-rwcal-openstack-1.0 \
- rw.core.mano-rw.core.mano_foss \
- rw.core.mano-rwmon_yang_ylib-1.0 \
- rw.core.mano-rwcm-1.0 \
- rw.core.mano-rwcal-mock-1.0 \
- rw.core.mano-rwmano_examples-1.0 \
- rw.core.mano-rwcal-cloudsimproxy-1.0 \
- rw.core.mano-models-1.0 \
- rw.core.mano-rwcal-aws-1.0
- fi
-
- if $installUI; then
- sudo apt-get install -y \
- rw.ui-about \
- rw.ui-logging \
- rw.ui-skyquake \
- rw.ui-accounts \
- rw.ui-composer \
- rw.ui-launchpad \
- rw.ui-debug \
- rw.ui-config \
- rw.ui-dummy_component
- fi
-elif [[ $PLATFORM == fc20 ]]; then
-
- temp=$(mktemp -d /tmp/rw.XXX)
- pushd $temp
-
- # yum does not accept the --nodeps and --replacefiles options so we
- # download first and then install
- yumdownloader rw.toolchain-rwbase-${PLATFORM_VERSION} \
- rw.toolchain-rwtoolchain-${PLATFORM_VERSION} \
- rw.core.mgmt-mgmt-${PLATFORM_VERSION} \
- rw.core.util-util-${PLATFORM_VERSION} \
- rw.core.rwvx-rwvx-${PLATFORM_VERSION} \
- rw.core.rwvx-rwha-1.0-${PLATFORM_VERSION} \
- rw.core.rwvx-rwdts-${PLATFORM_VERSION} \
- rw.automation.core-RWAUTO-${PLATFORM_VERSION}
-
- # Install one at a time so that pre-installed packages will not cause a failure
- for pkg in *rpm; do
- # Check to see if the package is already installed; do not try to install
- # it again if it does, since this causes rpm -i to return failure.
- if rpm -q $(rpm -q -p $pkg) >/dev/null; then
- echo "WARNING: package already installed: $pkg"
- else
- sudo rpm -i --replacefiles --nodeps $pkg
- fi
- done
+if $installFromPackages; then
- popd
- rm -rf $temp
+ # Install module and platform from packages
+ sudo -H /usr/rift/container_tools/mkcontainer --modes $MODULE --repo ${PLATFORM_REPOSITORY} --rw-version ${PLATFORM_VERSION}
- # this file gets in the way of the one generated by the build
- sudo rm -f /usr/rift/usr/lib/libmano_yang_gen.so
-
-
- sudo chmod 777 /usr/rift /usr/rift/usr/share
-
- if $installSO; then
- sudo apt-get install -y \
- rw.core.mc-\*-${PLATFORM_VERSION}
- fi
-
- if $installUI; then
- sudo apt-get install -y \
- rw.ui-about-${PLATFORM_VERSION} \
- rw.ui-logging-${PLATFORM_VERSION} \
- rw.ui-skyquake-${PLATFORM_VERSION} \
- rw.ui-accounts-${PLATFORM_VERSION} \
- rw.ui-composer-${PLATFORM_VERSION} \
- rw.ui-launchpad-${PLATFORM_VERSION} \
- rw.ui-debug-${PLATFORM_VERSION} \
- rw.ui-config-${PLATFORM_VERSION} \
- rw.ui-dummy_component-${PLATFORM_VERSION}
- fi
-
else
- echo "Internal error: unknown platform $PLATFORM"
- exit 1
-fi
-# If you are re-building SO, you just need to run
-# these two steps
-if ! $installSO; then
+ # Install environment to build module
+ sudo -H /usr/rift/container_tools/mkcontainer --modes $MODULE-dev --repo ${PLATFORM_REPOSITORY} --rw-version ${PLATFORM_VERSION}
+
+ # Build and install module
make -j16
sudo make install
-fi
-if [[ $UIPathToBuild ]]; then
- make -C $UIPathToBuild -j16
- sudo make -C $UIPathToBuild install
fi
-echo "Creating Service ...."
-sudo $(dirname $0)/create_launchpad_service
+if [[ $MODULE == SO ]]; then
+ echo "Creating Service ...."
+ sudo /usr/rift/bin/create_launchpad_service
+fi
+
# DO NOT add any code before this and DO NOT
# include this file anywhere else
##
-include(rift_submodule)
+include(rift_submodule NO_POLICY_SCOPE)
include(rift_python)
##
# specific it must be declared in the subdirectory.
##
+# Default package
+set(INSTALL_COMPONENT mano)
+
+option(PRODUCT "Control the details of the build" OSM)
+
+if (PRODUCT STREQUAL "RIFTWARE")
+set(INCLUDE_EXAMPLES ON)
+else()
+set(INCLUDE_EXAMPLES OFF)
+endif()
+
##
# Include the subdirs
##
set(subdirs
common
- examples
models
rwcal
rwmon
rwcm
rwlaunchpad
+ rwprojectmano
)
if (NOT RIFT_AGENT_BUILD STREQUAL "XML_ONLY")
list(APPEND subdirs confd_client)
endif()
+if (INCLUDE_EXAMPLES)
+ message("Including examples")
+ list(APPEND subdirs examples)
+else()
+ message("NOT including examples")
+endif()
+
rift_add_subdirs(SUBDIR_LIST ${subdirs})
+install(FILES BUILD.sh DESTINATION bin COMPONENT installer)
+
+##
+# Set up package details
+##
+
+rift_set_component_package_fields(
+ "mano"
+ DESCRIPTION "RIFT.ware MANO"
+ )
+
+rift_set_component_package_fields(
+ "rwcal-plugin-aws"
+ DESCRIPTION "RIFT.ware AWS plugin"
+ )
+
+rift_set_component_package_fields(
+ "rwcal-plugin-cloudsim"
+ DESCRIPTION "RIFT.ware cloudsim plugin"
+ )
+
+rift_set_component_package_fields(
+ "rwcal-plugin-cloudsimproxy"
+ DESCRIPTION "RIFT.ware cloudsimproxy plugin"
+ )
+
+rift_set_component_package_fields(
+ "rwcal-plugin-openmano-vimconnector"
+ DESCRIPTION "RIFT.ware vimconnector plugin"
+ )
+
+rift_set_component_package_fields(
+ "rwcal-plugin-openstack"
+ DESCRIPTION "RIFT.ware openstack plugin"
+ )
+
+rift_set_component_package_fields(
+ "rwcal-plugin-brocade"
+ DESCRIPTION "RIFT.ware brocade plugin"
+ )
+
+rift_set_component_package_fields(
+ "rwcal-plugin-mock"
+ DESCRIPTION "RIFT.ware mock plugin"
+ )
+
##
# This macro adds targets for documentaion, unittests, code coverage and packaging
##
${CMAKE_CURRENT_SOURCE_DIR}/.cpack-workaround
DESTINATION ${dir})
endif()
-
-
FROM ubuntu:16.04
-RUN apt-get update && apt-get -y install python3 curl build-essential
+RUN apt-get update && apt-get -y install python3 curl build-essential apt-transport-https sudo
RUN curl http://repos.riftio.com/public/xenial-riftware-public-key | apt-key add - && \
- curl -o /etc/apt/sources.list.d/OSM.list http://buildtracker.riftio.com/repo_file/ub16/OSM/ && \
+ curl -o /etc/apt/sources.list.d/rift.list http://buildtracker.riftio.com/repo_file/ub16/OSM/ && \
apt-get update && \
- apt-get -y install rw.toolchain-rwbase \
- rw.toolchain-rwtoolchain \
- rw.core.mgmt-mgmt \
- rw.core.util-util \
- rw.core.rwvx-rwvx \
- rw.core.rwvx-rwdts \
- rw.automation.core-RWAUTO \
- rw.tools-container-tools \
- rw.tools-scripts \
- python-cinderclient \
- libxml2-dev \
- libxslt-dev
+ apt-get -y install \
+ rw.tools-container-tools=5.2.0.0.71033 \
+ rw.tools-scripts=5.2.0.0.71033
-RUN /usr/rift/container_tools/mkcontainer --modes build --modes ext --repo OSM
+RUN /usr/rift/container_tools/mkcontainer --modes SO-dev --repo OSM --rw-version 5.2.0.0.71033
RUN chmod 777 /usr/rift /usr/rift/usr/share
+++ /dev/null
-deps/
-builds/
-
cmake_minimum_required(VERSION 2.8)
-set(PKG_NAME common)
-set(PKG_VERSION 1.0)
-set(PKG_RELEASE 1)
-set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
-
set(subdirs
plugins
python
FILES
rw_gen_package.py
DESTINATION usr/rift/mano/common
- COMPONENT ${PKG_LONG_NAME})
+ COMPONENT ${INSTALL_COMPONENT}
+ )
##
# This function creates an install target for the plugin artifacts
##
-rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+rift_install_gobject_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py COMPONENT ${INSTALL_COMPONENT})
# Workaround RIFT-6485 - rpmbuild defaults to python2 for
# anything not in a site-packages directory so we have to
FILES
rift/tasklets/${TASKLET_NAME}/__init__.py
rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY)
ResourceProvisioning.cal_interface = plugin.get_interface("Cloud")
ResourceProvisioning.cal_interface.init(ResourceProvisioning.log_hdl)
- self.account = RwcalYang.CloudAccount()
+ self.account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
self.account.account_type = "cloudsim_proxy"
self.account.cloudsim_proxy.host = "192.168.122.1"
def create_image(self, location):
"""Creates and returns a CAL image"""
- image = RwcalYang.ImageInfoItem()
+ image = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList()
image.name = "rift-lxc-image"
image.location = location
image.disk_format = "qcow2"
def create_network(self, network_name, subnet):
"""Creates and returns a CAL network"""
- network = RwcalYang.NetworkInfoItem(
+ network = RwcalYang.YangData_RwProject_Project_VimResources_NetworkinfoList(
network_name=network_name,
subnet=subnet,
)
A VM object
"""
- vm = RwcalYang.VMInfoItem()
+ vm = RwcalYang.YangData_RwProject_Project_VimResources_VminfoList()
vm.vm_name = 'rift-s{}'.format(index + 1)
vm.image_id = image.id
vm.user_tags.node_id = str(uuid.uuid4())
Returns a port object
"""
- port = RwcalYang.PortInfoItem()
+ port = RwcalYang.YangData_RwProject_Project_VimResources_PortinfoList()
port.port_name = "eth1"
port.network_id = network.network_id
port.vm_id = vm.vm_id
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
##
# Yang targets
##
+
rift_add_yang_target(
TARGET rwsdn_yang
YANG_FILES rw-sdn.yang
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
LIBRARIES
rwsdnal_yang_gen
DEPENDS
rwsdnal_yang
+ ASSOCIATED_FILES
+ rw-sdn.role.xml
)
rift_add_yang_target(
TARGET rwcloud_yang
YANG_FILES rw-cloud.yang
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
LIBRARIES
rwsdn_yang_gen
rwcal_yang_gen
+ rwprojectmano_yang_gen
+ mano-types_yang_gen
DEPENDS
rwcal_yang
rwsdnal_yang
+ ASSOCIATED_FILES
+ rw-cloud.role.xml
)
rift_add_yang_target(
TARGET rwconfig_agent_yang
YANG_FILES rw-config-agent.yang
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
LIBRARIES
rwcal_yang_gen
+ rwprojectmano_yang_gen
DEPENDS
rwcal_yang
+ rwprojectmano_yang
+ ASSOCIATED_FILES
+ rw-config-agent.role.xml
+)
+
+rift_add_yang_target(
+ TARGET rwro_account_yang
+ YANG_FILES rw-ro-account.yang
+ COMPONENT ${INSTALL_COMPONENT}
+ LIBRARIES
+ rwprojectmano_yang_gen
+ mano-types_yang_gen
+ DEPENDS
+ rwprojectmano_yang
+ ASSOCIATED_FILES
+ rw-ro-account.role.xml
)
--- /dev/null
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:rw-cloud-role</role>
+ <key-set>
+ <name>project-name</name>
+ <path>/rw-project:project/rw-project:name</path>
+ <path>/rw-cloud:update-cloud-status/rw-cloud:project-name</path>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:account-oper</role>
+ <keys-role>rw-project-mano:rw-cloud-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-cloud:cloud</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:account-admin</role>
+ <keys-role>rw-project-mano:rw-cloud-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/rw-cloud:cloud</path>
+ <path>/rw-cloud:update-cloud-status</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:rw-cloud-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-cloud:cloud</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project:project-admin</role>
+ <keys-role>rw-project-mano:rw-cloud-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-cloud:update-cloud-status</path>
+ </authorize>
+ </role-definition>
+</config>
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix rw-cloud;
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
import tailf-common {
prefix tailf-common;
}
- tailf-common:annotate "/rw-cloud:cloud/rw-cloud:account/rw-cloud:connection-status" {
+ tailf-common:annotate "/rw-project:project/rw-cloud:cloud/rw-cloud:account/rw-cloud:connection-status" {
tailf-common:callpoint rw_callpoint;
}
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
namespace "http://riftio.com/ns/riftware-1.0/rw-cloud";
prefix "rw-cloud";
-
- import rw-pb-ext {
- prefix "rw-pb-ext";
- }
-
import rwcal {
prefix "rwcal";
}
prefix "rw-sdn";
}
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-09-14 {
description
"Initial revision.";
}
- container cloud {
- rw-pb-ext:msg-new CloudConfig;
- list account {
- rw-pb-ext:msg-new CloudAccount;
- description "Configure Cloud Accounts";
+ augment "/rw-project:project" {
+ container cloud {
+ list account {
+ description "Configure Cloud Accounts";
- max-elements 16;
- key "name";
+ max-elements 16;
+ key "name";
- leaf name {
- mandatory true;
- type string {
+ leaf name {
+ mandatory true;
+ type string {
length "1..255";
+ }
}
- }
- leaf sdn-account {
- description "Configured SDN account associated with this cloud account";
- type leafref {
- path "/rw-sdn:sdn/rw-sdn:account/rw-sdn:name";
+ leaf sdn-account {
+ description "Configured SDN account associated with this cloud account";
+ type leafref {
+ path "../../../rw-sdn:sdn/rw-sdn:account/rw-sdn:name";
+ }
}
- }
- uses rwcal:provider-auth;
- uses rwcal:connection-status;
+ uses rwcal:provider-auth;
+ uses rwcal:connection-status;
+ uses rwcal:instance-timeout;
+ }
}
}
"The cloud account name to update connection status for";
type string;
}
+
+ uses manotypes:rpc-project-name;
}
}
--- /dev/null
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:rw-config-agent-role</role>
+ <key-set>
+ <name>project-name</name>
+ <path>/rw-project:project/rw-project:name</path>
+ <path>/rw-config-agent:update-cfg-agent-status/rw-config-agent:project-name</path>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:account-oper</role>
+ <keys-role>rw-project-mano:rw-config-agent-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-config-agent:config-agent</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:account-admin</role>
+ <keys-role>rw-project-mano:rw-config-agent-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/rw-config-agent:config-agent</path>
+ <path>/rw-config-agent:update-cfg-agent-status</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:rw-config-agent-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-config-agent:config-agent</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project:project-admin</role>
+ <keys-role>rw-project-mano:rw-config-agent-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-config-agent:update-cfg-agent-status</path>
+ </authorize>
+ </role-definition>
+</config>
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix tailf-common;
}
- tailf-common:annotate "/rw-config-agent:config-agent/rw-config-agent:account/rw-config-agent:connection-status" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf-common:annotate "/rw-project:project/rw-config-agent:config-agent" +
+ "/rw-config-agent:account/rw-config-agent:connection-status" {
tailf-common:callpoint rw_callpoint;
}
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
namespace "http://riftio.com/ns/riftware-1.0/rw-config-agent";
prefix "rw-config-agent";
- import rw-pb-ext {
- prefix "rwpb";
- }
-
import ietf-inet-types {
prefix "inet";
}
prefix "rwcal";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2016-02-04 {
description
"Initial revision.";
}
}
- container config-agent {
- rwpb:msg-new ConfigAgent;
+ augment "/rw-project:project" {
+ container config-agent {
+ list account {
+ key "name";
- list account {
- rwpb:msg-new ConfigAgentAccount;
- key "name";
+ description "List of configuration agent accounts";
- description "List of configuration agent accounts";
-
- leaf name {
- description "Name of this config agent account";
- type string;
- }
+ leaf name {
+ description "Name of this config agent account";
+ type string;
+ }
- leaf account-type {
- description
+ leaf account-type {
+ description
"Default account type is Rift Configuration Agent (RiftCA)";
- type config-agent-account-type;
- default "riftca";
- }
+ type config-agent-account-type;
+ default "riftca";
+ }
- choice config-agent-account-type {
- case juju {
- description
- "Configure the VNF through Juju.";
- container juju {
- leaf ip-address {
+ choice config-agent-account-type {
+ case juju {
+ description
+ "Configure the VNF through Juju.";
+ container juju {
+ leaf ip-address {
description "Juju host IP address.";
type inet:ip-address;
- }
- leaf port {
+ }
+ leaf port {
description
- "Juju host port number. Default 17070.";
+ "Juju host port number. Default 17070.";
type inet:port-number;
default 17070;
- }
- leaf user {
+ }
+ leaf user {
description
- "User name to connect to Juju host. Default user-admin.";
+ "User name to connect to Juju host. Default user-admin.";
type string;
default "user-admin" ;
- }
- leaf secret {
+ }
+ leaf secret {
description
- "Admin secret or password for Juju host.";
+ "Admin secret or password for Juju host.";
type string;
+ }
}
}
}
+ uses rwcal:connection-status;
}
- uses rwcal:connection-status;
}
}
"The config agent account name to update connection status for";
type string;
}
+
+ uses manotypes:rpc-project-name;
}
}
}
--- /dev/null
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:rw-ro-account-role</role>
+ <key-set>
+ <name>project-name</name>
+ <path>/rw-project:project/rw-project:name</path>
+ <path>/rw-ro-account:update-ro-account-status/rw-ro-account:project-name</path>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:account-oper</role>
+ <keys-role>rw-project-mano:rw-ro-account-role</keys-role>
+ <priority>
+ <lower-than>
+ <role>rw-project:project-admin</role>
+ </lower-than>
+ </priority>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-ro-account:ro-account</path>
+ <path>/rw-project:project/rw-ro-account:ro-account-state</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:account-admin</role>
+ <keys-role>rw-project-mano:rw-ro-account-role</keys-role>
+ <priority>
+ <higher-than>
+ <role>rw-project-mano:account-oper</role>
+ </higher-than>
+ <higher-than>
+ <role>rw-project-mano:lcm-oper</role>
+ </higher-than>
+ <higher-than>
+ <role>rw-project-mano:catalog-oper</role>
+ </higher-than>
+ <higher-than>
+ <role>rw-project:project-oper</role>
+ </higher-than>
+ <higher-than>
+ <role>rw-project-mano:lcm-admin</role>
+ </higher-than>
+ </priority>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/rw-ro-account:ro-account</path>
+ <path>/rw-ro-account:update-ro-account-status</path>
+ <path>/rw-project:project/rw-ro-account:ro-account-state</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:rw-ro-account-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-ro-account:ro-account</path>
+ <path>/rw-project:project/rw-ro-account:ro-account-state</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project:project-admin</role>
+ <keys-role>rw-project-mano:rw-ro-account-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-ro-account:update-ro-account-status</path>
+ </authorize>
+ </role-definition>
+</config>
--- /dev/null
+
+/*
+ *
+ * Copyright 2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+module rw-ro-account-annotation
+{
+ namespace "http://riftio.com/ns/riftware-1.0/rw-ro-account-annotation";
+ prefix "rw-ro-account-ann";
+
+ import rw-ro-account {
+ prefix rw-ro-account;
+ }
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import tailf-common {
+ prefix tailf-common;
+ }
+
+ tailf-common:annotate "/rw-project:project/rw-ro-account:ro-account-state" {
+ tailf-common:callpoint rw_callpoint;
+ }
+
+ tailf-common:annotate "/rw-project:project/rw-ro-account:ro-account-state/rw-ro-account:account/rw-ro-account:connection-status" {
+ tailf-common:callpoint rw_callpoint;
+ }
+
+ tailf-common:annotate "/rw-project:project/rw-ro-account:ro-account-state/rw-ro-account:account/rw-ro-account:instance-ref-count" {
+ tailf-common:callpoint rw_callpoint;
+ }
+
+ tailf-common:annotate "/rw-project:project/rw-ro-account:ro-account-state/rw-ro-account:account/rw-ro-account:datacenters" {
+ tailf-common:callpoint rw_callpoint;
+ }
+
+ tailf-common:annotate "/rw-project:project/rw-ro-account:ro-account-state/rw-ro-account:account/rw-ro-account:config-data" {
+ tailf-common:callpoint rw_callpoint;
+ }
+
+ tailf-common:annotate "/rw-ro-account:update-ro-account-status" {
+ tailf-common:actionpoint rw_actionpoint;
+ }
+}
--- /dev/null
+
+/*
+ *
+ * Copyright 2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+module rw-ro-account {
+
+ namespace "http://riftio.com/ns/riftware-1.0/rw-ro-account";
+ prefix "rw-ro-account";
+
+ import ietf-yang-types {
+ prefix "yang";
+ }
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ revision 2017-05-15 {
+ description
+ "Initial revision.";
+ }
+
+ typedef resource-orchestrator-account-type {
+ description "RO account type";
+ type enumeration {
+ enum rift-ro;
+ enum openmano;
+ }
+ }
+
+ typedef connection-status {
+ description "Connection status for the RO account";
+ type enumeration {
+ enum unknown;
+ enum validating;
+ enum success;
+ enum failure;
+ }
+ }
+
+ augment "/rw-project:project" {
+ container ro-account {
+ list account {
+ key "name";
+ description "Configure RO Accounts";
+
+ leaf name {
+ type string;
+ }
+
+ leaf ro-account-type {
+ type resource-orchestrator-account-type;
+ }
+
+ choice resource-orchestrator {
+ description
+ "The resource orchestrator to use by the Launchpad";
+
+ case openmano {
+ description
+ "Use OpenMano as RO";
+
+ container openmano {
+ leaf host {
+ type string;
+ default "localhost";
+ }
+
+ leaf port {
+ type uint16;
+ default 9090;
+ }
+
+ leaf tenant-id {
+ type string {
+ length "36";
+ }
+ mandatory true;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ augment "/rw-project:project" {
+ container ro-account-state {
+ config false;
+
+ list account {
+ key "name";
+ description "RO Account Operational State";
+
+ leaf name {
+ type string;
+ }
+
+ container connection-status {
+ leaf status {
+ type connection-status;
+ }
+ leaf details {
+ type string;
+ }
+ }
+
+ container instance-ref-count{
+ leaf count {
+ type uint16;
+ description "No of NSD that got instantiated using this RO account";
+ }
+ }
+
+ container datacenters {
+ list datacenters {
+ key "name";
+
+ leaf uuid {
+ description "The UUID of the data center";
+ type yang:uuid;
+ }
+
+ leaf name {
+ description "The name of the data center";
+ type string;
+ }
+
+ leaf datacenter-type
+ {
+ description "The type for the data center";
+ type manotypes:cloud-account-type;
+ }
+ }
+ }
+
+ container config-data{
+ leaf ro-account-type {
+ default "rift";
+ type string;
+ }
+ }
+ }
+ }
+ }
+
+ rpc update-ro-account-status {
+ description "update ro account connection status";
+ input {
+ leaf ro-account {
+ mandatory true;
+ description
+ "The RO account name to update connection status for";
+ type string;
+ }
+ uses manotypes:rpc-project-name;
+ }
+ }
+
+}
--- /dev/null
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:rw-sdn-role</role>
+ <key-set>
+ <name>project-name</name>
+ <path>/rw-project:project/rw-project:name</path>
+ <path>/rw-sdn:update-sdn-status/rw-sdn:project-name</path>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:account-oper</role>
+ <keys-role>rw-project-mano:rw-sdn-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-sdn:sdn</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:account-admin</role>
+ <keys-role>rw-project-mano:rw-sdn-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/rw-sdn:sdn</path>
+ <path>/rw-sdn:update-sdn-status</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:rw-sdn-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-sdn:sdn</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project:project-admin</role>
+ <keys-role>rw-project-mano:rw-sdn-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-sdn:update-sdn-status</path>
+ </authorize>
+ </role-definition>
+</config>
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
import rw-sdn {
prefix rw-sdn;
}
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
import tailf-common {
prefix tailf-common;
}
- tailf:annotate "/rw-sdn:sdn/rw-sdn:account/rw-sdn:connection-status" {
+ tailf-common:annotate "/rw-project:project/rw-sdn:sdn/rw-sdn:account/rw-sdn:connection-status" {
tailf-common:callpoint rw_callpoint;
}
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
namespace "http://riftio.com/ns/riftware-1.0/rw-sdn";
prefix "rw-sdn";
-
- import rw-pb-ext {
- prefix "rwpb";
+ import rw-project {
+ prefix "rw-project";
}
import rwsdnal {
prefix "rwsdnal";
}
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-09-14 {
description
"Initial revision.";
}
- container sdn {
- rwpb:msg-new SDNAccountConfig;
- list account {
- rwpb:msg-new SDNAccount;
- key "name";
- leaf name {
- type string;
- }
+ augment "/rw-project:project" {
+ container sdn {
+ list account {
+ key "name";
+ leaf name {
+ type string;
+ }
- uses rwsdnal:sdn-provider-auth;
- uses rwsdnal:connection-status;
+ uses rwsdnal:sdn-provider-auth;
+ uses rwsdnal:connection-status;
+ }
}
}
"The sdn account name to update connection status for";
type string;
}
+
+ uses manotypes:rpc-project-name;
+ }
+ }
+
+ notification sdn-notif {
+ description "Notification for SDN account credentials";
+ leaf name {
+ description "SDN account name";
+ type string;
+ }
+
+ leaf message {
+ description "Notification message";
+ type string;
}
}
}
FILES
rift/mano/__init__.py
rift/mano/ncclient.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY
)
rift/mano/cloud/accounts.py
rift/mano/cloud/config.py
rift/mano/cloud/operdata.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
+ PYTHON3_ONLY
+ )
+
+rift_python_install_tree(
+ FILES
+ rift/mano/ro_account/__init__.py
+ rift/mano/ro_account/accounts.py
+ rift/mano/ro_account/config.py
+ rift/mano/ro_account/operdata.py
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY
)
rift/mano/sdn/accounts.py
rift/mano/sdn/config.py
rift/mano/sdn/operdata.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY
)
rift/mano/config_agent/operdata.py
rift/mano/config_agent/__init__.py
rift/mano/config_agent/config.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY
)
rift/mano/dts/subscriber/store.py
rift/mano/dts/subscriber/ns_subscriber.py
rift/mano/dts/subscriber/vnf_subscriber.py
- rift/mano/dts/subscriber/ro_account.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY
)
FILES
rift/mano/dts/rpc/__init__.py
rift/mano/dts/rpc/core.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY
)
FILES
rift/downloader/__init__.py
rift/downloader/base.py
+ rift/downloader/local_file.py
rift/downloader/url.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY
)
FILES
rift/mano/config_data/__init__.py
rift/mano/config_data/config.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY
)
rift/mano/tosca_translator/rwmano/tosca/tosca_initial_config.py
rift/mano/tosca_translator/rwmano/tosca/tosca_placement_group.py
rift/mano/tosca_translator/rwmano/tosca/tosca_vnf_configuration.py
+ rift/mano/tosca_translator/rwmano/tosca/tosca_vnf_ns_service_primitive.py
rift/mano/tosca_translator/rwmano/tosca/tosca_forwarding_graph.py
rift/mano/tosca_translator/rwmano/tosca/tosca_forwarding_path.py
rift/mano/tosca_translator/common/__init__.py
rift/mano/tosca_translator/conf/translator.conf
rift/mano/tosca_translator/conf/__init__.py
rift/mano/tosca_translator/conf/config.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY
)
rift/mano/utils/__init.py__
rift/mano/utils/compare_desc.py
rift/mano/utils/juju_api.py
+ rift/mano/utils/ssh_keys.py
+ rift/mano/utils/project.py
rift/mano/utils/short_name.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY
)
rift/mano/yang_translator/common/utils.py
rift/mano/yang_translator/common/exception.py
rift/mano/yang_translator/common/__init__.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY
)
FILES rift/mano/yang_translator/riftiotypes.yaml
DESTINATION
usr/rift/mano/common
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
)
install(
FILES rift/mano/tosca_translator/dummy_vnf_node.yaml
DESTINATION
usr/rift/mano/common
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
)
install(
FILES ${TRANSLATOR_SCRIPTS}
DESTINATION
usr/bin
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
)
set(subdirs
self.progress_percent = 0
self.bytes_downloaded = 0
self.bytes_per_second = 0
-
-
+ self.status = None
self.start_time = 0
self.stop_time = 0
self.detail = ""
--- /dev/null
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Taken from http://stackoverflow.com/a/27786580
+
+
+import logging
+import requests
+import os
+from urllib.parse import urlparse
+
+
+class LocalFileAdapter(requests.adapters.BaseAdapter):
+ """Protocol Adapter to allow Requests to GET file:// URLs
+
+ @todo: Properly handle non-empty hostname portions.
+ """
+
+ @staticmethod
+ def _chkpath(method, path):
+ """Return an HTTP status for the given filesystem path."""
+ if method.lower() in ('put', 'delete'):
+ return 501, "Not Implemented" # TODO
+ elif method.lower() not in ('get', 'head'):
+ return 405, "Method Not Allowed"
+ elif os.path.isdir(path):
+ return 400, "Path Not A File"
+ elif not os.path.isfile(path):
+ return 404, "File Not Found"
+ elif not os.access(path, os.R_OK):
+ return 403, "Access Denied"
+ else:
+ return 200, "OK"
+
+ def send(self, req, **kwargs): # pylint: disable=unused-argument
+ """Return the file specified by the given request
+
+ @type req: C{PreparedRequest}
+ @todo: Should I bother filling `response.headers` and processing
+ If-Modified-Since and friends using `os.stat`?
+ """
+
+ log = logging.getLogger('rw-mano-log')
+ log.debug("Request: {}".format(req))
+
+ url = urlparse(req.path_url)
+ path = os.path.normcase(os.path.normpath(url.path))
+ response = requests.Response()
+
+ response.status_code, response.reason = self._chkpath(req.method, path)
+ log.debug("Response {}: {}".format(response.status_code, response.reason))
+ if response.status_code == 200 and req.method.lower() != 'head':
+ try:
+ response.raw = open(path, 'rb')
+ except (OSError, IOError) as err:
+ response.status_code = 500
+ response.reason = str(err)
+
+ if isinstance(req.url, bytes):
+ response.url = req.url.decode('utf-8')
+ else:
+ response.url = req.url
+
+ response.request = req
+ response.connection = self
+
+
+ log.debug("Response {}: {}".format(response.status_code, response))
+ return response
+
+ def close(self):
+ pass
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from . import base
+from .local_file import LocalFileAdapter as LocalFileAdapter
class UrlDownloader(base.AbstractDownloader):
"""Handles downloads of URL with some basic retry strategy.
retries = Retry(total=2, backoff_factor=1)
session.mount("http://", HTTPAdapter(max_retries=retries))
session.mount("https://", HTTPAdapter(max_retries=retries))
+ session.mount("file://", LocalFileAdapter())
return session
self.meta.update_data_with_head(response.headers)
self.meta.start_download()
- self.download_started()
+ self.download_progress()
url_options["stream"] = True,
request = self.session.get(self.url, **url_options)
chunk = self.check_and_decompress(chunk)
self._fh.write(chunk)
- self.download_progress()
+ #self.download_progress()
self.meta.end_download()
self.close()
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
import sys
import asyncio
from gi import require_version
+require_version('RwCal', '1.0')
require_version('RwcalYang', '1.0')
require_version('RwTypes', '1.0')
require_version('RwCloudYang', '1.0')
self._cal = self.plugin.get_interface("Cloud")
self._cal.init(rwlog_hdl)
-
- self._status = RwCloudYang.CloudAccount_ConnectionStatus(
+
+ self._status = RwCloudYang.YangData_RwProject_Project_Cloud_Account_ConnectionStatus(
status="unknown",
details="Connection status lookup not started"
)
@property
def cal_account_msg(self):
- return RwcalYang.CloudAccount.from_dict(
+ return RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList.from_dict(
self.account_msg.as_dict(),
ignore_missing_keys=True,
)
def cloud_account_msg(self, account_dict):
- self._account_msg = RwCloudYang.CloudAccount.from_dict(account_dict)
+ self._account_msg = RwCloudYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList.from_dict(account_dict)
@property
def account_type(self):
@asyncio.coroutine
def validate_cloud_account_credentials(self, loop):
- self._log.debug("Validating Cloud Account credentials %s", self._account_msg)
- self._status = RwCloudYang.CloudAccount_ConnectionStatus(
+ self._log.debug("Validating Cloud Account credentials for account %s",
+ self.name)
+ self._status = RwCloudYang.YangData_RwProject_Project_Cloud_Account_ConnectionStatus(
status="validating",
details="Cloud account connection validation in progress"
)
self.cal_account_msg,
)
if rwstatus == RwTypes.RwStatus.SUCCESS:
- self._status = RwCloudYang.CloudAccount_ConnectionStatus.from_dict(status.as_dict())
+ self._status = RwCloudYang.YangData_RwProject_Project_Cloud_Account_ConnectionStatus.from_dict(status.as_dict())
else:
- self._status = RwCloudYang.CloudAccount_ConnectionStatus(
+ self._status = RwCloudYang.YangData_RwProject_Project_Cloud_Account_ConnectionStatus(
status="failure",
details="Error when calling CAL validate cloud creds"
)
- self._log.info("Got cloud account validation response: %s", self._status)
+ if self._status.status == 'failure':
+ self._log.error("Cloud account validation failed. Acct: %s, response: %s",
+ self.name, self._status)
+ @asyncio.coroutine
def start_validate_credentials(self, loop):
if self._validate_task is not None:
self._validate_task.cancel()
self._validate_task = None
- self._validate_task = asyncio.ensure_future(
+ self._validate_task = yield from asyncio.ensure_future(
self.validate_cloud_account_credentials(loop),
loop=loop
)
-
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
import gi
gi.require_version('RwDts', '1.0')
import rift.tasklets
+from rift.mano.utils.project import get_add_delete_update_cfgs
from gi.repository import (
RwcalYang as rwcal,
RwDts as rwdts,
ProtobufC,
+ RwCloudYang,
+ RwTypes
)
from . import accounts
pass
-def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
- # Unforunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
-
-
class CloudAccountConfigCallbacks(object):
def __init__(self,
on_add_apply=None, on_add_prepare=None,
class CloudAccountConfigSubscriber(object):
XPATH = "C,/rw-cloud:cloud/rw-cloud:account"
- def __init__(self, dts, log, rwlog_hdl, cloud_callbacks):
+ def __init__(self, dts, log, rwlog_hdl, project, cloud_callbacks):
self._dts = dts
self._log = log
self._rwlog_hdl = rwlog_hdl
+ self._project = project
self._reg = None
self.accounts = {}
self.delete_account(account_msg.name)
self.add_account(account_msg)
+ def deregister(self):
+ self._log.debug("Project {}: De-register cloud account handler".
+ format(self._project))
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
+
+ @asyncio.coroutine
def register(self):
@asyncio.coroutine
- def apply_config(dts, acg, xact, action, _):
+ def apply_config(dts, acg, xact, action, scratch):
self._log.debug("Got cloud account apply config (xact: %s) (action: %s)", xact, action)
if xact.xact is None:
for cfg in curr_cfg:
self._log.debug("Cloud account being re-added after restart.")
if not cfg.has_field('account_type'):
- raise CloudAccountError("New cloud account must contain account_type field.")
+ self._log.error("New cloud account must contain account_type field.")
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
self.add_account(cfg)
else:
# When RIFT first comes up, an INSTALL is called with the current config
return
+ #Updating the account incase individual fields of cloud accounts is being deleted.
+ if self._reg:
+ for cfg in self._reg.get_xact_elements(xact):
+ if cfg.name in scratch.get('cloud_accounts', []):
+ self.update_account(cfg)
+ scratch.pop('cloud_accounts', None)
+
add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
dts_member_reg=self._reg,
xact=xact,
""" Prepare callback from DTS for Cloud Account """
action = xact_info.query_action
+
+ xpath = ks_path.to_xpath(RwCloudYang.get_schema())
+
self._log.debug("Cloud account on_prepare config received (action: %s): %s",
xact_info.query_action, msg)
if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
if msg.name in self.accounts:
- self._log.debug("Cloud account already exists. Invoking update request")
+ self._log.debug("Cloud account {} already exists. " \
+ "Invoking update request".format(msg.name))
# Since updates are handled by a delete followed by an add, invoke the
# delete prepare callbacks to give clients an opportunity to reject.
else:
self._log.debug("Cloud account does not already exist. Invoking on_prepare add request")
if not msg.has_field('account_type'):
- raise CloudAccountError("New cloud account must contain account_type field.")
+ self._log.error("New cloud account must contain account_type field.")
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
account = accounts.CloudAccount(self._log, self._rwlog_hdl, msg)
yield from self._cloud_callbacks.on_add_prepare(account)
fref = ProtobufC.FieldReference.alloc()
fref.goto_whole_message(msg.to_pbcm())
if fref.is_field_deleted():
- yield from self._cloud_callbacks.on_delete_prepare(msg.name)
-
+ try:
+ yield from self._cloud_callbacks.on_delete_prepare(msg.name)
+ except Exception as e:
+ err_msg = str(e)
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE, xpath, err_msg)
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
else:
fref.goto_proto_name(msg.to_pbcm(), "sdn_account")
if fref.is_field_deleted():
del dict_account["sdn_account"]
account.cloud_account_msg(dict_account)
else:
- self._log.error("Deleting individual fields for cloud account not supported")
- xact_info.respond_xpath(rwdts.XactRspCode.NACK)
- return
+ #Updating Account incase individuals fields are being deleted
+ cloud_accounts = scratch.setdefault('cloud_accounts', [])
+ cloud_accounts.append(msg.name)
else:
self._log.error("Action (%s) NOT SUPPORTED", action)
on_apply=apply_config,
)
+ xpath = self._project.add_project(CloudAccountConfigSubscriber.XPATH)
with self._dts.appconf_group_create(acg_handler) as acg:
self._reg = acg.register(
- xpath=CloudAccountConfigSubscriber.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
on_prepare=on_prepare,
)
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
import asyncio
+import gi
import rift.tasklets
from gi.repository import(
RwCloudYang,
RwDts as rwdts,
+ RwTypes,
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
class CloudAccountNotFound(Exception):
pass
class CloudAccountDtsOperdataHandler(object):
- def __init__(self, dts, log, loop):
+ def __init__(self, dts, log, loop, project):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
+ self._regh = None
+ self._rpc = None
self.cloud_accounts = {}
def add_cloud_account(self, account):
self.cloud_accounts[account.name] = account
- account.start_validate_credentials(self._loop)
+ asyncio.ensure_future(
+ account.start_validate_credentials(self._loop),
+ loop=self._loop
+ )
def delete_cloud_account(self, account_name):
del self.cloud_accounts[account_name]
self._log.info("Notification called by creating dts query: %s", ac_status)
+ @asyncio.coroutine
def _register_show_status(self):
def get_xpath(cloud_name=None):
return "D,/rw-cloud:cloud/account{}/connection-status".format(
- "[name='%s']" % cloud_name if cloud_name is not None else ''
- )
+ "[name=%s]" % quoted_key(cloud_name) if cloud_name is not None else ''
+ )
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
- path_entry = RwCloudYang.CloudAccount.schema().keyspec_to_entry(ks_path)
+ path_entry = RwCloudYang.YangData_RwProject_Project_Cloud_Account.schema().keyspec_to_entry(ks_path)
cloud_account_name = path_entry.key00.name
- self._log.debug("Got show cloud connection status request: %s", ks_path.create_string())
try:
saved_accounts = self.get_saved_cloud_accounts(cloud_account_name)
for account in saved_accounts:
connection_status = account.connection_status
- self._log.debug("Responding to cloud connection status request: %s", connection_status)
+ xpath = self._project.add_project(get_xpath(account.name))
xact_info.respond_xpath(
rwdts.XactRspCode.MORE,
- xpath=get_xpath(account.name),
+ xpath=xpath,
msg=account.connection_status,
)
except KeyError as e:
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
- xpath=get_xpath(),
+ xpath = self._project.add_project(get_xpath())
+ self._regh = yield from self._dts.register(
+ xpath=xpath,
handler=rift.tasklets.DTS.RegistrationHandler(
on_prepare=on_prepare),
flags=rwdts.Flag.PUBLISHER,
)
+ @asyncio.coroutine
def _register_validate_rpc(self):
def get_xpath():
return "/rw-cloud:update-cloud-status"
def on_prepare(xact_info, action, ks_path, msg):
if not msg.has_field("cloud_account"):
raise CloudAccountNotFound("Cloud account name not provided")
-
cloud_account_name = msg.cloud_account
+
+ if not self._project.rpc_check(msg, xact_info=xact_info):
+ return
+
try:
account = self.cloud_accounts[cloud_account_name]
except KeyError:
- raise CloudAccountNotFound("Cloud account name %s not found" % cloud_account_name)
+ errmsg = "Cloud account name {} not found in project {}". \
+ format(cloud_account_name, self._project.name)
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+ get_xpath(),
+ errmsg)
+ raise CloudAccountNotFound(errmsg)
- account.start_validate_credentials(self._loop)
+ yield from account.start_validate_credentials(self._loop)
yield from self.create_notification(account)
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
+ self._rpc = yield from self._dts.register(
xpath=get_xpath(),
handler=rift.tasklets.DTS.RegistrationHandler(
on_prepare=on_prepare
@asyncio.coroutine
def register(self):
+ self._log.debug("Register cloud account for project %s", self._project.name)
yield from self._register_show_status()
yield from self._register_validate_rpc()
+
+ def deregister(self):
+ self._log.debug("De-register cloud account for project %s", self._project.name)
+ self._rpc.deregister()
+ self._regh.deregister()
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
import gi
gi.require_version('RwDts', '1.0')
import rift.tasklets
+from rift.mano.utils.project import get_add_delete_update_cfgs
from gi.repository import (
RwcalYang as rwcal,
pass
-def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
- # Unforunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
-
-
class ConfigAgentCallbacks(object):
def __init__(self,
on_add_apply=None, on_add_prepare=None,
class ConfigAgentSubscriber(object):
XPATH = "C,/rw-config-agent:config-agent/account"
- def __init__(self, dts, log, config_callbacks):
+ def __init__(self, dts, log, project, config_callbacks):
self._dts = dts
self._log = log
+ self._project = project
self._reg = None
self.accounts = {}
self.delete_account(account_msg)
self.add_account(account_msg)
+ def deregister(self):
+ self._log.debug("De-register config agent handler for project {}".
+ format(self._project.name))
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
+
def register(self):
def apply_config(dts, acg, xact, action, _):
self._log.debug("Got config account apply config (xact: %s) (action: %s)", xact, action)
if xact.xact is None:
- # When RIFT first comes up, an INSTALL is called with the current config
- # Since confd doesn't actally persist data this never has any data so
- # skip this for now.
- self._log.debug("No xact handle. Skipping apply config")
+ if action == rwdts.AppconfAction.INSTALL:
+ curr_cfg = self._reg.elements
+ for cfg in curr_cfg:
+ self._log.info("Config Agent Account {} being re-added after restart.".
+ format(cfg.name))
+ self.add_account(cfg)
+ else:
+ self._log.debug("No xact handle. Skipping apply config")
+
return
add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- self._log.debug("Registering for Config Account config using xpath: %s",
- ConfigAgentSubscriber.XPATH,
- )
acg_handler = rift.tasklets.AppConfGroup.Handler(
on_apply=apply_config,
)
with self._dts.appconf_group_create(acg_handler) as acg:
+ xpath = self._project.add_project(ConfigAgentSubscriber.XPATH)
+ self._log.debug("Registering for Config Account config using xpath: %s",
+ xpath)
self._reg = acg.register(
- xpath=ConfigAgentSubscriber.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER,
on_prepare=on_prepare,
)
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
import asyncio
import concurrent.futures
+import gi
import time
+import gi
+
+gi.require_version('RwNsrYang', '1.0')
from gi.repository import (
NsrYang,
RwNsrYang,
RwConfigAgentYang,
RwDts as rwdts)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
import rift.tasklets
-
import rift.mano.utils.juju_api as juju
def validate_account_creds(self):
- status = RwcalYang.CloudConnectionStatus()
+ status = RwcalYang.YangData_Rwcal_ConnectionStatus()
try:
env = self._api._get_env()
except juju.JujuEnvError as e:
else:
self._cfg_agent_client_plugin = None
- self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus(
+ self._status = RwConfigAgentYang.YangData_RwProject_Project_ConfigAgent_Account_ConnectionStatus(
status="unknown",
details="Connection status lookup not started"
)
def validate_cfg_agent_account_credentials(self, loop):
self._log.debug("Validating Config Agent Account %s, credential status %s", self._account_msg, self._status)
- self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus(
+ self._status = RwConfigAgentYang.YangData_RwProject_Project_ConfigAgent_Account_ConnectionStatus(
status="validating",
details="Config Agent account connection validation in progress"
)
if self._cfg_agent_client_plugin is None:
- self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus(
+ self._status = RwConfigAgentYang.YangData_RwProject_Project_ConfigAgent_Account_ConnectionStatus(
status="unknown",
details="Config Agent account does not support validation of account creds"
)
None,
self._cfg_agent_client_plugin.validate_account_creds
)
- self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus.from_dict(status.as_dict())
+ self._status = RwConfigAgentYang.YangData_RwProject_Project_ConfigAgent_Account_ConnectionStatus.from_dict(status.as_dict())
except Exception as e:
- self._status = RwConfigAgentYang.ConfigAgentAccount_ConnectionStatus(
+ self._status = RwConfigAgentYang.YangData_RwProject_Project_ConfigAgent_Account_ConnectionStatus(
status="failure",
details="Error - " + str(e)
)
)
class CfgAgentDtsOperdataHandler(object):
- def __init__(self, dts, log, loop):
+ def __init__(self, dts, log, loop, project):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self.cfg_agent_accounts = {}
+ self._show_reg = None
+ self._rpc_reg = None
def add_cfg_agent_account(self, account_msg):
account = ConfigAgentAccount(self._log, account_msg)
def _register_show_status(self):
def get_xpath(cfg_agent_name=None):
return "D,/rw-config-agent:config-agent/account{}/connection-status".format(
- "[name='%s']" % cfg_agent_name if cfg_agent_name is not None else ''
+ "[name=%s]" % quoted_key(cfg_agent_name) if cfg_agent_name is not None else ''
)
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
- path_entry = RwConfigAgentYang.ConfigAgentAccount.schema().keyspec_to_entry(ks_path)
+ path_entry = RwConfigAgentYang.YangData_RwProject_Project_ConfigAgent_Account.schema().keyspec_to_entry(ks_path)
cfg_agent_account_name = path_entry.key00.name
self._log.debug("Got show cfg_agent connection status request: %s", ks_path.create_string())
for account in saved_accounts:
connection_status = account.connection_status
self._log.debug("Responding to config agent connection status request: %s", connection_status)
+ xpath = self._project.add_project(get_xpath(account.name))
xact_info.respond_xpath(
rwdts.XactRspCode.MORE,
- xpath=get_xpath(account.name),
+ xpath=xpath,
msg=account.connection_status,
)
except KeyError as e:
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
- xpath=get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare),
- flags=rwdts.Flag.PUBLISHER,
- )
+ xpath = self._project.add_project(get_xpath())
+ self._show_reg = yield from self._dts.register(
+ xpath=xpath,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare),
+ flags=rwdts.Flag.PUBLISHER,
+ )
def _register_validate_rpc(self):
def get_xpath():
raise ConfigAgentAccountNotFound("Config Agent account name not provided")
cfg_agent_account_name = msg.cfg_agent_account
+
+ if not self._project.rpc_check(msg, xact_info=xact_info):
+ return
+
try:
account = self.cfg_agent_accounts[cfg_agent_account_name]
except KeyError:
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
- xpath=get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare
- ),
- flags=rwdts.Flag.PUBLISHER,
- )
+ self._rpc_reg = yield from self._dts.register(
+ xpath=get_xpath(),
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare
+ ),
+ flags=rwdts.Flag.PUBLISHER,
+ )
@asyncio.coroutine
def register(self):
yield from self._register_show_status()
yield from self._register_validate_rpc()
+ def deregister(self):
+ self._show_reg.deregister()
+ self._rpc_reg.deregister()
+
+
class ConfigAgentJob(object):
"""A wrapper over the config agent job object, providing some
convenience functions.
- YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob contains
+ YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob contains
||
==> VNFRS
||
"running" : "pending",
"failed" : "failure"}
- def __init__(self, nsr_id, job, tasks=None):
+ def __init__(self, nsr_id, job, project, tasks=None):
"""
Args:
nsr_id (uuid): ID of NSR record
- job (YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob): Gi object
+ job (YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob): Gi object
tasks: List of asyncio.tasks. If provided the job monitor will
use it to monitor the tasks instead of the execution IDs
"""
self._job = job
self.nsr_id = nsr_id
self.tasks = tasks
+ self._project = project
+
self._regh = None
@property
@property
def xpath(self):
"""Xpath of the job"""
- return ("D,/nsr:ns-instance-opdata" +
- "/nsr:nsr[nsr:ns-instance-config-ref='{}']" +
- "/nsr:config-agent-job[nsr:job-id='{}']"
- ).format(self.nsr_id, self.id)
+ return self._project.add_project(("D,/nsr:ns-instance-opdata" +
+ "/nsr:nsr[nsr:ns-instance-config-ref={}]" +
+ "/nsr:config-agent-job[nsr:job-id={}]"
+ ).format(quoted_key(self.nsr_id), quoted_key(str(self.id))))
@property
def regh(self):
self._regh = hdl
@staticmethod
- def convert_rpc_input_to_job(nsr_id, rpc_output, tasks):
+ def convert_rpc_input_to_job(nsr_id, rpc_output, tasks, project):
"""A helper function to convert the YangOutput_Nsr_ExecNsConfigPrimitive
- to YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob (NsrYang)
+ to YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob (NsrYang)
Args:
nsr_id (uuid): NSR ID
ConfigAgentJob
"""
# Shortcuts to prevent the HUUGE names.
- CfgAgentJob = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob
- CfgAgentVnfr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr
- CfgAgentPrimitive = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive
- CfgAgentPrimitiveParam = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive_Parameter
+ CfgAgentJob = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob
+ CfgAgentVnfr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr
+ CfgAgentPrimitive = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive
+ CfgAgentPrimitiveParam = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConfigAgentJob_Vnfr_Primitive_Parameter
job = CfgAgentJob.from_dict({
"job_id": rpc_output.job_id,
vnf_primitive = CfgAgentPrimitive.from_dict({
"name": primitive.name,
"execution_status": ConfigAgentJob.STATUS_MAP[primitive.execution_status],
- "execution_id": primitive.execution_id
+ "execution_id": primitive.execution_id,
+ "execution_error_details": primitive.execution_error_details,
})
# Copy over the input param
job.vnfr.append(vnfr_job)
- return ConfigAgentJob(nsr_id, job, tasks)
+ return ConfigAgentJob(nsr_id, job, project, tasks)
class ConfigAgentJobMonitor(object):
registration_handle.update_element(self.job.xpath, self.job.job)
- def get_error_details(self):
+ def get_execution_details(self):
'''Get the error details from failed primitives'''
errs = ''
for vnfr in self.job.job.vnfr:
- if vnfr.vnf_job_status != "failure":
- continue
-
for primitive in vnfr.primitive:
if primitive.execution_status == "failure":
errs += '<error>'
else:
errs += '{}: Unknown error'.format(primitive.name)
errs += "</error>"
-
+ else:
+ if primitive.execution_error_details:
+ errs += '<{status}>{details}</{status}>'.format(
+ status=primitive.execution_status,
+ details=primitive.execution_error_details)
return errs
@asyncio.coroutine
if "failure" in job_status:
self.job.job_status = "failure"
- errs = self.get_error_details()
- if len(errs):
- self.job.job.job_status_details = errs
elif "pending" in job_status:
self.job.job_status = "pending"
else:
self.job.job_status = "success"
+ errs = self.get_execution_details()
+ if len(errs):
+ self.job.job.job_status_details = errs
+
# self.log.debug("Publishing job status: {} at {} for nsr id: {}".format(
# self.job.job_status,
# self.job.xpath,
registration_handle.update_element(self.job.xpath, self.job.job)
+ registration_handle.update_element(self.job.xpath, self.job.job)
except Exception as e:
self.log.exception(e)
for primitive in vnfr.primitive:
if primitive.execution_status != 'pending':
+ if primitive.execution_id == "":
+ # We may not have processed the status for these yet
+ job_status.append(primitive.execution_status)
continue
if primitive.execution_id == "":
job_status.append(primitive.execution_status)
continue
- elif primitive.execution_id == "config":
+ if primitive.execution_id == "config":
# Config job. Check if service is active
task = self.loop.create_task(self.get_service_status(vnfr.id, primitive))
self._nsm = nsm
self._regh = None
- self._nsr_regh = None
+ self._project = cfgm.project
@property
def regh(self):
""" Return the ConfigManager manager instance """
return self._cfgm
- @staticmethod
- def cfg_job_xpath(nsr_id, job_id):
- return ("D,/nsr:ns-instance-opdata" +
- "/nsr:nsr[nsr:ns-instance-config-ref = '{}']" +
- "/nsr:config-agent-job[nsr:job-id='{}']").format(nsr_id, job_id)
+ def cfg_job_xpath(self, nsr_id, job_id):
+ return self._project.add_project(("D,/nsr:ns-instance-opdata" +
+ "/nsr:nsr[nsr:ns-instance-config-ref={}]" +
+ "/nsr:config-agent-job[nsr:job-id={}]").format(quoted_key(nsr_id), quoted_key(str(job_id))))
@asyncio.coroutine
def register(self):
""" prepare callback from dts """
xpath = ks_path.to_xpath(RwNsrYang.get_schema())
if action == rwdts.QueryAction.READ:
- schema = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.schema()
+ schema = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr.schema()
path_entry = schema.keyspec_to_entry(ks_path)
try:
nsr_id = path_entry.key00.ns_instance_config_ref
for job in jobs:
xact_info.respond_xpath(
rwdts.XactRspCode.MORE,
- CfgAgentJobDtsHandler.cfg_job_xpath(nsr_id, job.id),
+ self.cfg_job_xpath(nsr_id, job.id),
job.job)
except Exception as e:
hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
with self._dts.group_create() as group:
- self._regh = group.register(xpath=CfgAgentJobDtsHandler.XPATH,
+ self._regh = group.register(xpath=self._project.add_project(
+ CfgAgentJobDtsHandler.XPATH),
handler=hdl,
flags=rwdts.Flag.PUBLISHER,
)
- @asyncio.coroutine
def _terminate_nsr(self, nsr_id):
self._log.debug("NSR {} being terminated".format(nsr_id))
jobs = self.cfgm.get_job(nsr_id)
for job in jobs:
- path = CfgAgentJobDtsHandler.cfg_job_xpath(nsr_id, job.id)
+ path = self.cfg_job_xpath(nsr_id, job.id)
with self._dts.transaction() as xact:
self._log.debug("Deleting job: {}".format(path))
job.regh.delete_element(path)
@property
def nsr_xpath(self):
- return "D,/nsr:ns-instance-opdata/nsr:nsr"
+ return self._project.add_project("D,/nsr:ns-instance-opdata/nsr:nsr")
- @asyncio.coroutine
- def register_for_nsr(self):
- """ Register for NSR changes """
-
- @asyncio.coroutine
- def on_prepare(xact_info, query_action, ks_path, msg):
- """ This NSR is created """
- self._log.debug("Received NSR instantiate on_prepare (%s:%s:%s)",
- query_action,
- ks_path,
- msg)
-
- if (query_action == rwdts.QueryAction.UPDATE or
- query_action == rwdts.QueryAction.CREATE):
- pass
- elif query_action == rwdts.QueryAction.DELETE:
- nsr_id = msg.ns_instance_config_ref
- asyncio.ensure_future(self._terminate_nsr(nsr_id), loop=self._loop)
- else:
- raise NotImplementedError(
- "%s action on cm-state not supported",
- query_action)
-
- xact_info.respond_xpath(rwdts.XactRspCode.ACK)
-
- try:
- handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
- self._nsr_regh = yield from self._dts.register(self.nsr_xpath,
- flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
- handler=handler)
- except Exception as e:
- self._log.error("Failed to register for NSR changes as %s", str(e))
+ def deregister(self):
+ self._log.debug("De-register config agent job for project".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
class ConfigAgentJobManager(object):
TODO: Needs to support multiple config agents.
"""
- def __init__(self, dts, log, loop, nsm):
+ def __init__(self, dts, log, loop, project, nsm):
"""
Args:
dts : Dts handle
self.log = log
self.loop = loop
self.nsm = nsm
+ self.project = project
self.handler = CfgAgentJobDtsHandler(dts, log, loop, nsm, self)
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
def add_job(self, rpc_output, tasks=None):
- """Once an RPC is trigger add a now job
+ """Once an RPC is triggered, add a new job
Args:
rpc_output (YangOutput_Nsr_ExecNsConfigPrimitive): Rpc output
"""
nsr_id = rpc_output.nsr_id_ref
- job = ConfigAgentJob.convert_rpc_input_to_job(nsr_id, rpc_output, tasks)
+ job = ConfigAgentJob.convert_rpc_input_to_job(nsr_id, rpc_output,
+ tasks, self.project)
self.log.debug("Creating a job monitor for Job id: {}".format(
rpc_output.job_id))
ca = agent
break
+ def done_callback(fut):
+ e = fut.exception()
+ if e:
+ self.log.error("Exception on monitor job {}: {}".
+ format(rpc_output.job_id, e))
+ fut.print_stack()
+ self.log.debug("Monitor job done for {}".format(rpc_output.job_id))
+
# For every Job we will schedule a new monitoring process.
job_monitor = ConfigAgentJobMonitor(
self.dts,
ca
)
task = self.loop.create_task(job_monitor.publish_action_status())
+ task.add_done_callback(done_callback)
def get_job(self, nsr_id):
"""Get the job associated with the NSR Id, if present."""
@asyncio.coroutine
def register(self):
yield from self.handler.register()
- yield from self.handler.register_for_nsr()
+ # yield from self.handler.register_for_nsr()
+
+ def deregister(self):
+ self.handler.deregister()
+ self.handler = None
-############################################################################
+###########################################################################
# Copyright 2016 RIFT.io Inc #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
import os
import yaml
-from gi.repository import NsdYang
-from gi.repository import VnfdYang
+from gi.repository import ProjectNsdYang as NsdYang
+from gi.repository import ProjectVnfdYang as VnfdYang
class InitialConfigReadError(Exception):
super(VnfInitialConfigPrimitiveReader, self).__init__(primitive_input)
def get_initial_config_primitive(self, seq, name):
- return VnfdYang.InitialConfigPrimitive(seq=seq, name=name)
+ return VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_InitialConfigPrimitive(seq=seq, name=name)
-class NsInitialConfigPrimitiveReader(InitialConfigPrimitiveReader):
+class NsInitialServicePrimitiveReader(InitialConfigPrimitiveReader):
'''Class to read the NS initial config primitives'''
def __init__(self, primitive_input):
- super(NsInitialConfigPrimitiveReader, self).__init__(primitive_input)
+ super(NsInitialServicePrimitiveReader, self).__init__(primitive_input)
def get_initial_config_primitive(self, seq, name):
- return NsdYang.NsdInitialConfigPrimitive(seq=seq, name=name)
+ return NsdYang.YangData_Nsd_NsdCatalog_Nsd_InitialServicePrimitive(seq=seq, name=name)
class ConfigPrimitiveConvertor(object):
PARAMETER = "parameter"
PARAMETER_GROUP = "parameter_group"
- CONFIG_PRIMITIVE = "service_primitive"
+ SERVICE_PRIMITIVE = "service_primitive"
INITIAL_CONFIG_PRIMITIVE = "initial_config_primitive"
def _extract_param(self, param, field="default_value"):
input_data = {}
if config_primitives:
- input_data[self.CONFIG_PRIMITIVE] = {}
+ input_data[self.SERVICE_PRIMITIVE] = {}
for config_primitive in config_primitives:
- input_data[self.CONFIG_PRIMITIVE][config_primitive.name] = {}
+ input_data[self.SERVICE_PRIMITIVE][config_primitive.name] = {}
self._extract_parameters(
config_primitive.parameter,
- input_data[self.CONFIG_PRIMITIVE][config_primitive.name])
+ input_data[self.SERVICE_PRIMITIVE][config_primitive.name])
try:
self._extract_parameter_group(
config_primitive.parameter_group,
- input_data[self.CONFIG_PRIMITIVE][config_primitive.name])
+ input_data[self.SERVICE_PRIMITIVE][config_primitive.name])
except AttributeError:
pass
- if not input_data[self.CONFIG_PRIMITIVE][config_primitive.name]:
- del input_data[self.CONFIG_PRIMITIVE][config_primitive.name]
+ if not input_data[self.SERVICE_PRIMITIVE][config_primitive.name]:
+ del input_data[self.SERVICE_PRIMITIVE][config_primitive.name]
- if not input_data[self.CONFIG_PRIMITIVE]:
- del input_data[self.CONFIG_PRIMITIVE]
+ if not input_data[self.SERVICE_PRIMITIVE]:
+ del input_data[self.SERVICE_PRIMITIVE]
if initial_configs:
initial_conf = None
try:
- initial_conf = nsd.initial_config_primitive
+ initial_conf = nsd.initial_service_primitive
except AttributeError:
pass
def extract_vnfd_config(self, vnfd, format="yaml"):
config_prim = None
try:
- config_prim = vnfd.vnf_configuration.service_primitive
+ config_prim = vnfd.vnf_configuration.config_primitive
except AttributeError:
pass
pass
def add_nsd_initial_config(self, nsd_init_cfg_prim_msg, input_data):
- """ Add initial config primitives from NS Initial Config Input Data
+ """ Add initial service primitives from NS Initial Config Input Data
Arguments:
nsd_init_cfg_prim_msg - manotypes:nsd/initial_config_primitive pb msg
if self.INITIAL_CONFIG_PRIMITIVE in input_data:
ns_input_data = input_data[self.INITIAL_CONFIG_PRIMITIVE]
- reader = NsInitialConfigPrimitiveReader(ns_input_data)
+ reader = NsInitialServicePrimitiveReader(ns_input_data)
for prim in reader.primitives:
nsd_init_cfg_prim_msg.append(prim)
def merge_nsd_initial_config(self, nsd, input_data):
try:
- for config_primitive in nsd.initial_config_primitive:
+ for service_primitive in nsd.initial_service_primitive:
for cfg in input_data[self.INITIAL_CONFIG_PRIMITIVE]:
- if cfg['name'] == config_primitive.name:
+ if cfg['name'] == service_primitive.name:
self.merge_params(
- config_primitive.parameter,
+ service_primitive.parameter,
cfg[self.PARAMETER],
field="value")
break
except AttributeError as e:
- self._log.debug("Did not find initial-config-primitive for NSD {}: {}".
+ self._log.debug("Did not find initial-service-primitive for NSD {}: {}".
format(nsd.name, e))
-
def merge_nsd_config(self, nsd, input_data):
- for config_primitive in nsd.service_primitive:
+ for service_primitive in nsd.service_primitive:
try:
- cfg = input_data[self.CONFIG_PRIMITIVE][config_primitive.name]
+ cfg = input_data[self.SERVICE_PRIMITIVE][service_primitive.name]
except KeyError:
continue
self.merge_params(
- config_primitive.parameter,
+ service_primitive.parameter,
cfg[self.PARAMETER])
- for param_group in config_primitive.parameter_group:
+ for param_group in service_primitive.parameter_group:
self.merge_params(
param_group.parameter,
cfg[self.PARAMETER_GROUP][param_group.name])
vnfd_init_cfg_prim_msg.append(prim)
def merge_vnfd_config(self, vnfd, input_data):
- for config_primitive in vnfd.vnf_configuration.service_primitive:
+ for config_primitive in vnfd.vnf_configuration.config_primitive:
try:
- cfg = input_data[self.CONFIG_PRIMITIVE][config_primitive.name]
+ cfg = input_data[self.SERVICE_PRIMITIVE][config_primitive.name]
except KeyError:
continue
class ConfigStore(object):
"""Convenience class that fetches all the instance related data from the
- $RIFT_ARTIFACTS/launchpad/libs directory.
+ $RIFT_VAR_ROOT/launchpad/libs directory.
"""
def __init__(self, log):
self._log = log
self.converter = ConfigPrimitiveConvertor()
- def merge_vnfd_config(self, nsd_id, vnfd, member_vnf_index):
+ def merge_vnfd_config(self,project_name, nsd_id, vnfd, member_vnf_index):
"""Merges the vnfd config from the config directory.
Args:
the member index ref.
"""
nsd_archive = os.path.join(
- os.getenv('RIFT_ARTIFACTS'),
- "launchpad/libs",
- nsd_id,
- "config")
+ os.getenv('RIFT_VAR_ROOT'),
+ "launchpad/packages/vnfd/",
+ project_name,
+ vnfd.id,
+ "vnf_config")
self._log.info("Looking for config from the archive {}".format(nsd_archive))
input_data = yaml.load(fh)
return input_data
- def merge_nsd_config(self, nsd):
+ def merge_nsd_config(self, nsd, project_name):
nsd_archive = os.path.join(
- os.getenv('RIFT_ARTIFACTS'),
- "launchpad/libs",
+ os.getenv('RIFT_VAR_ROOT'),
+ "launchpad/packages/nsd/",
+ project_name,
nsd.id,
- "config")
+ "ns_config")
self._log.info("Looking for config from the archive {}".format(nsd_archive))
import pytest
import uuid
-from gi.repository import NsdYang, VnfdYang
+from gi.repository import (
+ ProjectNsdYang as NsdYang,
+ ProjectVnfdYang as VnfdYang,
+ )
from ..config import ConfigPrimitiveConvertor
import yaml
@pytest.fixture(scope="function")
def nsd():
- catalog = NsdYang.YangData_Nsd_NsdCatalog()
+ catalog = NsdYang.YangData_RwProject_Project_NsdCatalog()
nsd = catalog.nsd.add()
nsd.id = str(uuid.uuid1())
return nsd
@pytest.fixture(scope="function")
def vnfd():
- catalog = VnfdYang.YangData_Vnfd_VnfdCatalog()
+ catalog = VnfdYang.YangData_RwProject_Project_VnfdCatalog()
vnfd = catalog.vnfd.add()
vnfd.id = str(uuid.uuid1())
return vnfd
],
})
- vnf_config.service_primitive.add().from_dict({
+ vnf_config.config_primitive.add().from_dict({
"name": "PE1",
"parameter": [
{"name": "Foo", "default_value": "Bar"}
]
})
- expected_yaml = """service_primitive:
+ expected_yaml = """config_primitive:
PE1:
parameter:
Foo: Bar
"parameter": [{"name": "cidr"}],
})
- vnf_config.service_primitive.add().from_dict({
+ vnf_config.config_primitive.add().from_dict({
"name": "PE1",
"parameter": [{"name": "Foo",}]
})
- ip_yaml = """service_primitive:
+ ip_yaml = """config_primitive:
PE1:
parameter:
Foo: Bar
cidr: 10.10.10.2/30
"""
- catalog = VnfdYang.YangData_Vnfd_VnfdCatalog()
+ catalog = VnfdYang.YangData_RwProject_Project_VnfdCatalog()
expected_vnfd = catalog.vnfd.add()
vnf_config = expected_vnfd.vnf_configuration
expected_vnfd.id = vnfd.id
],
})
- vnf_config.service_primitive.add().from_dict({
+ vnf_config.config_primitive.add().from_dict({
"name": "PE1",
"parameter": [
{"name": "Foo", "default_value": "Bar"}
Vlan ID: '3000'
"""
- catalog = NsdYang.YangData_Nsd_NsdCatalog()
+ catalog = NsdYang.YangData_RwProject_Project_NsdCatalog()
expected_nsd = catalog.nsd.add()
expected_nsd.id = nsd.id
expected_nsd.service_primitive.add().from_dict(
NsdCatalogSubscriber,
NsInstanceConfigSubscriber)
from .subscriber.store import SubscriberStore
-from .subscriber.ro_account import ROAccountConfigSubscriber
from .rpc.core import AbstractRpcHandler
\ No newline at end of file
"""A common class to hold the barebone objects to build a publisher or
subscriber
"""
- def __init__(self, log, dts, loop):
+ def __init__(self, log, dts, loop, project):
"""Constructor
Args:
loop : Asyncio event loop.
"""
# Reg handle
- self.reg = None
- self.log = log
- self.dts = dts
- self.loop = loop
+ self._reg = None
+ self._log = log
+ self._dts = dts
+ self._loop = loop
+ self._project = project
+
+ @property
+ def reg(self):
+ return self._reg
+
+ @reg.setter
+ def reg(self, val):
+ self._reg = val
+
+ @property
+ def log(self):
+ return self._log
+
+ @property
+ def dts(self):
+ return self._dts
+
+ @property
+ def loop(self):
+ return self._loop
+
+ @property
+ def project(self):
+ return self._project
+
+ def deregister(self):
+ self._log.debug("De-registering DTS handler ({}) for project {}".
+ format(self.__class__.__name__, self._project))
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
class AbstractRpcHandler(DtsHandler):
"""Base class to simplify RPC implementation
"""
- def __init__(self, log, dts, loop):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project=None):
+ super().__init__(log, dts, loop, project)
if not asyncio.iscoroutinefunction(self.callback):
raise ValueError('%s has to be a coroutine' % (self.callback))
def on_prepare(self, xact_info, action, ks_path, msg):
assert action == rwdts.QueryAction.RPC
+ if self.project and not self.project.rpc_check(msg, xact_info=xact_info):
+ return
+
try:
rpc_op = yield from self.callback(ks_path, msg)
xact_info.respond_xpath(
@asyncio.coroutine
def register(self):
+ if self.reg:
+ self._log.warning("RPC already registered for project {}".
+ format(self._project.name))
+ return
+
reg_event = asyncio.Event(loop=self.loop)
@asyncio.coroutine
yield from reg_event.wait()
+ def deregister(self):
+ self.reg.deregister()
+ self.reg = None
+
@abc.abstractmethod
@asyncio.coroutine
def callback(self, ks_path, msg):
"""
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
from gi.repository import (RwDts as rwdts, ProtobufC)
import rift.tasklets
+from rift.mano.utils.project import (
+ get_add_delete_update_cfgs,
+ )
from ..core import DtsHandler
"""A common class for all subscribers.
"""
@classmethod
- def from_tasklet(cls, tasklet, callback=None):
+ def from_project(cls, proj, callback=None):
"""Convenience method to build the object from tasklet
Args:
- tasklet (rift.tasklets.Tasklet): Tasklet
+ proj (rift.mano.utils.project.ManoProject): Project
callback (None, optional): Callable, which will be invoked on
subscriber changes.
msg: The Gi Object msg from DTS
action(rwdts.QueryAction): Action type
"""
- return cls(tasklet.log, tasklet.dts, tasklet.loop, callback=callback)
+ return cls(proj.log, proj.dts, proj.loop, proj, callback=callback)
- def __init__(self, log, dts, loop, callback=None):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project, callback=None):
+ super().__init__(log, dts, loop, project)
self.callback = callback
+ @abc.abstractmethod
+ def get_xpath(self):
+ """
+ Returns:
+ str: xpath
+ """
+ pass
+
def get_reg_flags(self):
"""Default set of REG flags, can be over-ridden by sub classes.
-
+
Returns:
Set of rwdts.Flag types.
"""
return rwdts.Flag.SUBSCRIBER|rwdts.Flag.DELTA_READY|rwdts.Flag.CACHE
+ @asyncio.coroutine
+ def data(self):
+ itr = yield from self.dts.query_read(
+ self.get_xpath())
+
+ values = []
+ for res in itr:
+ result = yield from res
+ result = result.result
+ values.append(result)
+
+ return values
+
class AbstractOpdataSubscriber(SubscriberDtsHandler):
Opdata subscriber can be created in one step by subclassing and implementing
the MANDATORY get_xpath() method
-
+
"""
- @abc.abstractmethod
- def get_xpath(self):
- """
- Returns:
- str: xpath
- """
- pass
@asyncio.coroutine
def register(self):
"""Triggers the registration
"""
+
+ if self._reg:
+ self._log.warning("RPC already registered for project {}".
+ format(self._project.name))
+ return
+
xacts = {}
def on_commit(xact_info):
- xact_id = xact_info.handle.get_xact().id
- if xact_id in xacts:
- msg, action = xacts.pop(xact_id)
+ try:
+ xact_id = xact_info.handle.get_xact().id
+ if xact_id in xacts:
+ msg, action = xacts.pop(xact_id)
- if self.callback:
- self.callback(msg, action)
+ if self.callback:
+ self.callback(msg, action)
+ except Exception as e:
+ self.log.error("Exception when committing data for registration:{} exception:{}".format(self.get_xpath(), e))
+ self.log.exception(e)
return rwdts.MemberRspCode.ACTION_OK
except Exception as e:
self.log.exception(e)
- finally:
+ try:
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ except rift.tasklets.dts.ResponseError as e:
+ self._log.warning("Reg handle is None during action {} for {}: {}".
+ format(action, self.__class__, e))
reg_event = asyncio.Event(loop=self.loop)
on_commit=on_commit
)
- self.reg = yield from self.dts.register(
- xpath=self.get_xpath(),
+ self._reg = yield from self.dts.register(
+ xpath=self.project.add_project(self.get_xpath()),
flags=self.get_reg_flags(),
handler=handler)
# yield from reg_event.wait()
- assert self.reg is not None
-
- def deregister(self):
- self.reg.deregister()
+ assert self._reg is not None
class AbstractConfigSubscriber(SubscriberDtsHandler):
Config subscriber can be created in one step by subclassing and implementing
the MANDATORY get_xpath() method
-
+
"""
KEY = "msgs"
def key_name(self):
pass
- def get_add_delete_update_cfgs(self, dts_member_reg, xact, key_name):
- # Unforunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
-
@asyncio.coroutine
def register(self):
""" Register for VNFD configuration"""
def on_apply(dts, acg, xact, action, scratch):
"""Apply the configuration"""
- is_recovery = xact.xact is None and action == rwdts.AppconfAction.INSTALL
-
-
- add_cfgs, delete_cfgs, update_cfgs = self.get_add_delete_update_cfgs(
- dts_member_reg=self.reg,
+ if xact.xact is None:
+ if action == rwdts.AppconfAction.INSTALL:
+ try:
+ if self._reg:
+ for cfg in self._reg.elements:
+ if self.callback:
+ self.callback(cfg, rwdts.QueryAction.CREATE)
+
+ else:
+ self._log.error("Reg handle is None during action {} for {}".
+ format(action, self.__class__))
+
+ except Exception as e:
+ self._log.exception("Adding config {} during restart failed: {}".
+ format(cfg, e))
+ return
+
+ add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
+ dts_member_reg=self._reg,
xact=xact,
key_name=self.key_name())
@asyncio.coroutine
def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
""" on prepare callback """
- xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ self._log.debug("Subscriber DTS prepare for project %s: %s",
+ self.project, xact_info.query_action)
+ try:
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ except rift.tasklets.dts.ResponseError as e:
+ self._log.warning(
+ "Subscriber DTS prepare for project {}, action {} in class {} failed: {}".
+ format(self.project, xact_info.query_action, self.__class__, e))
acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
with self.dts.appconf_group_create(handler=acg_hdl) as acg:
- self.reg = acg.register(
- xpath=self.get_xpath(),
+ self._reg = acg.register(
+ xpath=self.project.add_project(self.get_xpath()),
flags=self.get_reg_flags(),
on_prepare=on_prepare)
-
- def deregister(self):
- self.reg.deregister()
return rwdts.Flag.SUBSCRIBER|rwdts.Flag.DELTA_READY
def get_xpath(self):
- return "D,/nsr:ns-instance-opdata/nsr:nsr"
+ return self._project.add_project("D,/nsr:ns-instance-opdata/nsr:nsr")
class NsdCatalogSubscriber(core.AbstractConfigSubscriber):
return "id"
def get_xpath(self):
- return "C,/nsd:nsd-catalog/nsd:nsd"
+ return self._project.add_project("C,/project-nsd:nsd-catalog/project-nsd:nsd")
class NsInstanceConfigSubscriber(core.AbstractConfigSubscriber):
return "id"
def get_xpath(self):
- return "C,/nsr:ns-instance-config/nsr:nsr"
+ return self._project.add_project("C,/nsr:ns-instance-config/nsr:nsr")
+++ /dev/null
-"""
-#
-# Copyright 2016 RIFT.IO Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-@file ro_account.py
-@author Varun Prasad (varun.prasad@riftio.com)
-@date 09-Jul-2016
-
-"""
-
-import gi
-gi.require_version('RwDts', '1.0')
-from gi.repository import RwDts as rwdts
-
-from . import core
-
-class ROAccountConfigSubscriber(core.AbstractConfigSubscriber):
-
- def key_name(self):
- return "name"
-
- def get_xpath(self):
- return("C,/rw-launchpad:resource-orchestrator")
\ No newline at end of file
"""
KEY = enum.Enum('KEY', 'NSR NSD VNFD VNFR')
- def __init__(self, log, dts, loop, callback=None):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project, callback=None):
+ super().__init__(log, dts, loop, project)
- params = (self.log, self.dts, self.loop)
+ params = (self.log, self.dts, self.loop, self.project)
self._nsr_sub = ns_subscriber.NsrCatalogSubscriber(*params, callback=self.on_nsr_change)
self._nsrs = {}
yield from self._vnfr_sub.register()
yield from self._nsr_sub.register()
+ def deregister(self):
+ self._log.debug("De-register store for project {}".
+ format(self._project))
+ self._vnfd_sub.deregister()
+ self._nsd_sub.deregister()
+ self._vnfr_sub.deregister()
+ self._nsr_sub.deregister()
+
@asyncio.coroutine
def refresh_store(self, subsriber, store):
itr = yield from self.dts.query_read(subsriber.get_xpath())
#
import asyncio
+import gi
import sys
import types
import unittest
import uuid
-
import rift.test.dts
import rift.mano.dts as store
-import gi
gi.require_version('RwDtsYang', '1.0')
from gi.repository import (
RwLaunchpadYang as launchpadyang,
RwDts as rwdts,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
RwVnfrYang,
RwNsrYang,
- RwNsdYang,
+ RwProjectNsdYang as RwNsdYang,
VnfrYang
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
class DescriptorPublisher(object):
def test_vnfd_handler(self):
yield from self.store.register()
- mock_vnfd = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+ mock_vnfd = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd()
mock_vnfd.id = str(uuid.uuid1())
- w_xpath = "C,/vnfd:vnfd-catalog/vnfd:vnfd"
- xpath = "{}[vnfd:id='{}']".format(w_xpath, mock_vnfd.id)
+ w_xpath = "C,/rw-project:project/project-vnfd:vnfd-catalog/project-vnfd:vnfd"
+ xpath = "{}[project-vnfd:id={}]".format(w_xpath, quoted_key(mock_vnfd.id))
yield from self.publisher.publish(w_xpath, xpath, mock_vnfd)
yield from asyncio.sleep(5, loop=self.loop)
def test_vnfr_handler(self):
yield from self.store.register()
- mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+ mock_vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
mock_vnfr.id = str(uuid.uuid1())
- w_xpath = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
- xpath = "{}[vnfr:id='{}']".format(w_xpath, mock_vnfr.id)
+ w_xpath = "D,/rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr"
+ xpath = "{}[vnfr:id={}]".format(w_xpath, quoted_key(mock_vnfr.id))
yield from self.publisher.publish(w_xpath, xpath, mock_vnfr)
yield from asyncio.sleep(5, loop=self.loop)
def test_nsr_handler(self):
yield from self.store.register()
- mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr()
+ mock_nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr()
mock_nsr.ns_instance_config_ref = str(uuid.uuid1())
mock_nsr.name_ref = "Foo"
- w_xpath = "D,/nsr:ns-instance-opdata/nsr:nsr"
- xpath = "{}[nsr:ns-instance-config-ref='{}']".format(w_xpath, mock_nsr.ns_instance_config_ref)
+ w_xpath = "D,/rw-project:project/nsr:ns-instance-opdata/nsr:nsr"
+ xpath = "{}[nsr:ns-instance-config-ref={}]".format(w_xpath, quoted_key(mock_nsr.ns_instance_config_ref))
yield from self.publisher.publish(w_xpath, xpath, mock_nsr)
yield from asyncio.sleep(5, loop=self.loop)
def test_nsd_handler(self):
yield from self.store.register()
- mock_nsd = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ mock_nsd = RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
mock_nsd.id = str(uuid.uuid1())
- w_xpath = "C,/nsd:nsd-catalog/nsd:nsd"
- xpath = "{}[nsd:id='{}']".format(w_xpath, mock_nsd.id)
+ w_xpath = "C,/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd"
+ xpath = "{}[project-nsd:id={}]".format(w_xpath, quoted_key(mock_nsd.id))
yield from self.publisher.publish(w_xpath, xpath, mock_nsd)
yield from asyncio.sleep(2, loop=self.loop)
# publish
yield from vnf_handler.register()
- mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+ mock_vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
mock_vnfr.id = str(uuid.uuid1())
def mon_xpath(param_id=None):
""" Monitoring params xpath """
- return("D,/vnfr:vnfr-catalog" +
- "/vnfr:vnfr[vnfr:id='{}']".format(mock_vnfr.id) +
+ return("D,/rw-project:project/vnfr:vnfr-catalog" +
+ "/vnfr:vnfr[vnfr:id={}]".format(quoted_key(mock_vnfr.id)) +
"/vnfr:monitoring-param" +
- ("[vnfr:id='{}']".format(param_id) if param_id else ""))
+ ("[vnfr:id={}]".format(quoted_key(param_id)) if param_id else ""))
- w_xpath = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
- xpath = "{}[vnfr:id='{}']".format(w_xpath, mock_vnfr.id)
+ w_xpath = "D,/rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr"
+ xpath = "{}[vnfr:id={}]".format(w_xpath, quoted_key(mock_vnfr.id))
yield from self.publisher.publish(w_xpath, xpath, mock_vnfr)
- mock_param = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict({
+ mock_param = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MonitoringParam.from_dict({
"id": "1"
})
mock_vnfr.monitoring_param.append(mock_param)
)
if __name__ == '__main__':
- main()
\ No newline at end of file
+ main()
return rwdts.Flag.SUBSCRIBER|rwdts.Flag.DELTA_READY
def get_xpath(self):
- return "D,/vnfr:vnfr-catalog/vnfr:vnfr"
+ return self.project.add_project("D,/vnfr:vnfr-catalog/vnfr:vnfr")
class VnfdCatalogSubscriber(core.AbstractConfigSubscriber):
return "id"
def get_xpath(self):
- return "C,/vnfd:vnfd-catalog/vnfd:vnfd"
+ return self.project.add_project("C,/project-vnfd:vnfd-catalog/project-vnfd:vnfd")
self.loop = loop
self._nc_mgr = None
- self._model = RwYang.Model.create_libncx()
+ self._model = RwYang.Model.create_libyang()
@asyncio.coroutine
def connect(self, timeout=240):
--- /dev/null
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .accounts import (
+ ROAccount
+ )
+
+from .config import (
+ ROAccountConfigSubscriber,
+ ROAccountConfigCallbacks
+ )
+
+from .operdata import (
+ ROAccountDtsOperdataHandler
+ )
--- /dev/null
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import asyncio
+
+from gi.repository import (
+ RwDts as rwdts,
+ RwRoAccountYang,
+ )
+
+import rift.mano.dts as mano_dts
+import rift.tasklets
+
+from rift.tasklets.rwnsmtasklet import openmano_nsm
+from rift.tasklets.rwnsmtasklet import rwnsmplugin
+
+class ROAccount(object):
+ """
+ RO Account Model class
+ """
+ DEFAULT_PLUGIN = rwnsmplugin.RwNsPlugin
+
+ def __init__(self, dts=None, log=None, loop=None, project=None, records_publisher=None, account_msg=None):
+ self._dts = dts
+ self._log = log
+ self._loop = loop
+ self._project = project
+ self._records_publisher = records_publisher
+ self._account_msg = None
+ if account_msg is not None:
+ self._account_msg = account_msg.deep_copy()
+ self._name = self._account_msg.name
+
+ self._datacenters = []
+ self._status = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account_ConnectionStatus(
+ status="unknown",
+ details="Connection status lookup not started"
+ )
+ self.live_instances = 0
+
+ if self._dts is None:
+ return
+
+ self._nsm_plugins = rwnsmplugin.NsmPlugins()
+ self._nsm_cls = self.DEFAULT_PLUGIN
+
+ try:
+ self._nsm_cls = self._nsm_plugins.class_by_plugin_name(
+ account_msg.ro_account_type
+ )
+ except KeyError as e:
+ self._log.warning(
+ "RO account nsm plugin not found: %s. Using standard rift nsm.",
+ account_msg.name
+ )
+
+ self._ro_plugin = self._create_plugin(self._nsm_cls, account_msg)
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def account_msg(self):
+ return self._account_msg
+
+ @property
+ def ro_acccount_type(self):
+ return self._account_msg.ro_account_type if self._account_msg else 'rift'
+
+ @property
+ def ro_plugin(self):
+ return self._ro_plugin
+
+ @property
+ def connection_status(self):
+ return self._status
+
+ def _create_plugin(self, nsm_cls, account_msg):
+ self._log.debug("Instantiating new RO account using class: %s", nsm_cls)
+ nsm_instance = nsm_cls(self._dts, self._log, self._loop,
+ self._records_publisher, account_msg, self._project)
+ return nsm_instance
+
+ def check_ro_account_status(self):
+ self._log.debug("Checking RO Account Status. Acct: %s",
+ self.name)
+ self._status = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account_ConnectionStatus(
+ status="validating",
+ details="RO account connection status check in progress"
+ )
+ try:
+ self._datacenters = []
+ for uuid, name in self._ro_plugin._cli_api.datacenter_list():
+ self._datacenters.append({
+ 'uuid':uuid,
+ 'name':name
+ }
+ )
+ self._status = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account_ConnectionStatus(
+ status="success",
+ details="RO account connection status success"
+ )
+ except:
+ self._status = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account_ConnectionStatus(
+ status="failure",
+ details="RO account connection status failure"
+ )
+ self._log.warning("RO account connection status failure, Acct:%s, status:%s",
+ self.name, self._status)
+
+ def start_validate_ro_account(self, loop):
+ loop.run_in_executor(None, self.check_ro_account_status)
--- /dev/null
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import asyncio
+
+import gi
+gi.require_version('RwDts', '1.0')
+import rift.tasklets
+from rift.mano.utils.project import get_add_delete_update_cfgs
+
+from gi.repository import (
+ RwDts as rwdts,
+ ProtobufC,
+ RwRoAccountYang,
+ )
+
+from . import accounts
+
+class ROAccountConfigCallbacks(object):
+ def __init__(self,
+ on_add_apply=None, on_delete_apply=None):
+
+ @asyncio.coroutine
+ def prepare_noop(*args, **kwargs):
+ pass
+
+ def apply_noop(*args, **kwargs):
+ pass
+
+ self.on_add_apply = on_add_apply
+ self.on_delete_apply = on_delete_apply
+
+ for f in ('on_add_apply', 'on_delete_apply'):
+ ref = getattr(self, f)
+ if ref is None:
+ setattr(self, f, apply_noop)
+ continue
+
+ if asyncio.iscoroutinefunction(ref):
+ raise ValueError('%s cannot be a coroutine' % (f,))
+
+class ROAccountConfigSubscriber(object):
+ XPATH = "C,/rw-ro-account:ro-account/rw-ro-account:account"
+
+ def __init__(self, dts, log, loop, project, records_publisher, ro_callbacks):
+ self._dts = dts
+ self._log = log
+ self._loop = loop
+ self._project = project
+ self._records_publisher = records_publisher
+ self._ro_callbacks = ro_callbacks
+
+ self._reg = None
+ self.accounts = {}
+ self._log.debug("Inside RO Account Config Subscriber init")
+
+ def add_account(self, account_msg):
+ self._log.debug("adding ro account: {}".format(account_msg))
+
+ account = accounts.ROAccount(self._dts,
+ self._log,
+ self._loop,
+ self._project,
+ self._records_publisher,
+ account_msg)
+ self.accounts[account.name] = account
+ self._ro_callbacks.on_add_apply(account)
+
+ def delete_account(self, account_name):
+ self._log.debug("Deleting RO account: {}".format(account_name))
+ account = self.accounts[account_name]
+ del self.accounts[account_name]
+ self._ro_callbacks.on_delete_apply(account_name)
+
+ def deregister(self):
+ self._log.debug("Project {}: De-register ro account handler".
+ format(self._project))
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
+
+ def update_account(self, account):
+ """ Update an existing ro account
+
+ In order to simplify update, turn an update into a delete followed by
+ an add. The drawback to this approach is that we will not support
+ updates of an "in-use" ro account, but this seems like a
+ reasonable trade-off.
+
+ """
+ self._log.debug("updating ro account: {}".format(account))
+
+ self.delete_account(account.name)
+ self.add_account(account)
+
+ @asyncio.coroutine
+ def register(self):
+ @asyncio.coroutine
+ def apply_config(dts, acg, xact, action, scratch):
+ self._log.debug("Got ro account apply config (xact: %s) (action: %s)", xact, action)
+
+ if xact.xact is None:
+ if action == rwdts.AppconfAction.INSTALL:
+ curr_cfg = self._reg.elements
+ for cfg in curr_cfg:
+ self._log.debug("RO account being re-added after restart.")
+ self.add_account(cfg)
+ else:
+ self._log.debug("No xact handle. Skipping apply config")
+
+ return
+
+ add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
+ dts_member_reg=self._reg,
+ xact=xact,
+ key_name="name",
+ )
+
+ # Handle Deletes
+ for cfg in delete_cfgs:
+ self.delete_account(cfg.name)
+
+ # Handle Adds
+ for cfg in add_cfgs:
+ self.add_account(cfg)
+
+ # Handle Updates
+ for cfg in update_cfgs:
+ self.update_account(cfg)
+
+ @asyncio.coroutine
+ def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+ """ Prepare callback from DTS for RO Account """
+
+ self._log.debug("RO account on_prepare config received (action: %s): %s",
+ xact_info.query_action, msg)
+ try:
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ except rift.tasklets.dts.ResponseError as e:
+ self._log.error(
+ "Subscriber DTS prepare for project {}, action {} in class {} failed: {}".
+ format(self._project, xact_info.query_action, self.__class__, e))
+
+ self._log.debug("Registering for RO Account config using xpath: %s",
+ ROAccountConfigSubscriber.XPATH,
+ )
+ acg_handler = rift.tasklets.AppConfGroup.Handler(
+ on_apply=apply_config,
+ )
+
+ xpath = self._project.add_project(ROAccountConfigSubscriber.XPATH)
+ with self._dts.appconf_group_create(acg_handler) as acg:
+ self._reg = acg.register(
+ xpath=xpath,
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+ on_prepare=on_prepare,
+ )
--- /dev/null
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import asyncio
+import gi
+import rift.mano.dts as mano_dts
+import rift.tasklets
+from . import accounts
+
+from gi.repository import(
+ RwRoAccountYang,
+ RwDts as rwdts,
+ RwTypes,
+ )
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+class ROAccountNotFound(Exception):
+ pass
+
+class ROAccountDtsOperdataHandler(object):
+ def __init__(self, dts, log, loop, project):
+ self._dts = dts
+ self._log = log
+ self._loop = loop
+ self._project = project
+
+ self._regh = None
+ self._rpc = None
+ self._rsic = None
+ self._rdcp = None
+ self.ro_accounts = {}
+ self._nsr_sub = mano_dts.NsInstanceConfigSubscriber(
+ self._log,
+ self._dts,
+ self._loop,
+ self._project,
+ callback=self.handle_nsr)
+
+ def handle_nsr(self, nsr, action):
+ if action == rwdts.QueryAction.CREATE:
+ try:
+ self.ro_accounts[nsr.resource_orchestrator].live_instances += 1
+ except KeyError as e:
+ self.ro_accounts['rift'].live_instances += 1
+ elif action == rwdts.QueryAction.DELETE:
+ try:
+ self.ro_accounts[nsr.resource_orchestrator].live_instances -= 1
+ except KeyError as e:
+ self.ro_accounts['rift'].live_instances -= 1
+
+ def get_xpath(self):
+ return "D,/rw-ro-account:ro-account-state/account"
+
+ def get_qualified_xpath(self, ro_account_name):
+ if ro_account_name is None:
+ raise Exception("Account name cannot be None")
+
+ return self._project.add_project("D,/rw-ro-account:ro-account-state/account{}".format(
+ "[name=%s]" % quoted_key(ro_account_name))
+ )
+
+ def add_rift_ro_account(self):
+ rift_acc = accounts.ROAccount()
+ rift_acc._name = 'rift'
+ rift_acc._status = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account_ConnectionStatus(
+ status="success",
+ details="RO account connection status success"
+ )
+ self.ro_accounts[rift_acc.name] = rift_acc
+ rift_acc_state = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account(name=rift_acc.name)
+ self._regh.create_element(self.get_qualified_xpath(rift_acc.name), rift_acc_state)
+
+ def add_ro_account(self, account):
+ self.ro_accounts[account.name] = account
+ account.start_validate_ro_account(self._loop)
+
+ def delete_ro_account(self, account_name):
+ account = self.ro_accounts[account_name]
+ del self.ro_accounts[account_name]
+
+ def get_saved_ro_accounts(self, ro_account_name):
+ ''' Get RO Account corresponding to passed name, or all saved accounts if name is None'''
+ saved_ro_accounts = []
+
+ if ro_account_name is None or ro_account_name == "":
+ ro_accounts = list(self.ro_accounts.values())
+ saved_ro_accounts.extend(ro_accounts)
+ elif ro_account_name in self.ro_accounts:
+ account = self.ro_accounts[ro_account_name]
+ saved_ro_accounts.append(account)
+ else:
+ errstr = "RO account {} does not exist".format(ro_account_name)
+ raise KeyError(errstr)
+
+ return saved_ro_accounts
+
+ @asyncio.coroutine
+ def _register_show_status(self):
+ def get_xpath(ro_account_name):
+ return "D,/rw-ro-account:ro-account-state/account{}/connection-status".format(
+ "[name=%s]" % quoted_key(ro_account_name)
+ )
+
+ @asyncio.coroutine
+ def on_prepare(xact_info, action, ks_path, msg):
+ path_entry = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account.schema().keyspec_to_entry(ks_path)
+ ro_account_name = path_entry.key00.name
+
+ try:
+ saved_accounts = self.get_saved_ro_accounts(ro_account_name)
+ for account in saved_accounts:
+ connection_status = account._status
+
+ xpath = self._project.add_project(get_xpath(account.name))
+ xact_info.respond_xpath(
+ rwdts.XactRspCode.MORE,
+ xpath=xpath,
+ msg=account._status,
+ )
+ except Exception as e:
+ self._log.warning(str(e))
+ xact_info.respond_xpath(rwdts.XactRspCode.NA)
+ return
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ xpath = self._project.add_project(self.get_xpath())
+ self._regh = yield from self._dts.register(
+ xpath=xpath,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare),
+ flags=rwdts.Flag.PUBLISHER,
+ )
+
+ #ATTN: TODO: Should ideally wait for
+ #on_ready callback to be called.
+ self.add_rift_ro_account()
+
+ @asyncio.coroutine
+ def _register_show_instance_count(self):
+ def get_xpath(ro_account_name=None):
+ return "D,/rw-ro-account:ro-account-state/account{}/instance-ref-count".format(
+ "[name=%s]" % quoted_key(ro_account_name) if ro_account_name is not None else ''
+ )
+
+ @asyncio.coroutine
+ def on_prepare(xact_info, action, ks_path, msg):
+ path_entry = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account.schema().keyspec_to_entry(ks_path)
+ ro_account_name = path_entry.key00.name
+
+ try:
+ saved_accounts = self.get_saved_ro_accounts(ro_account_name)
+ for account in saved_accounts:
+ instance_count = account.live_instances
+ xpath = self._project.add_project(get_xpath(account.name))
+ xact_info.respond_xpath(
+ rwdts.XactRspCode.MORE,
+ xpath=xpath,
+ msg=RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account_InstanceRefCount(count=instance_count)
+ )
+ except KeyError as e:
+ self._log.warning(str(e))
+ xact_info.respond_xpath(rwdts.XactRspCode.NA)
+ return
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ xpath = self._project.add_project(get_xpath())
+ self._rsic = yield from self._dts.register(
+ xpath=xpath,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare),
+ flags=rwdts.Flag.PUBLISHER,
+ )
+
+ @asyncio.coroutine
+ def _register_validate_rpc(self):
+ def get_xpath():
+ return "/rw-ro-account:update-ro-account-status"
+
+ @asyncio.coroutine
+ def on_prepare(xact_info, action, ks_path, msg):
+ if not msg.has_field("ro_account"):
+ raise ROAccountNotFound("RO account name not provided")
+ ro_account_name = msg.ro_account
+
+ if not self._project.rpc_check(msg, xact_info=xact_info):
+ return
+
+ try:
+ account = self.ro_accounts[ro_account_name]
+ except KeyError:
+ errmsg = "RO account name {} not found in project {}". \
+ format(ro_account_name, self._project.name)
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+ get_xpath(),
+ errmsg)
+ raise ROAccountNotFound(errmsg)
+
+ if ro_account_name != 'rift':
+ account.start_validate_ro_account(self._loop)
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ self._rpc = yield from self._dts.register(
+ xpath=get_xpath(),
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare
+ ),
+ flags=rwdts.Flag.PUBLISHER,
+ )
+
+ @asyncio.coroutine
+ def _register_data_center_publisher(self):
+ def get_xpath(ro_account_name=None):
+ return "D,/rw-ro-account:ro-account-state/account{}/datacenters".format(
+ "[name=%s]" % quoted_key(ro_account_name) if ro_account_name is not None else ''
+ )
+
+ @asyncio.coroutine
+ def on_prepare(xact_info, action, ks_path, msg):
+ path_entry = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account.schema().keyspec_to_entry(ks_path)
+ ro_account_name = path_entry.key00.name
+
+ try:
+ saved_accounts = self.get_saved_ro_accounts(ro_account_name)
+ for account in saved_accounts:
+ datacenters = []
+ if account.name == 'rift':
+ datacenters = [{'name': cloud.name, 'datacenter_type': cloud.account_type}
+ for cloud in self._project.cloud_accounts]
+ else :
+ datacenters = account._datacenters
+
+ response = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account_Datacenters()
+ response.from_dict({'datacenters': datacenters})
+ xpath = self._project.add_project(get_xpath(account.name))
+ xact_info.respond_xpath(
+ rwdts.XactRspCode.MORE,
+ xpath=xpath,
+ msg=response
+ )
+ except KeyError as e:
+ self._log.warning(str(e))
+ xact_info.respond_xpath(rwdts.XactRspCode.NA)
+ return
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ xpath = self._project.add_project(get_xpath())
+ self._rdcp = yield from self._dts.register(
+ xpath=xpath,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare),
+ flags=rwdts.Flag.PUBLISHER,
+ )
+
+ @asyncio.coroutine
+ def _register_config_data_publisher(self):
+ def get_xpath(ro_account_name=None):
+ return "D,/rw-ro-account:ro-account-state/account{}/config-data".format(
+ "[name=%s]" % quoted_key(ro_account_name) if ro_account_name is not None else ''
+ )
+
+ @asyncio.coroutine
+ def on_prepare(xact_info, action, ks_path, msg):
+ path_entry = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account.schema().keyspec_to_entry(ks_path)
+ ro_account_name = path_entry.key00.name
+
+ try:
+ saved_accounts = self.get_saved_ro_accounts(ro_account_name)
+ for account in saved_accounts:
+ ro_acct_type = account.ro_acccount_type
+
+ response = RwRoAccountYang.YangData_RwProject_Project_RoAccountState_Account_ConfigData(ro_account_type=ro_acct_type)
+ xpath = self._project.add_project(get_xpath(account.name))
+ xact_info.respond_xpath(
+ rwdts.XactRspCode.MORE,
+ xpath=xpath,
+ msg=response
+ )
+ except KeyError as e:
+ self._log.warning(str(e))
+ xact_info.respond_xpath(rwdts.XactRspCode.NA)
+ return
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ xpath = self._project.add_project(get_xpath())
+ self._rcdp = yield from self._dts.register(
+ xpath=xpath,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare),
+ flags=rwdts.Flag.PUBLISHER,
+ )
+
+ @asyncio.coroutine
+ def register(self):
+ self._log.debug("Register RO account for project %s", self._project.name)
+ yield from self._register_show_status()
+ yield from self._register_validate_rpc()
+ yield from self._register_show_instance_count()
+ yield from self._register_data_center_publisher()
+ yield from self._register_config_data_publisher()
+ yield from self._nsr_sub.register()
+
+ def deregister(self):
+ self._log.debug("De-register RO account for project %s", self._project.name)
+ self._rpc.deregister()
+ self._regh.deregister()
+ self._rsic.deregister()
+ self._rdcp.deregister()
+ self._rcdp.deregister()
+ self._nsr_sub.deregister()
-#
+#
# Copyright 2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
self._sdn = self.plugin.get_interface("Topology")
self._sdn.init(rwlog_hdl)
- self._status = RwsdnalYang.SDNAccount_ConnectionStatus(
+ self._status = RwSdnYang.YangData_RwProject_Project_Sdn_Account_ConnectionStatus(
status="unknown",
details="Connection status lookup not started"
)
@property
def sdnal_account_msg(self):
- return RwsdnalYang.SDNAccount.from_dict(
+ return RwsdnalYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList.from_dict(
self.account_msg.as_dict(),
ignore_missing_keys=True,
)
def sdn_account_msg(self, account_dict):
- self._account_msg = RwSdnYang.SDNAccount.from_dict(account_dict)
+ self._account_msg = RwSdnYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList.from_dict(account_dict)
@property
def account_type(self):
@asyncio.coroutine
def validate_sdn_account_credentials(self, loop):
- self._log.debug("Validating SDN Account credentials %s", self._account_msg)
- self._status = RwSdnYang.SDNAccount_ConnectionStatus(
+ self._log.debug("Validating SDN Account credentials %s",
+ self.name)
+ self._status = RwSdnYang.YangData_RwProject_Project_Sdn_Account_ConnectionStatus(
status="validating",
details="SDN account connection validation in progress"
)
self.sdnal_account_msg,
)
if rwstatus == RwTypes.RwStatus.SUCCESS:
- self._status = RwSdnYang.SDNAccount_ConnectionStatus.from_dict(status.as_dict())
+ self._status = RwSdnYang.YangData_RwProject_Project_Sdn_Account_ConnectionStatus.from_dict(status.as_dict())
else:
- self._status = RwSdnYang.SDNAccount_ConnectionStatus(
+ self._status = RwSdnYang.YangData_RwProject_Project_Sdn_Account_ConnectionStatus(
status="failure",
details="Error when calling SDNAL validate SDN creds"
)
- self._log.info("Got SDN account validation response: %s", self._status)
+ if self._status.status == 'failure':
+ self._log.error("SDN account validation failed; Acct: %s status: %s",
+ self.name, self._status)
def start_validate_credentials(self, loop):
if self._validate_task is not None:
ProtobufC,
)
+from rift.mano.utils.project import get_add_delete_update_cfgs
+
from . import accounts
pass
-def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
- # Unforunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
-
-
class SDNAccountConfigCallbacks(object):
def __init__(self,
on_add_apply=None, on_add_prepare=None,
class SDNAccountConfigSubscriber(object):
XPATH = "C,/rw-sdn:sdn/rw-sdn:account"
- def __init__(self, dts, log, rwlog_hdl, sdn_callbacks, acctstore):
+ def __init__(self, dts, log, project, rwlog_hdl, sdn_callbacks, acctstore):
self._dts = dts
self._log = log
+ self._project = project
self._rwlog_hdl = rwlog_hdl
self._reg = None
self.delete_account(account_msg.name)
self.add_account(account_msg)
+ def deregister(self):
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
+
def register(self):
@asyncio.coroutine
def apply_config(dts, acg, xact, action, _):
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ xpath = self._project.add_project(SDNAccountConfigSubscriber.XPATH)
self._log.debug("Registering for SDN Account config using xpath: %s",
- SDNAccountConfigSubscriber.XPATH,
+ xpath,
)
acg_handler = rift.tasklets.AppConfGroup.Handler(
with self._dts.appconf_group_create(acg_handler) as acg:
self._reg = acg.register(
- xpath=SDNAccountConfigSubscriber.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
on_prepare=on_prepare,
)
#
import asyncio
+import gi
+
import rift.tasklets
from gi.repository import(
RwSdnYang,
+ RwsdnalYang,
RwDts as rwdts,
+ RwTypes,
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
class SDNAccountNotFound(Exception):
class SDNAccountDtsOperdataHandler(object):
- def __init__(self, dts, log, loop):
+ def __init__(self, dts, log, loop, project):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self.sdn_accounts = {}
+ self._oper = None
+ self._rpc = None
def add_sdn_account(self, account):
self.sdn_accounts[account.name] = account
return saved_sdn_accounts
+ @asyncio.coroutine
+ def create_notification(self, account):
+ xpath = "N,/rw-sdn:sdn-notif"
+ ac_status = RwSdnYang.YangNotif_RwSdn_SdnNotif()
+ ac_status.name = account.name
+ ac_status.message = account.connection_status.details
+
+ yield from self._dts.query_create(xpath, rwdts.XactFlag.ADVISE, ac_status)
+ self._log.info("Notification called by creating dts query: %s", ac_status)
+
+
+ @asyncio.coroutine
def _register_show_status(self):
+ self._log.debug("Registering for show for project {}".format(self._project))
def get_xpath(sdn_name=None):
- return "D,/rw-sdn:sdn/account{}/connection-status".format(
- "[name='%s']" % sdn_name if sdn_name is not None else ''
- )
+ return self._project.add_project("D,/rw-sdn:sdn/rw-sdn:account{}/rw-sdn:connection-status".
+ format(
+ "[rw-sdn:name=%s]" % quoted_key(sdn_name)
+ if sdn_name is not None else ''))
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
- self._log.debug("Got show SDN connection status request: %s", ks_path.create_string())
- path_entry = RwSdnYang.SDNAccount.schema().keyspec_to_entry(ks_path)
+ xpath = ks_path.to_xpath(RwSdnYang.get_schema())
+ self._log.debug("Got show SDN connection status request: %s", xpath)
+ path_entry = RwSdnYang.YangData_RwProject_Project_Sdn_Account.schema().keyspec_to_entry(ks_path)
sdn_account_name = path_entry.key00.name
try:
saved_accounts = self.get_saved_sdn_accounts(sdn_account_name)
for account in saved_accounts:
connection_status = account.connection_status
- self._log.debug("Responding to SDN connection status request: %s", connection_status)
+ self._log.debug("Responding to SDN connection status request: %s",
+ connection_status)
xact_info.respond_xpath(
rwdts.XactRspCode.MORE,
xpath=get_xpath(account.name),
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
+ self._oper = yield from self._dts.register(
xpath=get_xpath(),
handler=rift.tasklets.DTS.RegistrationHandler(
on_prepare=on_prepare),
flags=rwdts.Flag.PUBLISHER,
)
+ @asyncio.coroutine
def _register_validate_rpc(self):
+ self._log.debug("Registering for rpc for project {}".format(self._project))
def get_xpath():
return "/rw-sdn:update-sdn-status"
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
+ if self._project and not self._project.rpc_check(msg, xact_info=xact_info):
+ return
+
+ self._log.debug("Got update SDN connection status request: %s", msg)
+
if not msg.has_field("sdn_account"):
raise SDNAccountNotFound("SDN account name not provided")
try:
account = self.sdn_accounts[sdn_account_name]
except KeyError:
- raise SDNAccountNotFound("SDN account name %s not found" % sdn_account_name)
+ errmsg = "SDN account name %s not found" % sdn_account_name
+ self._log.error(errmsg)
+ xpath = ks_path.to_xpath(RwSdnYang.get_schema())
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+ xpath,
+ errmsg)
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
account.start_validate_credentials(self._loop)
+ yield from self.create_notification(account)
+
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- yield from self._dts.register(
- xpath=get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare
- ),
- flags=rwdts.Flag.PUBLISHER,
- )
+ self._rpc = yield from self._dts.register(
+ xpath=get_xpath(),
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare
+ ),
+ flags=rwdts.Flag.PUBLISHER,
+ )
@asyncio.coroutine
def register(self):
yield from self._register_show_status()
yield from self._register_validate_rpc()
+
+ def deregister(self):
+ if self._oper:
+ self._oper.deregister()
+ self._oper = None
+
+ if self._rpc:
+ self._rpc.deregister()
+ self._rpc = None
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0
-description: Toy NS
+description: Translated from Tosca
data_types:
tosca.datatypes.nfv.riftio.dashboard_params:
properties:
# TODO(Philip): Harcoding for now, need to make this generic
def get_xpath(self):
- xpath = '/nsd:nsd-catalog/nsd:nsd/nsd:' + self.name
+ xpath = '/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd/nsd:' + self.name
return xpath
def get_dict_output(self):
nsd.vendor = self.metadata['vendor']
nsd.short_name = self.metadata['name']
nsd.version = self.metadata['version']
+ if 'logo' in self.metadata:
+ nsd.logo = self.metadata['logo']
except Exception as e:
self.log.warning(_("Unable to use YANG GI to generate "
"descriptors, falling back to alternate "
if resource.type == 'vld':
resource.generate_yang_model(nsd, vnfds, use_gi=use_gi)
+ vnf_type_duplicate = []
+ vnfd_resources = []
+ vnfd_duplicate_resource_list = []
for resource in self.resources:
- # Do the vnfds next
if resource.type == 'vnfd':
+ vnfd_resources.append(resource)
+
+ vnfd_resources.sort(key=lambda x: x.member_vnf_id, reverse=False)
+ vnf_type_to_vnf_id = {}
+ for resource in vnfd_resources:
+ if resource.vnf_type not in vnf_type_duplicate:
resource.generate_yang_model(nsd, vnfds, use_gi=use_gi)
+ vnf_type_to_vnf_id[resource.vnf_type] = resource.id
+ vnf_type_duplicate.append(resource.vnf_type)
+ else:
+ vnfd_duplicate_resource_list.append(resource)
+
+ for resource in vnfd_duplicate_resource_list:
+ resource.generate_nsd_constiuent(nsd, vnf_type_to_vnf_id[resource.vnf_type])
for resource in self.resources:
# Do the other nodes
if use_gi:
for param in self.parameters:
nsd.input_parameter_xpath.append(
- NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_InputParameterXpath(
xpath=param.get_xpath(),
)
)
# Need to add support to get script names, charms, etc.
other_files = {}
for resource in self.resources:
- resource.get_supporting_files(other_files)
+ resource.get_supporting_files(other_files, desc_id=nsd_id)
for policy in self.policies:
policy.get_supporting_files(other_files, desc_id=nsd_id)
self.YANG: vnfd_pf,
}
+
if vnfd_id in other_files:
vnfd_out[self.FILES] = other_files[vnfd_id]
return {pf+'-catalog': {pf: [desc]}}
def get_yaml(self, module_list, desc):
- model = RwYang.Model.create_libncx()
+ model = RwYang.Model.create_libyang()
+
for module in module_list:
model.load_module(module)
return desc.to_yaml(model)
format(self.name, tosca_props))
vdu_props = {}
for key, value in tosca_props.items():
- if key == 'cloud_init':
- vdu_props['cloud-init'] = value
- elif key == 'cloud-init-file':
- self._cloud_init = "../cloud_init/{}".format(value)
- else:
- vdu_props[key] = value
+ vdu_props[key] = value
if 'name' not in vdu_props:
vdu_props['name'] = self.name
self.properties['guest-epa'] = get_guest_epa(tosca_caps['numa_extension'], tosca_caps['nfv_compute'])
if 'monitoring_param' in tosca_caps:
self._monitor_param.append(get_monitor_param(tosca_caps['monitoring_param'], '1'))
- if 'monitoring_param_1' in tosca_caps:
- self._monitor_param.append(get_monitor_param(tosca_caps['monitoring_param_1'], '2'))
if 'mgmt_interface' in tosca_caps:
self._mgmt_interface = get_mgmt_interface(tosca_caps['mgmt_interface'])
if len(self._mgmt_interface) > 0:
prop['port'] = self._mgmt_interface['dashboard-params']['port']
self._http_endpoint = prop
+ mon_idx = 2
+ monitoring_param_name = 'monitoring_param_1'
+ while True:
+ if monitoring_param_name in tosca_caps:
+ self._monitor_param.append(get_monitor_param(tosca_caps[monitoring_param_name], str(mon_idx)))
+ mon_idx += 1
+ monitoring_param_name = 'monitoring_param_{}'.format(mon_idx)
+ else:
+ break
+ # THis is a quick hack to remove monitor params without name
+ for mon_param in list(self._monitor_param):
+ if 'name' not in mon_param:
+ self._monitor_param.remove(mon_param)
def handle_artifacts(self):
if self.artifacts is None:
self.artifacts = arts
def handle_interfaces(self):
- # Currently, we support only create operation
- operations_deploy_sequence = ['create']
+ # Currently, we support the following:
+ operations_deploy_sequence = ['create', 'configure']
operations = ManoResource._get_all_operations(self.nodetemplate)
self.operations[operation.name] = operation.implementation
for name, details in self.artifacts.items():
if name == operation.implementation:
- self._image = details['file']
+ if operation.name == 'create':
+ self._image = details['file']
+ elif operation.name == 'configure':
+ self._cloud_init = details['file']
+ break
except KeyError as e:
self.log.exception(e)
return None
if self._image_cksum:
self.properties['image-checksum'] = self._image_cksum
+ if self._cloud_init:
+ self.properties['cloud-init-file'] = os.path.basename(self._cloud_init)
+
for key in ToscaCompute.IGNORE_PROPS:
if key in self.properties:
self.properties.pop(key)
self.log.debug(_("{0} with tosca properties: {1}").
format(self, tosca_props))
self.properties['name'] = tosca_props['name']
- self.properties['seq'] = \
- tosca_props['seq']
+ self.properties['seq'] = int(tosca_props['seq'])
self.properties['user-defined-script'] = \
tosca_props['user_defined_script']
self.scripts.append('../scripts/{}'. \
if 'parameter' in tosca_props:
self.properties['parameter'] = []
- for name, value in tosca_props['parameter'].items():
+ for parameter in tosca_props['parameter']:
self.properties['parameter'].append({
- 'name': name,
- 'value': value,
+ 'name': parameter['name'],
+ 'value': str(parameter['value']),
})
-
self.log.debug(_("{0} properties: {1}").format(self, self.properties))
def get_policy_props(self):
ip_profile_param['ip-version'] = 'ipv' + str(specs['ip_version'])
if 'cidr' in specs:
ip_profile_param['subnet-address'] = specs['cidr']
+ ip_profile_prop['ip-profile-params'] = ip_profile_param
- ip_profile_prop['ip-profile-params'] = ip_profile_param
return ip_profile_prop
tosca_props = self.get_tosca_props()
self._vld = get_vld_props(tosca_props)
ip_profile_props = convert_keys_to_python(self._ip_profile)
try:
nsd.vld.add().from_dict(vld_props)
- nsd.ip_profiles.add().from_dict(ip_profile_props)
+ if len(ip_profile_props) > 1:
+ nsd.ip_profiles.add().from_dict(ip_profile_props)
except Exception as e:
err_msg = _("{0} Exception vld from dict {1}: {2}"). \
format(self, props, e)
import gi
gi.require_version('RwVnfdYang', '1.0')
- from gi.repository import RwVnfdYang
+ from gi.repository import RwVnfdYang as RwVnfdYang
except ImportError:
pass
except ValueError:
self._policies = []
self._cps = []
self.vnf_type = nodetemplate.type
+ self.member_vnf_id = None
self._reqs = {}
+ self.logo = None
def map_tosca_name_to_mano(self, name):
new_name = super().map_tosca_name_to_mano(name)
if key == 'id':
self._const_vnfd['member-vnf-index'] = int(value)
self._const_vnfd['vnfd-id-ref'] = self.id
+ self.member_vnf_id = int(value)
elif key == 'vnf_configuration':
self._vnf_config = get_vnf_config(value)
else:
vnf_props.pop('start_by_default')
if 'logo' in self.metadata:
vnf_props['logo'] = self.metadata['logo']
+ self.logo = self.metadata['logo']
+
self.log.debug(_("VNF {0} with constituent vnf: {1}").
format(self.name, self._const_vnfd))
nsd['constituent-vnfd'] = []
nsd['constituent-vnfd'].append(self._const_vnfd)
+ def generate_nsd_constiuent(self, nsd, vnf_id):
+ self._const_vnfd['vnfd-id-ref'] = vnf_id
+ props = convert_keys_to_python(self._const_vnfd)
+ nsd.constituent_vnfd.add().from_dict(props)
+
+
def get_member_vnf_index(self):
return self._const_vnfd['member-vnf-index']
'type': 'cloud_init',
'name': vdu.cloud_init,
},)
+ if self.logo is not None:
+ files[desc_id] = []
+ file_location = "../icons/{}".format(self.logo)
+ files[desc_id].append({
+ 'type': 'icons',
+ 'name': file_location,
+ },)
conf = {}
if _validate_action(value):
conf['trigger'] = action
- conf['ns-config-primitive-name-ref'] = value
+ conf['ns-service-primitive-name-ref'] = value
self.properties['scaling-config-action'].append(conf)
else:
err_msg = _("{0}: Did not find the action {1} in "
self._vnf_id = vnf_node.id
self.properties["vnf-configuration"] = {}
prop = {}
- prop["config-attributes"] = {}
+ #prop["config-attributes"] = {}
prop["script"] = {}
if 'config' in tosca_props:
- if 'config_delay' in tosca_props['config']:
- prop["config-attributes"]['config-delay'] = tosca_props['config']['config_delay']
- if 'config_priority' in tosca_props['config']:
- prop["config-attributes"]['config-priority'] = tosca_props['config']['config_priority']
+ # if 'config_delay' in tosca_props['config']:
+ # prop["config-attributes"]['config-delay'] = tosca_props['config']['config_delay']
+ # if 'config_priority' in tosca_props['config']:
+ # prop["config-attributes"]['config-priority'] = tosca_props['config']['config_priority']
if 'config_template' in tosca_props['config']:
prop["config-template"] = tosca_props['config']['config_template']
if 'config_details' in tosca_props['config']:
prop["script"]["script-type"] = tosca_props['config']['config_details']['script_type']
if 'initial_config' in tosca_props:
prop['initial-config-primitive'] = []
- #print("Weleek " + str(tosca_props['initial_config']))
for init_config in tosca_props['initial_config']:
if 'parameter' in init_config:
parameters = init_config.pop('parameter')
init_config['parameter'] = []
- for key, value in parameters.items():
- init_config['parameter'].append({'name': key, 'value': str(value)})
- if 'user_defined_script' in init_config:
- self.scripts.append('../scripts/{}'. \
+ for parameter in parameters:
+ for key, value in parameter.items():
+ init_config['parameter'].append({'name': key, 'value': str(value)})
+
+ if 'user_defined_script' in init_config:
+ self.scripts.append('../scripts/{}'. \
format(init_config['user_defined_script']))
prop['initial-config-primitive'].append(init_config)
return
if self._vnf_id not in files:
- files[desc_id] = []
+ files[self._vnf_id] = []
for script in self.scripts:
files[self._vnf_id].append({
'type': 'script',
'name': script,
- },)
\ No newline at end of file
+ },)
--- /dev/null
+#
+# Copyright 2016 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from rift.mano.tosca_translator.common.utils import _
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+from rift.mano.tosca_translator.rwmano.syntax.mano_resource import ManoResource
+from toscaparser.functions import GetInput
+from rift.mano.tosca_translator.common.utils import convert_keys_to_python
+
+from toscaparser.common.exception import ValidationError
+
+
+# Name used to dynamically load appropriate map class.
+TARGET_CLASS_NAME = 'ToscaVnfNSServiceConfiguration'
+
+
+class ToscaVnfNSServiceConfiguration(ManoResource):
+ '''Translate TOSCA node type tosca.policies.Scaling.'''
+
+ toscatype = 'tosca.policies.nfv.riftio.ns_service_primitives'
+
+ IGNORE_PROPS = []
+ VALUE_TYPE_CONVERSION_MAP = {
+ 'integer': 'INTEGER',
+ 'string':'STRING',
+ 'float':'DECIMAL',
+ 'INTEGER': 'INTEGER',
+ 'FLOAT':'DECIMAL'
+
+ }
+
+ def __init__(self, log, policy, metadata=None, vnf_name = None):
+ self.log = log
+ self.name = policy.name
+ self.type_ = 'place-grp'
+ self.metadata = metadata
+ self.linked_to_vnf = False
+ self.policy = policy
+ self.service_primitive = None
+ self.properties = {}
+ self.scripts = []
+
+ def __str__(self):
+ return "%s(%s)" % (self.name, self.type)
+
+ def handle_properties(self, nodes, groups):
+ tosca_props = self.get_policy_props()
+ service_primitive = {}
+ if 'name' in tosca_props:
+ service_primitive['name'] = tosca_props['name']
+ if 'user_defined_script' in tosca_props:
+ service_primitive['user_defined_script'] = tosca_props['user_defined_script']
+ self.scripts.append('../scripts/{}'. \
+ format(tosca_props['user_defined_script']))
+
+
+ if 'parameter' in tosca_props:
+ service_primitive['parameter'] = []
+ for parameter in tosca_props['parameter']:
+ prop = {}
+ if 'name' in parameter:
+ prop['name'] = parameter['name']
+ if 'hidden' in parameter:
+ prop['hidden'] = parameter['hidden']
+ if 'mandatory' in parameter:
+ prop['mandatory'] = parameter['mandatory']
+ if 'data_type' in parameter:
+ prop['data_type'] = ToscaVnfNSServiceConfiguration.VALUE_TYPE_CONVERSION_MAP[parameter['data_type']]
+ if 'default_value' in parameter:
+ prop['default_value'] = str(parameter['default_value'])
+ service_primitive['parameter'].append(prop)
+
+ self.service_primitive = service_primitive
+
+
+
+
+ #self.properties = prop
+
+ def generate_yang_submodel_gi(self, vnfd):
+ pass
+
+ def generate_yang_model(self, nsd, vnfds, use_gi):
+ if self.service_primitive is not None:
+ nsd.service_primitive.add().from_dict(self.service_primitive)
+
+ def get_policy_props(self):
+ tosca_props = {}
+
+ for prop in self.policy.get_properties_objects():
+ if isinstance(prop.value, GetInput):
+ tosca_props[prop.name] = {'get_param': prop.value.input_name}
+ else:
+ tosca_props[prop.name] = prop.value
+ return tosca_props
+ def get_supporting_files(self, files, desc_id=None):
+ if not len(self.scripts):
+ return
+ if desc_id not in files:
+ return
+ for script in self.scripts:
+ files[desc_id].append({
+ 'type': 'script',
+ 'name': script,
+ },)
\ No newline at end of file
self.log.debug(_("Metadata {0}").format(metadata))
self.metadata = metadata
+
def _recursive_handle_properties(self, resource):
'''Recursively handle the properties of the depends_on_nodes nodes.'''
# Use of hashtable (dict) here should be faster?
vnf_type_to_vdus_map[vnf_type].append(node.name)
for policy in template.policies:
policies.append(policy.name)
- for req in template.substitution_mappings.requirements:
- vnf_type_substitution_mapping[template.substitution_mappings.node_type].append(req)
+ if template.substitution_mappings.requirements:
+ for req in template.substitution_mappings.requirements:
+ vnf_type_substitution_mapping[template.substitution_mappings.node_type].append(req)
if template.substitution_mappings.capabilities:
for capability in template.substitution_mappings.capabilities:
sub_list = template.substitution_mappings.capabilities[capability]
metadata=self.metadata)
mano_node.vnf_type = vnf_type
self.mano_resources.append(mano_node)
- print("Adding a new node")
for node in self.tosca.nodetemplates:
if 'VDU' in node.type:
vnf_name=vnf_node)
self.mano_policies.append(policy_node)
+ vnfd_resources = []
for node in self.mano_resources:
self.log.debug(_("Handle properties for {0} of type {1}").
format(node.name, node.type_))
format(node.name, node.type_))
node.update_image_checksum(self.tosca.path)
+ for node in list(self.mano_resources):
+ if node.type == "vnfd":
+ vnfd_resources.append(node)
+ self.mano_resources.remove(node)
+
+ vnfd_resources.sort(key=lambda x: x.member_vnf_id, reverse=True)
+ vnf_type_duplicate_map = {}
+ for node in reversed(vnfd_resources):
+ if node.vnf_type in vnf_type_duplicate_map:
+ for policy in self.mano_policies:
+ if hasattr(policy, '_vnf_name') and policy._vnf_name == node.name:
+ policy._vnf_name = vnf_type_duplicate_map[node.vnf_type]
+ continue
+ vnf_type_duplicate_map[node.vnf_type] = node.name
+
+ self.mano_resources.extend(vnfd_resources)
for node in self.mano_resources:
# Handle vnf and vdu dependencies first
if node.type == "vnfd":
dest = os.path.join(output_dir, 'images')
elif ty == 'script':
dest = os.path.join(output_dir, 'scripts')
+ elif ty == 'icons':
+ dest = os.path.join(output_dir, 'icons')
elif ty == 'cloud_init':
dest = os.path.join(output_dir, 'cloud_init')
else:
data_types:
tosca.datatypes.network.riftio.vnf_configuration:
properties:
- config_delay:
- constraints:
- - greater_or_equal: 0
- default: 0
- required: no
- type: integer
config_details:
type: map
- config_priority:
- constraints:
- - greater_than: 0
- type: integer
- config_template:
- required: no
- type: string
config_type:
type: string
capability_types:
vnf_configuration:
config_delay: 0
config_details:
- script_type: bash
- config_priority: 2
- config_template: "\n#!/bin/bash\n\n# Rest API config\nping_mgmt_ip=<rw_mgmt_ip>\n\
- ping_mgmt_port=18888\n\n# VNF specific configuration\npong_server_ip=<rw_connection_point_name\
- \ pong_vnfd/cp0>\nping_rate=5\nserver_port=5555\n\n# Make rest API calls\
- \ to configure VNF\ncurl -D /dev/stdout \\\n -H \"Accept: application/vnd.yang.data+xml\"\
- \ \\\n -H \"Content-Type: application/vnd.yang.data+json\" \\\n \
- \ -X POST \\\n -d \"{\\\"ip\\\":\\\"$pong_server_ip\\\", \\\"port\\\
- \":$server_port}\" \\\n http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/server\n\
- rc=$?\nif [ $rc -ne 0 ]\nthen\n echo \"Failed to set server info for\
- \ ping!\"\n exit $rc\nfi\n\ncurl -D /dev/stdout \\\n -H \"Accept:\
- \ application/vnd.yang.data+xml\" \\\n -H \"Content-Type: application/vnd.yang.data+json\"\
- \ \\\n -X POST \\\n -d \"{\\\"rate\\\":$ping_rate}\" \\\n http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/rate\n\
- rc=$?\nif [ $rc -ne 0 ]\nthen\n echo \"Failed to set ping rate!\"\n\
- \ exit $rc\nfi\n\nexit 0\n"
+ script_type: rift
config_type: script
capabilities:
http_endpoint:
vnf_configuration:
config_delay: 60
config_details:
- script_type: bash
- config_priority: 1
- config_template: "\n#!/bin/bash\n\n# Rest API configuration\npong_mgmt_ip=<rw_mgmt_ip>\n\
- pong_mgmt_port=18889\n# username=<rw_username>\n# password=<rw_password>\n\
- \n# VNF specific configuration\npong_server_ip=<rw_connection_point_name\
- \ pong_vnfd/cp0>\nserver_port=5555\n\n# Make Rest API calls to configure\
- \ VNF\ncurl -D /dev/stdout \\\n -H \"Accept: application/vnd.yang.data+xml\"\
- \ \\\n -H \"Content-Type: application/vnd.yang.data+json\" \\\n \
- \ -X POST \\\n -d \"{\\\"ip\\\":\\\"$pong_server_ip\\\", \\\"port\\\
- \":$server_port}\" \\\n http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/server\n\
- rc=$?\nif [ $rc -ne 0 ]\nthen\n echo \"Failed to set server(own) info\
- \ for pong!\"\n exit $rc\nfi\n\nexit 0\n"
+ script_type: rift
config_type: script
capabilities:
http_endpoint:
--- /dev/null
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0
+description: Toy NS
+metadata:
+ ID: ping_pong_nsd
+ vendor: RIFT.io
+ version: 1.0
+imports:
+- "ping_vnfd.yaml"
+- "pong_vnfd.yaml"
+topology_template:
+ policies:
+ - initial_config_primitive:
+ properties:
+ name: start traffic
+ seq: 1
+ user_defined_script: start_traffic.py
+ targets: [pong_vnfd]
+ type: tosca.policies.nfv.riftio.initial_config_primitive
+ - placement_0:
+ properties:
+ name: Orcus
+ requirement: Place this VM on the Kuiper belt object Orcus
+ strategy: COLOCATION
+ targets: [ping_vnfd, pong_vnfd]
+ type: tosca.policies.nfv.riftio.placement
+ - placement_1:
+ properties:
+ name: Quaoar
+ requirement: Place this VM on the Kuiper belt object Quaoar
+ strategy: COLOCATION
+ targets: [ping_vnfd, pong_vnfd]
+ type: tosca.policies.nfv.riftio.placement
+ node_templates:
+ pong_vnfd:
+ type: tosca.nodes.nfv.riftio.pongvnfdVNF
+ properties:
+ id: 2
+ vendor: RIFT.io
+ version: 1.0
+ requirements:
+ - virtualLink1: ping_pong_vld
+ ping_pong_vld:
+ type: tosca.nodes.nfv.riftio.ELAN
+ properties:
+ cidr: 31.31.31.0/24
+ description: Toy VL
+ gateway_ip: 31.31.31.210
+ ip_version: 4
+ vendor: RIFT.io
+ ping_vnfd:
+ type: tosca.nodes.nfv.riftio.pingvnfdVNF
+ properties:
+ id: 1
+ vendor: RIFT.io
+ version: 1.0
+ requirements:
+ - virtualLink1: ping_pong_vld
--- /dev/null
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0
+description: This is an example RIFT.ware VNF
+metadata:
+ ID: ping_vnfd
+ vendor: RIFT.io
+ version: 1.0
+imports:
+- riftiotypes.yaml
+node_types:
+ tosca.nodes.nfv.riftio.pingvnfdVNF:
+ derived_from: tosca.nodes.nfv.riftio.VNF1
+ requirements:
+ - virtualLink1:
+ type: tosca.nodes.nfv.VL
+topology_template:
+ policies:
+ - configuration:
+ properties:
+ config:
+ config_details:
+ script_type: rift
+ config_type: script
+ initial_config_primitive:
+ - name: set ping rate
+ parameter:
+ - rate: 5
+ seq: 1
+ user_defined_script: ping_set_rate.py
+ targets: [ping_vnfd_iovdu_0]
+ type: tosca.policies.nfv.riftio.vnf_configuration
+ substitution_mappings:
+ node_type: tosca.nodes.nfv.riftio.pingvnfdVNF
+ requirements:
+ - virtualLink1: [ping_vnfd_cp0, virtualLink]
+ node_templates:
+ ping_vnfd_iovdu_0:
+ type: tosca.nodes.nfv.riftio.VDU1
+ properties:
+ cloud_init: "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\n\
+ ssh_pwauth: True\nruncmd:\n - [ systemctl, daemon-reload ]\n - [ systemctl,\
+ \ enable, ping.service ]\n - [ systemctl, start, --no-block, ping.service\
+ \ ]\n - [ ifup, eth1 ]\n"
+ count: 1
+ capabilities:
+ hypervisor_epa:
+ properties:
+ type: PREFER_KVM
+ version: 1
+ mgmt_interface:
+ properties:
+ dashboard_params:
+ path: api/v1/ping/stats
+ port: 18888
+ port: 18888
+ protocol: tcp
+ monitoring_param:
+ properties:
+ description: no of ping requests
+ json_query_method: namekey
+ name: ping-request-tx-count
+ ui_data:
+ group_tag: Group1
+ units: packets
+ widget_type: counter
+ url_path: api/v1/ping/stats
+ monitoring_param_1:
+ properties:
+ description: no of ping responses
+ json_query_method: namekey
+ name: ping-response-rx-count
+ ui_data:
+ group_tag: Group1
+ units: packets
+ widget_type: counter
+ url_path: api/v1/ping/stats
+ nfv_compute:
+ properties:
+ cpu_allocation:
+ cpu_affinity: dedicated
+ thread_allocation: prefer
+ disk_size: 4 GB
+ mem_page_size: normal
+ mem_size: 1024 MB
+ num_cpus: 4
+ numa_extension:
+ properties:
+ mem_policy: STRICT
+ node:
+ - id: 0
+ mem_size: 512 MB
+ vcpus:
+ - 0
+ - 1
+ - id: 1
+ mem_size: 512 MB
+ vcpus:
+ - 2
+ - 3
+ node_cnt: 2
+ vswitch_epa:
+ properties:
+ ovs_acceleration: DISABLED
+ ovs_offload: DISABLED
+ artifacts:
+ ping_vnfd_iovdu_0_vm_image:
+ file: ../images/Fedora-x86_64-20-20131211.1-sda-ping.qcow2
+ image_checksum: a6ffaa77f949a9e4ebb082c6147187cf
+ type: tosca.artifacts.Deployment.Image.riftio.QCOW2
+ interfaces:
+ Standard:
+ create: ping_vnfd_iovdu_0_vm_image
+ ping_vnfd_cp0:
+ type: tosca.nodes.nfv.riftio.CP1
+ properties:
+ cp_type: VPORT
+ name: ping_vnfd/cp0
+ vdu_intf_name: eth0
+ vdu_intf_type: VIRTIO
+ requirements:
+ - virtualBinding:
+ node: ping_vnfd_iovdu_0
--- /dev/null
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0
+description: This is an example RIFT.ware VNF
+metadata:
+ ID: pong_vnfd
+ vendor: RIFT.io
+ version: 1.0
+imports:
+- riftiotypes.yaml
+node_types:
+ tosca.nodes.nfv.riftio.pongvnfdVNF:
+ derived_from: tosca.nodes.nfv.riftio.VNF1
+ requirements:
+ - virtualLink1:
+ type: tosca.nodes.nfv.VL
+topology_template:
+ policies:
+ - configuration:
+ properties:
+ config:
+ config_details:
+ script_type: rift
+ config_type: script
+ targets: [pong_vnfd_iovdu_0]
+ type: tosca.policies.nfv.riftio.vnf_configuration
+ substitution_mappings:
+ node_type: tosca.nodes.nfv.riftio.pongvnfdVNF
+ requirements:
+ - virtualLink1: [pong_vnfd_cp0, virtualLink]
+ node_templates:
+ pong_vnfd_iovdu_0:
+ type: tosca.nodes.nfv.riftio.VDU1
+ properties:
+ cloud_init: "#cloud-config\npassword: fedora\nchpasswd: { expire: False }\n\
+ ssh_pwauth: True\nruncmd:\n - [ systemctl, daemon-reload ]\n - [ systemctl,\
+ \ enable, pong.service ]\n - [ systemctl, start, --no-block, pong.service\
+ \ ]\n - [ ifup, eth1 ]\n"
+ count: 1
+ capabilities:
+ hypervisor_epa:
+ properties:
+ type: PREFER_KVM
+ version: 1
+ mgmt_interface:
+ properties:
+ dashboard_params:
+ path: api/v1/pong/stats
+ port: 18889
+ port: 18889
+ protocol: tcp
+ monitoring_param:
+ properties:
+ description: no of ping requests
+ json_query_method: namekey
+ name: ping-request-rx-count
+ ui_data:
+ group_tag: Group1
+ units: packets
+ widget_type: counter
+ url_path: api/v1/pong/stats
+ monitoring_param_1:
+ properties:
+ description: no of ping responses
+ json_query_method: namekey
+ name: ping-response-tx-count
+ ui_data:
+ group_tag: Group1
+ units: packets
+ widget_type: counter
+ url_path: api/v1/pong/stats
+ nfv_compute:
+ properties:
+ cpu_allocation:
+ cpu_affinity: dedicated
+ thread_allocation: prefer
+ disk_size: 4 GB
+ mem_page_size: normal
+ mem_size: 1024 MB
+ num_cpus: 4
+ numa_extension:
+ properties:
+ mem_policy: STRICT
+ node:
+ - id: 0
+ mem_size: 512 MB
+ vcpus:
+ - 0
+ - 1
+ - id: 1
+ mem_size: 512 MB
+ vcpus:
+ - 2
+ - 3
+ node_cnt: 2
+ vswitch_epa:
+ properties:
+ ovs_acceleration: DISABLED
+ ovs_offload: DISABLED
+ artifacts:
+ pong_vnfd_iovdu_0_vm_image:
+ file: ../images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2
+ image_checksum: 977484d95575f80ef8399c9cf1d45ebd
+ type: tosca.artifacts.Deployment.Image.riftio.QCOW2
+ interfaces:
+ Standard:
+ create: pong_vnfd_iovdu_0_vm_image
+ pong_vnfd_cp0:
+ type: tosca.nodes.nfv.riftio.CP1
+ properties:
+ cp_type: VPORT
+ name: pong_vnfd/cp0
+ vdu_intf_name: eth0
+ vdu_intf_type: VIRTIO
+ requirements:
+ - virtualBinding:
+ node: pong_vnfd_iovdu_0
--- /dev/null
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0
+description: Extended types
+
+
+data_types:
+ tosca.datatypes.nfv.riftio.dashboard_params:
+ properties:
+ path:
+ type: string
+ description: >-
+ The HTTP path for the dashboard
+ port:
+ type: tosca.datatypes.network.PortDef
+ description: >-
+ The HTTP port for the dashboard
+ default: 80
+ https:
+ type: boolean
+ description: >-
+ Pick HTTPS instead of HTTP , Default is false
+ default: false
+ required: false
+ tosca.datatypes.nfv.riftio.monitoring_param_ui:
+ properties:
+ description:
+ type: string
+ required: false
+ group_tag:
+ type: string
+ description: >-
+ A simple tag to group monitoring parameters
+ required: false
+ widget_type:
+ type: string
+ description: >-
+ Type of the widget
+ default: counter
+ constraints:
+ - valid_values:
+ - histogram
+ - bar
+ - gauge
+ - slider
+ - counter
+ - textbox
+ units:
+ type: string
+ required: false
+ tosca.datatypes.nfv.riftio.monitoring_param_value:
+ properties:
+ value_type:
+ type: string
+ default: integer
+ constraints:
+ - valid_values:
+ - integer
+ - float
+ - string
+ numeric_min:
+ type: integer
+ description: >-
+ Minimum value for the parameter
+ required: false
+ numeric_max:
+ type: integer
+ description: >-
+ Maxium value for the parameter
+ required: false
+ string_min:
+ type: integer
+ description: >-
+ Minimum string length for the parameter
+ required: false
+ constraints:
+ - greater_or_equal: 0
+ string_max:
+ type: integer
+ description: >-
+ Maximum string length for the parameter
+ required: false
+ constraints:
+ - greater_or_equal: 0
+ tosca.datatypes.compute.Container.Architecture.CPUAllocation:
+ derived_from: tosca.datatypes.Root
+ properties:
+ cpu_affinity:
+ type: string
+ required: false
+ constraints:
+ - valid_values: [shared, dedicated, any]
+ thread_allocation:
+ type: string
+ required: false
+ constraints:
+ - valid_values: [avoid, separate, isolate, prefer]
+ socket_count:
+ type: integer
+ required: false
+ core_count:
+ type: integer
+ required: false
+ thread_count:
+ type: integer
+ required: false
+
+ tosca.datatypes.compute.Container.Architecture.NUMA:
+ derived_from: tosca.datatypes.Root
+ properties:
+ id:
+ type: integer
+ constraints:
+ - greater_or_equal: 0
+ vcpus:
+ type: list
+ entry_schema:
+ type: integer
+ constraints:
+ - greater_or_equal: 0
+ mem_size:
+ type: scalar-unit.size
+ constraints:
+ - greater_or_equal: 0 MB
+ tosca.datatypes.nfv.riftio.paired_thread_map:
+ properties:
+ thread_a:
+ type: integer
+ required: true
+ constraints:
+ - greater_or_equal: 0
+ thread_b:
+ type: integer
+ required: true
+ constraints:
+ - greater_or_equal: 0
+
+ tosca.datatypes.nfv.riftio.paired_threads:
+ properties:
+ num_paired_threads:
+ type: integer
+ constraints:
+ - greater_or_equal: 1
+ paired_thread_ids:
+ type: list
+ entry_schema:
+ type: tosca.datatypes.nfv.riftio.paired_thread_map
+ constraints:
+ - max_length: 16
+ required: false
+
+ tosca.datatypes.compute.riftio.numa:
+ properties:
+ id:
+ type: integer
+ constraints:
+ - greater_or_equal: 0
+ vcpus:
+ type: list
+ entry_schema:
+ type: integer
+ constraints:
+ - greater_or_equal: 0
+ required: false
+ mem_size:
+ type: scalar-unit.size
+ constraints:
+ - greater_or_equal: 0 MB
+ required: false
+ om_numa_type:
+ type: string
+ description: Openmano Numa type selection
+ constraints:
+ - valid_values: [cores, paired-threads, threads]
+ required: false
+ num_cores:
+ type: integer
+ description: Use when om_numa_type is cores
+ constraints:
+ - greater_or_equal: 1
+ required: false
+ paired_threads:
+ type: tosca.datatypes.nfv.riftio.paired_threads
+ description: Use when om_numa_type is paired-threads
+ required: false
+ num_threads:
+ type: integer
+ description: Use when om_numa_type is threads
+ constraints:
+ - greater_or_equal: 1
+ required: false
+
+ tosca.nfv.datatypes.pathType:
+ properties:
+ forwarder:
+ type: string
+ required: true
+ capability:
+ type: string
+ required: true
+
+ tosca.nfv.datatypes.aclType:
+ properties:
+ eth_type:
+ type: string
+ required: false
+ eth_src:
+ type: string
+ required: false
+ eth_dst:
+ type: string
+ required: false
+ vlan_id:
+ type: integer
+ constraints:
+ - in_range: [ 1, 4094 ]
+ required: false
+ vlan_pcp:
+ type: integer
+ constraints:
+ - in_range: [ 0, 7 ]
+ required: false
+ mpls_label:
+ type: integer
+ constraints:
+ - in_range: [ 16, 1048575]
+ required: false
+ mpls_tc:
+ type: integer
+ constraints:
+ - in_range: [ 0, 7 ]
+ required: false
+ ip_dscp:
+ type: integer
+ constraints:
+ - in_range: [ 0, 63 ]
+ required: false
+ ip_ecn:
+ type: integer
+ constraints:
+ - in_range: [ 0, 3 ]
+ required: false
+ ip_src_prefix:
+ type: string
+ required: false
+ ip_dst_prefix:
+ type: string
+ required: false
+ ip_proto:
+ type: integer
+ constraints:
+ - in_range: [ 1, 254 ]
+ required: false
+ destination_port_range:
+ type: integer
+ required: false
+ source_port_range:
+ type: integer
+ required: false
+ network_src_port_id:
+ type: string
+ required: false
+ network_dst_port_id:
+ type: string
+ required: false
+ network_id:
+ type: string
+ required: false
+ network_name:
+ type: string
+ required: false
+ tenant_id:
+ type: string
+ required: false
+ icmpv4_type:
+ type: integer
+ constraints:
+ - in_range: [ 0, 254 ]
+ required: false
+ icmpv4_code:
+ type: integer
+ constraints:
+ - in_range: [ 0, 15 ]
+ required: false
+ arp_op:
+ type: integer
+ constraints:
+ - in_range: [ 1, 25 ]
+ required: false
+ arp_spa:
+ type: string
+ required: false
+ arp_tpa:
+ type: string
+ required: false
+ arp_sha:
+ type: string
+ required: false
+ arp_tha:
+ type: string
+ required: false
+ ipv6_src:
+ type: string
+ required: false
+ ipv6_dst:
+ type: string
+ required: false
+ ipv6_flabel:
+ type: integer
+ constraints:
+ - in_range: [ 0, 1048575]
+ required: false
+ icmpv6_type:
+ type: integer
+ constraints:
+ - in_range: [ 0, 255]
+ required: false
+ icmpv6_code:
+ type: integer
+ constraints:
+ - in_range: [ 0, 7]
+ required: false
+ ipv6_nd_target:
+ type: string
+ required: false
+ ipv6_nd_sll:
+ type: string
+ required: false
+ ipv6_nd_tll:
+ type: string
+ required: false
+
+
+ tosca.datatypes.nfv.riftio.vnf_configuration:
+ properties:
+ config_type:
+ type: string
+ description: >-
+ Type of the configuration agent to use
+ constraints:
+ - valid_values: [script, netconf, rest, juju]
+ config_details:
+ type: map
+ description: >-
+ Specify the details for the config agent, like
+ script type, juju charm to use, etc.
+ config_template:
+ required: false
+ type: string
+ config_delay:
+ type: integer
+ constraints:
+ - greater_or_equal: 0
+ default: 0
+ required: false
+ config_priority:
+ type: integer
+ constraints:
+ - greater_than: 0
+
+ tosca.datatypes.nfv.riftio.parameter_value:
+ properties:
+ name:
+ type: string
+ description: Name of the parameter
+ value:
+ type: string
+ description: Value of the parameter
+
+ tosca.datatypes.nfv.riftio.config_primitive:
+ properties:
+ name:
+ type: string
+ seq:
+ type: integer
+ description: >-
+ Order in which to apply, when multiple ones are defined
+ default: 0
+ constraints:
+ - greater_or_equal: 0
+ parameter:
+ type: list
+ entry_schema:
+ type: tosca.datatypes.nfv.riftio.parameter_value
+ user_defined_script:
+ type: string
+ tosca.datatypes.nfv.riftio.primitive_parameter:
+ properties:
+ data_type:
+ type: string
+ description: >-
+ Data type associated with the name
+ constraints:
+ - valid_values: [string, integer, boolean]
+ mandatory:
+ type: boolean
+ description: >-
+ If this field is mandatory
+ default: false
+ required: false
+ default_value:
+ type: string
+ description: >-
+ The default value for this field
+ required: false
+ parameter_pool:
+ type: string
+ description: >-
+ Parameter pool name to use for this parameter
+ required: false
+ read_only:
+ type: boolean
+ description: >-
+ The value should be greyed out by the UI.
+ Only applies to parameters with default values.
+ required: false
+ default: false
+ hidden:
+ type: boolean
+ description: >-
+ The field should be hidden by the UI.
+ Only applies to parameters with default values.
+ required: false
+ default: false
+ tosca.datatypes.nfv.riftio.primitive_parameter_group:
+ properties:
+ name:
+ type: string
+ description: >-
+ Name of the parameter group
+ mandatory:
+ type: boolean
+ description: >-
+ If this group is mandatory
+ default: false
+ required: false
+ parameter:
+ type: map
+ description: >-
+ List of parameters for the service primitive
+ entry_schema: osca.datatypes.riftio.primitive_parameter
+
+ tosca.datatypes.nfv.riftio.vnf_primitive_group:
+ properties:
+ vnf_name:
+ type: string
+ description: >-
+ Name of the VNF in the NS
+ primitive:
+ type: map
+ entry_schema:
+ type: string
+ description: >-
+ Index and name of the primitive
+
+
+capability_types:
+ tosca.capabilities.nfv.riftio.mgmt_interface:
+ derived_from: tosca.capabilities.Endpoint
+ properties:
+ static_ip:
+ type: string
+ required: false
+ description: >-
+ Specifies the static IP address for managing the VNF
+ connection_point:
+ type: string
+ required: false
+ description: >-
+ Use the ip address associated with this connection point
+ dashboard_params:
+ type: tosca.datatypes.nfv.riftio.dashboard_params
+ required: false
+ description: >-
+ Parameters for the VNF dashboard
+ tosca.capabilities.nfv.riftio.monitoring_param:
+ derived_from: tosca.capabilities.nfv.Metric
+ properties:
+ name:
+ type: string
+ required: false
+ description:
+ type: string
+ required: false
+ protocol:
+ type: string
+ default: http
+ constraints:
+ - equal: http
+ polling_interval:
+ type: scalar-unit.time
+ description: >-
+ The HTTP polling interval in seconds
+ default: 2 s
+ username:
+ type: string
+ description: >-
+ The HTTP basic auth username
+ required: false
+ password:
+ type: string
+ description: >-
+ The HTTP basic auth password
+ required: false
+ method:
+ type: string
+ description: >-
+ This is the method to be performed at the uri.
+ GET by default for action
+ default: get
+ constraints:
+ - valid_values: [post, put, get, delete, options, patch]
+ headers:
+ type: map
+ entry_schema:
+ type: string
+ description: >-
+ Custom HTTP headers to put on HTTP request
+ required: false
+ json_query_method:
+ type: string
+ description: >-
+ The method to extract a value from a JSON response
+ namekey - Use the name as the key for a non-nested value.
+ jsonpath - Use jsonpath-rw implemenation to extract a value.
+ objectpath - Use objectpath implemenation to extract a value.
+ constraints:
+ - valid_values: [namekey, jsonpath, objectpath]
+ default: namekey
+ json_query_path:
+ type: string
+ description: >-
+ The json path to use to extract value from JSON structure
+ required: false
+ json_object_path:
+ type: string
+ description: >-
+ The object path to use to extract value from JSON structure
+ required: false
+ ui_data:
+ type: tosca.datatypes.nfv.riftio.monitoring_param_ui
+ required: false
+ constraints:
+ type: tosca.datatypes.nfv.riftio.monitoring_param_value
+ required: false
+ tosca.capabilities.nfv.riftio.numa_extension:
+ derived_from: tosca.capabilities.Root
+ properties:
+ node_cnt:
+ type: integer
+ description: >-
+ The number of numa nodes to expose to the VM
+ constraints:
+ - greater_or_equal: 0
+ mem_policy:
+ type: string
+ description: >-
+ This policy specifies how the memory should
+ be allocated in a multi-node scenario.
+ STRICT - The memory must be allocated
+ strictly from the memory attached
+ to the NUMA node.
+ PREFERRED - The memory should be allocated
+ preferentially from the memory
+ attached to the NUMA node
+ constraints:
+ - valid_values: [strict, preferred, STRICT, PREFERRED]
+ node:
+ type: list
+ entry_schema:
+ type: tosca.datatypes.compute.riftio.numa
+ tosca.capabilities.nfv.riftio.vswitch_epa:
+ derived_from: tosca.capabilities.Root
+ properties:
+ ovs_acceleration:
+ type: string
+ description: |-
+ Specifies Open vSwitch acceleration mode.
+ MANDATORY - OVS acceleration is required
+ PREFERRED - OVS acceleration is preferred
+ constraints:
+ - valid_values: [mandatory, preferred, disabled, MANDATORY, PREFERRED, DISABLED]
+ ovs_offload:
+ type: string
+ description: |-
+ Specifies Open vSwitch hardware offload mode.
+ MANDATORY - OVS offload is required
+ PREFERRED - OVS offload is preferred
+ constraints:
+ - valid_values: [mandatory, preferred, disabled, MANDATORY, PREFERRED, DISABLED]
+
+ tosca.capabilities.nfv.riftio.hypervisor_epa:
+ derived_from: tosca.capabilities.Root
+ properties:
+ type:
+ type: string
+ description: |-
+ Specifies the type of hypervisor.
+ constraints:
+ - valid_values: [prefer_kvm, require_kvm, PREFER_KVM, REQUIRE_KVM]
+ version:
+ type: string
+
+ tosca.capabilities.nfv.riftio.host_epa:
+ derived_from: tosca.capabilities.Root
+ properties:
+ cpu_model:
+ type: string
+ description: >-
+ Host CPU model. Examples include SandyBridge,
+ IvyBridge, etc.
+ required: false
+ constraints:
+ - valid_values:
+ - prefer_westmere
+ - require_westmere
+ - prefer_sandbridge
+ - require_sandybridge
+ - prefer_ivybridge
+ - require_ivybridge
+ - prefer_haswell
+ - require_haswell
+ - prefer_broadwell
+ - require_broadwell
+ - prefer_nehalem
+ - require_nehalem
+ - prefer_penryn
+ - require_penryn
+ - prefer_conroe
+ - require_conroe
+ - prefer_core2duo
+ - require_core2duo
+ - PREFER_WESTMERE
+ - REQUIRE_WESTMERE
+ - PREFER_SANDBRIDGE
+ - REQUIRE_SANDYBRIDGE
+ - PREFER_IVYBRIDGE
+ - REQUIRE_IVYBRIDGE
+ - PREFER_HASWELL
+ - REQUIRE_HASWELL
+ - PREFER_BROADWELL
+ - REQUIRE_BROADWELL
+ - PREFER_NEHALEM
+ - REQUIRE_NEHALEM
+ - PREFER_PENRYN
+ - REQUIRE_PENRYN
+ - PREFER_CONROE
+ - REQUIRE_CONROE
+ - PREFER_CORE2DUO
+ - REQUIRE_CORE2DUO
+ cpu_arch:
+ type: string
+ description: >-
+ Host CPU architecture
+ required: false
+ constraints:
+ - valid_values:
+ - prefer_x86
+ - require_x86
+ - prefer_x86_64
+ - require_x86_64
+ - prefer_i686
+ - require_i686
+ - prefer_ia64
+ - require_ia64
+ - prefer_armv7
+ - require_armv7
+ - prefer_armv8
+ - require_armv8
+ - PREFER_X86
+ - REQUIRE_X86
+ - PREFER_X86_64
+ - REQUIRE_X86_64
+ - PREFER_I686
+ - REQUIRE_I686
+ - PREFER_IA64
+ - REQUIRE_IA64
+ - PREFER_ARMV7
+ - REQUIRE_ARMV7
+ - PREFER_ARMV8
+ - REQUIRE_ARMV8
+ cpu_vendor:
+ type: string
+ description: >-
+ Host CPU vendor
+ required: false
+ constraints:
+ - valid_values:
+ - prefer_intel
+ - require_intel
+ - prefer_amd
+ - requie_amd
+ - PREFER_INTEL
+ - REQUIRE_INTEL
+ - PREFER_AMD
+ - REQUIE_AMD
+ cpu_socket_count:
+ type: integer
+ description: >-
+ Number of sockets on the host
+ required: false
+ constraints:
+ - greater_than : 0
+ cpu_core_count:
+ type: integer
+ description: >-
+ Number of cores on the host
+ required: false
+ constraints:
+ - greater_than : 0
+ cpu_core_thread_count:
+ type: integer
+ description: >-
+ Number of threads per core on the host
+ required: false
+ constraints:
+ - greater_than : 0
+ cpu_feature:
+ type: list
+ entry_schema:
+ type: string
+ description: |-
+ Enumeration for CPU features.
+
+ AES- CPU supports advanced instruction set for
+ AES (Advanced Encryption Standard).
+
+ CAT- Cache Allocation Technology (CAT) allows
+ an Operating System, Hypervisor, or similar
+ system management agent to specify the amount
+ of L3 cache (currently the last-level cache
+ in most server and client platforms) space an
+ application can fill (as a hint to hardware
+ functionality, certain features such as power
+ management may override CAT settings).
+
+ CMT- Cache Monitoring Technology (CMT) allows
+ an Operating System, Hypervisor, or similar
+ system management agent to determine the
+ usage of cache based on applications running
+ on the platform. The implementation is
+ directed at L3 cache monitoring (currently
+ the last-level cache in most server and
+ client platforms).
+
+ DDIO- Intel Data Direct I/O (DDIO) enables
+ Ethernet server NICs and controllers talk
+ directly to the processor cache without a
+ detour via system memory. This enumeration
+ specifies if the VM requires a DDIO
+ capable host.
+ required: false
+ constraints:
+ -valid_values:
+ - prefer_aes
+ - require_aes
+ - prefer_cat
+ - require_cat
+ - prefer_cmt
+ - require_cmt
+ - prefer_ddio
+ - require_ddio
+ - prefer_vme
+ - require_vme
+ - prefer_de
+ - require_de
+ - prefer_pse
+ - require_pse
+ - prefer_tsc
+ - require_tsc
+ - prefer_msr
+ - require_msr
+ - prefer_pae
+ - require_pae
+ - prefer_mce
+ - require_mce
+ - prefer_cx8
+ - require_cx8
+ - prefer_apic
+ - require_apic
+ - prefer_sep
+ - require_sep
+ - prefer_mtrr
+ - require_mtrr
+ - prefer_pge
+ - require_pge
+ - prefer_mca
+ - require_mca
+ - prefer_cmov
+ - require_cmov
+ - prefer_pat
+ - require_pat
+ - prefer_pse36
+ - require_pse36
+ - prefer_clflush
+ - require_clflush
+ - prefer_dts
+ - require_dts
+ - prefer_acpi
+ - require_acpi
+ - prefer_mmx
+ - require_mmx
+ - prefer_fxsr
+ - require_fxsr
+ - prefer_sse
+ - require_sse
+ - prefer_sse2
+ - require_sse2
+ - prefer_ss
+ - require_ss
+ - prefer_ht
+ - require_ht
+ - prefer_tm
+ - require_tm
+ - prefer_ia64
+ - require_ia64
+ - prefer_pbe
+ - require_pbe
+ - prefer_rdtscp
+ - require_rdtscp
+ - prefer_pni
+ - require_pni
+ - prefer_pclmulqdq
+ - require_pclmulqdq
+ - prefer_dtes64
+ - require_dtes64
+ - prefer_monitor
+ - require_monitor
+ - prefer_ds_cpl
+ - require_ds_cpl
+ - prefer_vmx
+ - require_vmx
+ - prefer_smx
+ - require_smx
+ - prefer_est
+ - require_est
+ - prefer_tm2
+ - require_tm2
+ - prefer_ssse3
+ - require_ssse3
+ - prefer_cid
+ - require_cid
+ - prefer_fma
+ - require_fma
+ - prefer_cx16
+ - require_cx16
+ - prefer_xtpr
+ - require_xtpr
+ - prefer_pdcm
+ - require_pdcm
+ - prefer_pcid
+ - require_pcid
+ - prefer_dca
+ - require_dca
+ - prefer_sse4_1
+ - require_sse4_1
+ - prefer_sse4_2
+ - require_sse4_2
+ - prefer_x2apic
+ - require_x2apic
+ - prefer_movbe
+ - require_movbe
+ - prefer_popcnt
+ - require_popcnt
+ - prefer_tsc_deadline_timer
+ - require_tsc_deadline_timer
+ - prefer_xsave
+ - require_xsave
+ - prefer_avx
+ - require_avx
+ - prefer_f16c
+ - require_f16c
+ - prefer_rdrand
+ - require_rdrand
+ - prefer_fsgsbase
+ - require_fsgsbase
+ - prefer_bmi1
+ - require_bmi1
+ - prefer_hle
+ - require_hle
+ - prefer_avx2
+ - require_avx2
+ - prefer_smep
+ - require_smep
+ - prefer_bmi2
+ - require_bmi2
+ - prefer_erms
+ - require_erms
+ - prefer_invpcid
+ - require_invpcid
+ - prefer_rtm
+ - require_rtm
+ - prefer_mpx
+ - require_mpx
+ - prefer_rdseed
+ - require_rdseed
+ - prefer_adx
+ - require_adx
+ - prefer_smap
+ - require_smap
+ - PREFER_AES
+ - REQUIRE_AES
+ - PREFER_CAT
+ - REQUIRE_CAT
+ - PREFER_CMT
+ - REQUIRE_CMT
+ - PREFER_DDIO
+ - REQUIRE_DDIO
+ - PREFER_VME
+ - REQUIRE_VME
+ - PREFER_DE
+ - REQUIRE_DE
+ - PREFER_PSE
+ - REQUIRE_PSE
+ - PREFER_TSC
+ - REQUIRE_TSC
+ - PREFER_MSR
+ - REQUIRE_MSR
+ - PREFER_PAE
+ - REQUIRE_PAE
+ - PREFER_MCE
+ - REQUIRE_MCE
+ - PREFER_CX8
+ - REQUIRE_CX8
+ - PREFER_APIC
+ - REQUIRE_APIC
+ - PREFER_SEP
+ - REQUIRE_SEP
+ - PREFER_MTRR
+ - REQUIRE_MTRR
+ - PREFER_PGE
+ - REQUIRE_PGE
+ - PREFER_MCA
+ - REQUIRE_MCA
+ - PREFER_CMOV
+ - REQUIRE_CMOV
+ - PREFER_PAT
+ - REQUIRE_PAT
+ - PREFER_PSE36
+ - REQUIRE_PSE36
+ - PREFER_CLFLUSH
+ - REQUIRE_CLFLUSH
+ - PREFER_DTS
+ - REQUIRE_DTS
+ - PREFER_ACPI
+ - REQUIRE_ACPI
+ - PREFER_MMX
+ - REQUIRE_MMX
+ - PREFER_FXSR
+ - REQUIRE_FXSR
+ - PREFER_SSE
+ - REQUIRE_SSE
+ - PREFER_SSE2
+ - REQUIRE_SSE2
+ - PREFER_SS
+ - REQUIRE_SS
+ - PREFER_HT
+ - REQUIRE_HT
+ - PREFER_TM
+ - REQUIRE_TM
+ - PREFER_IA64
+ - REQUIRE_IA64
+ - PREFER_PBE
+ - REQUIRE_PBE
+ - PREFER_RDTSCP
+ - REQUIRE_RDTSCP
+ - PREFER_PNI
+ - REQUIRE_PNI
+ - PREFER_PCLMULQDQ
+ - REQUIRE_PCLMULQDQ
+ - PREFER_DTES64
+ - REQUIRE_DTES64
+ - PREFER_MONITOR
+ - REQUIRE_MONITOR
+ - PREFER_DS_CPL
+ - REQUIRE_DS_CPL
+ - PREFER_VMX
+ - REQUIRE_VMX
+ - PREFER_SMX
+ - REQUIRE_SMX
+ - PREFER_EST
+ - REQUIRE_EST
+ - PREFER_TM2
+ - REQUIRE_TM2
+ - PREFER_SSSE3
+ - REQUIRE_SSSE3
+ - PREFER_CID
+ - REQUIRE_CID
+ - PREFER_FMA
+ - REQUIRE_FMA
+ - PREFER_CX16
+ - REQUIRE_CX16
+ - PREFER_XTPR
+ - REQUIRE_XTPR
+ - PREFER_PDCM
+ - REQUIRE_PDCM
+ - PREFER_PCID
+ - REQUIRE_PCID
+ - PREFER_DCA
+ - REQUIRE_DCA
+ - PREFER_SSE4_1
+ - REQUIRE_SSE4_1
+ - PREFER_SSE4_2
+ - REQUIRE_SSE4_2
+ - PREFER_X2APIC
+ - REQUIRE_X2APIC
+ - PREFER_MOVBE
+ - REQUIRE_MOVBE
+ - PREFER_POPCNT
+ - REQUIRE_POPCNT
+ - PREFER_TSC_DEADLINE_TIMER
+ - REQUIRE_TSC_DEADLINE_TIMER
+ - PREFER_XSAVE
+ - REQUIRE_XSAVE
+ - PREFER_AVX
+ - REQUIRE_AVX
+ - PREFER_F16C
+ - REQUIRE_F16C
+ - PREFER_RDRAND
+ - REQUIRE_RDRAND
+ - PREFER_FSGSBASE
+ - REQUIRE_FSGSBASE
+ - PREFER_BMI1
+ - REQUIRE_BMI1
+ - PREFER_HLE
+ - REQUIRE_HLE
+ - PREFER_AVX2
+ - REQUIRE_AVX2
+ - PREFER_SMEP
+ - REQUIRE_SMEP
+ - PREFER_BMI2
+ - REQUIRE_BMI2
+ - PREFER_ERMS
+ - REQUIRE_ERMS
+ - PREFER_INVPCID
+ - REQUIRE_INVPCID
+ - PREFER_RTM
+ - REQUIRE_RTM
+ - PREFER_MPX
+ - REQUIRE_MPX
+ - PREFER_RDSEED
+ - REQUIRE_RDSEED
+ - PREFER_ADX
+ - REQUIRE_ADX
+ - PREFER_SMAP
+ - REQUIRE_SMAP
+ om_cpu_model_string:
+ type: string
+ description: >-
+ Openmano CPU model string
+ required: false
+ om_cpu_feature:
+ type: list
+ entry_schema:
+ type: string
+ description: >-
+ List of openmano CPU features
+ required: false
+
+ tosca.capabilities.nfv.riftio.sfc:
+ derived_from: tosca.capabilities.Root
+ description: >-
+ Service Function Chaining support on this VDU
+ properties:
+ sfc_type:
+ type: string
+ description: >-
+ Type of node in Service Function Chaining Architecture
+ constraints:
+ - valid_values: [unaware, classifier, sf, sff, UNAWARE, CLASSIFIER, SF, SFF]
+ default: unaware
+ sf_type:
+ type: string
+ description: >-
+ Type of Service Function.
+ NOTE- This needs to map with Service Function Type in ODL to
+ support VNFFG. Service Function Type is manadatory param in ODL
+ SFC.
+ required: false
+ tosca.capabilities.Compute.Container.Architecture:
+ derived_from: tosca.capabilities.Container
+ properties:
+ mem_page_size:
+ type: string
+ description: >-
+ Memory page allocation size. If a VM requires
+ hugepages, it should choose huge or size_2MB
+ or size_1GB. If the VM prefers hugepages, it
+ should chose prefer_huge.
+ huge/large - Require hugepages (either 2MB or 1GB)
+ normal - Does not require hugepages
+ size_2MB - Requires 2MB hugepages
+ size_1GB - Requires 1GB hugepages
+ prefer_huge - Application perfers hugepages
+ NOTE - huge and normal is only defined in standards as of
+ now.
+ required: false
+ constraints:
+ - valid_values: [normal, large, huge, size_2MB, size_1GB, prefer_huge, NORMAL,LARGE, HUGE, SIZE_2MB, SIZE_1GB, PREFER_HUGE]
+ cpu_allocation:
+ type: tosca.datatypes.compute.Container.Architecture.CPUAllocation
+ required: false
+ numa_nodes:
+ type: map
+ required: false
+ entry_schema:
+ type: tosca.datatypes.compute.Container.Architecture.NUMA
+
+
+node_types:
+ tosca.nodes.nfv.riftio.VDU1:
+ derived_from: tosca.nodes.nfv.VDU
+ properties:
+ description:
+ type: string
+ required: false
+ image:
+ description: >-
+ If an image is specified here, it is assumed that the image
+ is already present in the RO or VIM and not in the package.
+ type: string
+ required: false
+ image_checksum:
+ type: string
+ description: >-
+ Image checksum for the image in RO or VIM.
+ required: false
+ cloud_init:
+ description: >-
+ Inline cloud-init specification
+ required: false
+ type: string
+ count:
+ default: 1
+ type: integer
+ capabilities:
+ virtualLink:
+ type: tosca.capabilities.nfv.VirtualLinkable
+ monitoring_param_1:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
+ mgmt_interface:
+ type: tosca.capabilities.nfv.riftio.mgmt_interface
+ monitoring_param:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
+ numa_extension:
+ type: tosca.capabilities.nfv.riftio.numa_extension
+ vswitch_epa:
+ type: tosca.capabilities.nfv.riftio.vswitch_epa
+ hypervisor_epa:
+ type: tosca.capabilities.nfv.riftio.hypervisor_epa
+ host_epa:
+ type: tosca.capabilities.nfv.riftio.host_epa
+ tosca.nodes.nfv.riftio.CP1:
+ derived_from: tosca.nodes.nfv.CP
+ properties:
+ cp_type:
+ description: Type of the connection point
+ type: string
+ default: VPORT
+ constraints:
+ - valid_values: [VPORT]
+ name:
+ description: Name of the connection point
+ type: string
+ required: false
+ vdu_intf_name:
+ description: Name of the interface on VDU
+ type: string
+ vdu_intf_type:
+ description: >-
+ Specifies the type of virtual interface
+ between VM and host.
+ VIRTIO - Use the traditional VIRTIO interface.
+ PCI-PASSTHROUGH - Use PCI-PASSTHROUGH interface.
+ SR-IOV - Use SR-IOV interface.
+ E1000 - Emulate E1000 interface.
+ RTL8139 - Emulate RTL8139 interface.
+ PCNET - Emulate PCNET interface.
+ OM-MGMT - Used to specify openmano mgmt external-connection type
+ type: string
+ constraints:
+ - valid_values: [OM-MGMT, VIRTIO, E1000, SR-IOV]
+ bandwidth:
+ type: integer
+ description: Aggregate bandwidth of the NIC
+ constraints:
+ - greater_or_equal: 0
+ required: false
+ vpci:
+ type: string
+ description: >-
+ Specifies the virtual PCI address. Expressed in
+ the following format dddd:dd:dd.d. For example
+ 0000:00:12.0. This information can be used to
+ pass as metadata during the VM creation.
+ required: false
+ capabilities:
+ sfc:
+ type: tosca.capabilities.nfv.riftio.sfc
+ tosca.nodes.nfv.riftio.VNF1:
+ derived_from: tosca.nodes.nfv.VNF
+ properties:
+ member_index:
+ type: integer
+ constraints:
+ - greater_or_equal: 1
+ description: Index of the VNF in the NS
+ required: false
+ start_by_default:
+ type: boolean
+ default: true
+ description: Start this VNF on NS instantiate
+ logo:
+ type: string
+ description: >-
+ Logo to display with the VNF in the orchestrator
+ required: false
+ capabilities:
+ mgmt_interface:
+ type: tosca.capabilities.nfv.riftio.mgmt_interface
+ monitoring_param:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
+ sfc:
+ type: tosca.capabilities.nfv.riftio.sfc
+ tosca.nodes.nfv.riftio.ELAN:
+ derived_from: tosca.nodes.nfv.VL.ELAN
+ properties:
+ description:
+ type: string
+ required: false
+ network_name:
+ type: string
+ description: >-
+ Name of network in VIM account. This is used to indicate
+ pre-provisioned network name in cloud account.
+ required: false
+ root_bandwidth:
+ type: integer
+ description: >-
+ This is the aggregate bandwidth
+ constraints:
+ - greater_or_equal: 0
+ required: false
+ leaf_bandwidth:
+ type: integer
+ description: >-
+ This is the bandwidth of branches
+ constraints:
+ - greater_or_equal: 0
+ required: false
+ tosca.nodes.nfv.riftio.FP1:
+ derived_from: tosca.nodes.nfv.FP
+ properties:
+ id:
+ type: integer
+ required: false
+ policy:
+ type: tosca.nfv.datatypes.policyType
+ required: true
+ description: policy to use to match traffic for this FP
+ path:
+ type: list
+ required: true
+ entry_schema:
+ type: tosca.nfv.datatypes.pathType
+ cp:
+ type: tosca.nfv.datatypes.pathType
+ required: true
+
+
+
+artifact_types:
+ tosca.artifacts.Deployment.riftio.cloud_init_file:
+ derived_from: tosca.artifacts.Deployment
+ file:
+ type: string
+
+ tosca.artifacts.Deployment.Image.riftio.QCOW2:
+ derived_from: tosca.artifacts.Deployment.Image.VM.QCOW2
+ image_checksum:
+ required: false
+ type: string
+
+group_types:
+ tosca.groups.nfv.VNFFG:
+ derived_from: tosca.groups.Root
+ properties:
+ vendor:
+ type: string
+ required: true
+ description: name of the vendor who generate this VNFFG
+ version:
+ type: string
+ required: true
+ description: version of this VNFFG
+ number_of_endpoints:
+ type: integer
+ required: true
+ description: count of the external endpoints included in this VNFFG
+ dependent_virtual_link:
+ type: list
+ entry_schema:
+ type: string
+ required: true
+ description: Reference to a VLD used in this Forwarding Graph
+ connection_point:
+ type: list
+ entry_schema:
+ type: string
+ required: true
+ description: Reference to Connection Points forming the VNFFG
+ constituent_vnfs:
+ type: list
+ entry_schema:
+ type: string
+ required: true
+ description: Reference to a list of VNFD used in this VNF Forwarding Graph
+ members: [ tosca.nodes.nfv.FP ]
+
+ tosca.groups.nfv.riftio.scaling:
+ derived_from: tosca.groups.Root
+ properties:
+ name:
+ type: string
+ min_instances:
+ type: integer
+ description: >-
+ Minimum instances of the scaling group which are allowed.
+ These instances are created by default when the network service
+ is instantiated.
+ max_instances:
+ type: integer
+ description: >-
+ Maximum instances of this scaling group that are allowed
+ in a single network service. The network service scaling
+ will fail, when the number of service group instances
+ exceed the max-instance-count specified.
+ cooldown_time:
+ type: integer
+ description: >-
+ The duration after a scaling-in/scaling-out action has been
+ triggered, for which there will be no further optional
+ ratio:
+ type: map
+ entry_schema:
+ type: integer
+ description: >-
+ Specify the number of instances of each VNF to instantiate
+ for a scaling action
+ members: [tosca.nodes.nfv.VNF]
+ interfaces:
+ action:
+ type: tosca.interfaces.nfv.riftio.scaling.action
+
+interface_types:
+ tosca.interfaces.nfv.riftio.scaling.action:
+ pre_scale_in:
+ description: Operation to execute before a scale in
+ post_scale_in:
+ description: Operation to execute after a scale in
+ pre_scale_out:
+ description: Operation to execute before a scale out
+ post_scale_out:
+ description: Operation to execute after a scale out
+
+policy_types:
+ tosca.policies.nfv.riftio.placement:
+ derived_from: tosca.policies.Placement
+ properties:
+ name:
+ type: string
+ description: >-
+ Place group construct to define the compute resource placement strategy
+ in cloud environment
+ requirement:
+ type: string
+ description: >-
+ This is free text space used to describe the intent/rationale
+ behind this placement group. This is for human consumption only
+ strategy:
+ type: string
+ description: >-
+ Strategy associated with this placement group
+ Following values are possible
+ COLOCATION - Colocation strategy imply intent to share the physical
+ infrastructure (hypervisor/network) among all members
+ of this group.
+ ISOLATION - Isolation strategy imply intent to not share the physical
+ infrastructure (hypervisor/network) among the members
+ of this group.
+ constraints:
+ valid_values:
+ - COLOCATION
+ - ISOLATION
+ tosca.policies.nfv.riftio.vnf_configuration:
+ derived_from: tosca.policies.Root
+ properties:
+ config:
+ type: tosca.datatypes.nfv.riftio.vnf_configuration
+ initial_config:
+ type: list
+ entry_schema:
+ type: tosca.datatypes.nfv.riftio.config_primitive
+ tosca.policies.nfv.riftio.vnf_service_primitives:
+ derived_from: tosca.policies.Root
+ properties:
+ parameter:
+ type: map
+ entry_schema:
+ type: primitive_parameter
+ tosca.policies.nfv.riftio.ns_service_primitives:
+ derived_from: tosca.policies.Root
+ properties:
+ parameter:
+ type: map
+ entry_schema:
+ type: primitive_parameter
+ parameter_group:
+ type: tosca.datatypes.nfv.riftio.primitive_parameter_group
+ description: >-
+ Grouping of parameters which are logically grouped in UI
+ required: false
+ vnf_primitive_group:
+ type: tosca.datatypes.nfv.riftio.vnf_primitive_group
+ description: >-
+ List of service primitives grouped by VNF
+ required: false
+ user_defined_script:
+ type: string
+ description: >-
+ A user defined script
+ required: false
+ tosca.policies.nfv.riftio.initial_config_primitive:
+ derived_from: tosca.policies.Root
+ properties:
+ name:
+ type: string
+ seq:
+ type: integer
+ description: >-
+ Order in which to apply, when multiple ones are defined
+ default: 0
+ constraints:
+ - greater_or_equal: 0
+ parameter:
+ type: map
+ entry_schema:
+ type: string
+ user_defined_script:
+ type: string
+ tosca.policies.nfv.riftio.users:
+ derived_from: tosca.policies.Root
+ description: >-
+ Specify list of public keys to be injected as
+ part of NS instantitation. Use default as entry,
+ to specify the key pairs for default user.
+ properties:
+ user_info:
+ type: string
+ description: >-
+ The user\'s real name
+ required: false
+ key_pairs:
+ type: map
+ description: >-
+ List of public keys for the user
+ entry_schema:
+ type: string
+ required: true
+ tosca.policies.nfv.riftio.dependency:
+ derived_from: tosca.policies.Root
+ description: >-
+ Map dependency between VDUs or VNFs
+ properties:
+ parameter:
+ type: map
+ entry_schema:
+ type: string
+ description: >-
+ Parameter and value for the config
+ tosca.nfv.datatypes.policyType:
+ properties:
+ type:
+ type: string
+ required: false
+ constraints:
+ - valid_values: [ ACL ]
+ criteria:
+ type: list
+ required: true
+ entry_schema:
+ type: tosca.nfv.datatypes.aclType
+
+
+
--- /dev/null
+TOSCA-Meta-File-Version: 1.0
+CSAR-Version: 1.1
+Created-By: RIFT.io
+Entry-Definitions: Definitions/ping_pong_nsd.yaml
\ No newline at end of file
--- /dev/null
+#!/usr/bin/env python3
+
+############################################################################
+# Copyright 2016 RIFT.IO Inc #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+############################################################################
+
+
+import argparse
+import logging
+import os
+import subprocess
+import sys
+import time
+
+import yaml
+
+
+def ping_set_rate(yaml_cfg, logger):
+ '''Use curl and set traffic rate on ping vnf'''
+
+ def set_rate(mgmt_ip, port, rate):
+ curl_cmd = '''curl -D /dev/stdout \
+ -H "Accept: application/vnd.yang.data+xml" \
+ -H "Content-Type: application/vnd.yang.data+json" \
+ -X POST \
+ -d "{{ \\"rate\\":{ping_rate} }}" \
+ http://{ping_mgmt_ip}:{ping_mgmt_port}/api/v1/ping/rate
+'''.format(ping_mgmt_ip=mgmt_ip,
+ ping_mgmt_port=port,
+ ping_rate=rate)
+
+ logger.debug("Executing cmd: %s", curl_cmd)
+ subprocess.check_call(curl_cmd, shell=True)
+
+ # Get the ping rate
+ rate = yaml_cfg['parameter']['rate']
+
+ # Set ping rate
+ for index, vnfr in yaml_cfg['vnfr'].items():
+ logger.debug("VNFR {}: {}".format(index, vnfr))
+
+ # Check if it is pong vnf
+ if 'ping_vnfd' in vnfr['name']:
+ vnf_type = 'ping'
+ port = 18888
+ set_rate(vnfr['mgmt_ip_address'], port, rate)
+ break
+
+def main(argv=sys.argv[1:]):
+ try:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
+ parser.add_argument("-q", "--quiet", dest="verbose", action="store_false")
+ args = parser.parse_args()
+
+ run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
+ if not os.path.exists(run_dir):
+ os.makedirs(run_dir)
+ log_file = "{}/ping_set_rate-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
+ logging.basicConfig(filename=log_file, level=logging.DEBUG)
+ logger = logging.getLogger()
+
+ except Exception as e:
+ print("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+ try:
+ ch = logging.StreamHandler()
+ if args.verbose:
+ ch.setLevel(logging.DEBUG)
+ else:
+ ch.setLevel(logging.INFO)
+
+ # create formatter and add it to the handlers
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ ch.setFormatter(formatter)
+ logger.addHandler(ch)
+
+ except Exception as e:
+ logger.exception(e)
+ raise e
+
+ try:
+ yaml_str = args.yaml_cfg_file.read()
+ # logger.debug("Input YAML file:\n{}".format(yaml_str))
+ yaml_cfg = yaml.load(yaml_str)
+ logger.debug("Input YAML: {}".format(yaml_cfg))
+
+ ping_set_rate(yaml_cfg, logger)
+
+ except Exception as e:
+ logger.exception(e)
+ raise e
+
+if __name__ == "__main__":
+ main()
--- /dev/null
+#!/usr/bin/env python3
+
+############################################################################
+# Copyright 2016 RIFT.IO Inc #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+############################################################################
+
+
+import argparse
+import logging
+import os
+import subprocess
+import sys
+import time
+
+import yaml
+
+
+def start_traffic(yaml_cfg, logger):
+ '''Use curl and set admin status to enable on pong and ping vnfs'''
+
+ def enable_service(mgmt_ip, port, vnf_type):
+ curl_cmd = 'curl -D /dev/null -H "Accept: application/vnd.yang.data' \
+ '+xml" -H "Content-Type: application/vnd.yang.data+json" ' \
+ '-X POST -d "{{\\"enable\\":true}}" http://{mgmt_ip}:' \
+ '{mgmt_port}/api/v1/{vnf_type}/adminstatus/state'. \
+ format(
+ mgmt_ip=mgmt_ip,
+ mgmt_port=port,
+ vnf_type=vnf_type)
+
+ logger.debug("Executing cmd: %s", curl_cmd)
+ subprocess.check_call(curl_cmd, shell=True)
+
+ # Enable pong service first
+ for index, vnfr in yaml_cfg['vnfr'].items():
+ logger.debug("VNFR {}: {}".format(index, vnfr))
+
+ # Check if it is pong vnf
+ if 'pong_vnfd' in vnfr['name']:
+ vnf_type = 'pong'
+ port = 18889
+ enable_service(vnfr['mgmt_ip_address'], port, vnf_type)
+ break
+
+ # Add a delay to provide pong port to come up
+ time.sleep(0.1)
+
+ # Enable ping service next
+ for index, vnfr in yaml_cfg['vnfr'].items():
+ logger.debug("VNFR {}: {}".format(index, vnfr))
+
+ # Check if it is pong vnf
+ if 'ping_vnfd' in vnfr['name']:
+ vnf_type = 'ping'
+ port = 18888
+ enable_service(vnfr['mgmt_ip_address'], port, vnf_type)
+ break
+
+def main(argv=sys.argv[1:]):
+ try:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
+ parser.add_argument("-q", "--quiet", dest="verbose", action="store_false")
+ args = parser.parse_args()
+
+ run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
+ if not os.path.exists(run_dir):
+ os.makedirs(run_dir)
+ log_file = "{}/ping_pong_start_traffic-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
+ logging.basicConfig(filename=log_file, level=logging.DEBUG)
+ logger = logging.getLogger()
+
+ except Exception as e:
+ print("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+ try:
+ ch = logging.StreamHandler()
+ if args.verbose:
+ ch.setLevel(logging.DEBUG)
+ else:
+ ch.setLevel(logging.INFO)
+
+ # create formatter and add it to the handlers
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ ch.setFormatter(formatter)
+ logger.addHandler(ch)
+
+ except Exception as e:
+ logger.exception(e)
+ raise e
+
+ try:
+ yaml_str = args.yaml_cfg_file.read()
+ # logger.debug("Input YAML file:\n{}".format(yaml_str))
+ yaml_cfg = yaml.load(yaml_str)
+ logger.debug("Input YAML: {}".format(yaml_cfg))
+
+ start_traffic(yaml_cfg, logger)
+
+ except Exception as e:
+ logger.exception(e)
+ raise e
+
+if __name__ == "__main__":
+ main()
tosca_helloworld = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
- "data/tosca_helloworld.yaml")
+ "data/tosca_helloworld_nfv.yaml")
template_file = '--template-file=' + tosca_helloworld
template_validation = "--validate-only"
debug="--debug"
(self.template_file,
'--parameters=key'))
+ @unittest.skip
def test_valid_template(self):
try:
shell.main([self.template_file])
self.log.exception(e)
self.fail(self.failure_msg)
+ @unittest.skip
def test_validate_only(self):
try:
shell.main([self.template_file,
test_base_dir = os.path.join(os.path.dirname(
os.path.abspath(__file__)), 'data')
template_file = os.path.join(test_base_dir,
- "ping_pong_csar/Definitions/ping_pong_nsd.yaml")
+ "tosca_ping_pong_epa/Definitions/ping_pong_nsd.yaml")
template = '--template-file='+template_file
temp_dir = tempfile.mkdtemp()
output_dir = "--output-dir=" + temp_dir
shutil.rmtree(temp_dir)
else:
self.log.warn("Generated desc in {}".format(temp_dir))
+
def test_input_csar(self):
test_base_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data')
- template_file = os.path.join(test_base_dir, "ping_pong_csar.zip")
+ template_file = os.path.join(test_base_dir, "tosca_ping_pong_epa.zip")
template = '--template-file='+template_file
temp_dir = tempfile.mkdtemp()
output_dir = "--output-dir=" + temp_dir
shutil.rmtree(temp_dir)
else:
self.log.warn("Generated desc in {}".format(temp_dir))
-
+
+ @unittest.skip
def test_input_csar_no_gi(self):
test_base_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data')
- template_file = os.path.join(test_base_dir, "ping_pong_csar.zip")
+ template_file = os.path.join(test_base_dir, "tosca_ping_pong_epa.zip")
template = '--template-file='+template_file
temp_dir = tempfile.mkdtemp()
output_dir = "--output-dir=" + temp_dir
--- /dev/null
+#!/usr/bin/env python3
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import abc
+import asyncio
+import logging
+from time import sleep
+
+import gi
+gi.require_version('RwProjectManoYang', '1.0')
+gi.require_version('RwDts', '1.0')
+from gi.repository import (
+ RwProjectManoYang,
+ RwDts as rwdts,
+ ProtobufC,
+ RwTypes,
+)
+
+import rift.tasklets
+
+
+class ManoProjectError(Exception):
+ pass
+
+
+class ManoProjNameSetErr(ManoProjectError):
+ pass
+
+
+class ManoProjXpathNoProjErr(ManoProjectError):
+ pass
+
+
+class ManoProjXpathKeyErr(ManoProjectError):
+ pass
+
+
+class ManoProjXpathNotRootErr(ManoProjectError):
+ pass
+
+
+class ManoProjXpathPresentErr(ManoProjectError):
+ pass
+
+
+NS = 'rw-project'
+PROJECT = 'project'
+NS_PROJECT = '{}:{}'.format(NS, PROJECT)
+XPATH = '/{}'.format(NS_PROJECT)
+XPATH_LEN = len(XPATH)
+
+NAME = 'name'
+NAME_LEN = len(NAME)
+NS_NAME = '{}:{}'.format(NS, NAME)
+
+DEFAULT_PROJECT = 'default'
+DEFAULT_PREFIX = "{}[{}='{}']".format(XPATH,
+ NS_NAME,
+ DEFAULT_PROJECT)
+
+
+class ManoProject(object):
+ '''Class to handle the project name'''
+
+ log = None
+
+ @classmethod
+ def instance_from_xpath(cls, xpath, log):
+ name = cls.from_xpath(xpath, log)
+ if name is None:
+ return None
+
+ proj = ManoProject(log, name=name)
+ return proj
+
+ @classmethod
+ def from_xpath(cls, xpath, log):
+ log.debug("Get project name from {}".format(xpath));
+
+ if XPATH in xpath:
+ idx = xpath.find(XPATH)
+ if idx == -1:
+ msg = "Project not found in XPATH: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathNoProjErr(msg)
+
+ sub = xpath[idx+XPATH_LEN:].strip()
+ if (len(sub) < NAME_LEN) or (sub[0] != '['):
+ msg = "Project name not found in XPath: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathKeyErr(msg)
+
+ sub = sub[1:].strip()
+ idx = sub.find(NS_NAME)
+ if idx == -1:
+ idx = sub.find(NAME)
+ if idx != 0:
+ msg = "Project name not found in XPath: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathKeyErr(msg)
+
+ idx = sub.find(']')
+ if idx == -1:
+ msg = "XPath is invalid: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathKeyErr(msg)
+
+ sub = sub[:idx].strip()
+ try:
+ log.debug("Key and value found: {}".format(sub))
+ k, n = sub.split("=", 2)
+ name = n.strip(' \'"')
+ if name is None:
+ msg = "Project name is empty in XPath".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathKeyErr (msg)
+
+ log.debug("Found project name {} from XPath {}".
+ format(name, xpath))
+ return name
+
+ except ValueError as e:
+ msg = "Project name not found in XPath: {}, exception: {}" \
+ .format(xpath, e)
+ log.exception(msg)
+ raise ManoProjXpathKeyErr(msg)
+ else:
+ msg = "Project not found in XPATH: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathNoProjErr(msg)
+
+ @classmethod
+ def get_log(cls):
+ if not cls.log:
+ cls.log = logging.getLogger('rw-mano-log.rw-project')
+ cls.log.setLevel(logging.ERROR)
+
+ @classmethod
+ def prefix_project(cls, xpath, project=None, log=None):
+ if log is None:
+ log = cls.get_log()
+
+ if project is None:
+ project = DEFAULT_PROJECT
+ proj_prefix = DEFAULT_PREFIX
+ else:
+ proj_prefix = "{}[{}='{}']".format(XPATH,
+ NS_NAME,
+ project)
+
+ prefix = ''
+ suffix = xpath
+ idx = xpath.find('C,/')
+ if idx == -1:
+ idx = xpath.find('D,/')
+
+ suffix = xpath
+ if idx != -1:
+ prefix = xpath[:2]
+ suffix = xpath[2:]
+
+ if suffix[0] != '/':
+ msg = "Non-rooted xpath provided: {}".format(xpath)
+ log.error(msg)
+ raise ManoProjXpathNotRootErr(msg)
+
+ idx = suffix.find(XPATH)
+ if idx == 0:
+ name = cls.from_xpath(xpath, log)
+ if name == project:
+ log.debug("Project already in the XPATH: {}".format(xpath))
+ return xpath
+
+ else:
+ msg = "Different project {} already in XPATH {}". \
+ format(name, xpath)
+ log.error(msg)
+ raise ManoProjXpathPresentErr(msg)
+
+ ret = prefix + proj_prefix + suffix
+ return ret
+
+
+ def __init__(self, log, name=None, tasklet=None):
+ self._log = log
+ self._name = None
+ self._prefix = None
+ self._pbcm = None
+ self._tasklet = None
+ self._dts = None
+ self._loop = None
+ self._log_hdl = None
+
+ # Track if the apply config was received
+ self._apply = False
+
+ if name:
+ self.name = name
+
+ def update(self, tasklet):
+ # Store the commonly used properties from a tasklet
+ self._tasklet = tasklet
+ self._log_hdl = tasklet.log_hdl
+ self._dts = tasklet.dts
+ self._loop = tasklet.loop
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def log(self):
+ return self._log
+
+ @property
+ def prefix(self):
+ return self._prefix
+
+ @property
+ def pbcm(self):
+ return self._pbcm
+
+ @property
+ def config(self):
+ return self._pbcm.project_config
+
+ @property
+ def tasklet(self):
+ return self._tasklet
+
+ @property
+ def log_hdl(self):
+ return self._log_hdl
+
+ @property
+ def dts(self):
+ return self._dts
+
+ @property
+ def loop(self):
+ return self._loop
+
+ @name.setter
+ def name(self, value):
+ if self._name is None:
+ self._name = value
+ self._prefix = "{}[{}='{}']".format(XPATH,
+ NS_NAME,
+ self._name)
+ self._pbcm = RwProjectManoYang.YangData_RwProject_Project(
+ name=self._name)
+
+ elif self._name == value:
+ self._log.debug("Setting the same name again for project {}".
+ format(value))
+ else:
+ msg = "Project name already set to {}".format(self._name)
+ self._log.error(msg)
+ raise ManoProjNameSetErr(msg)
+
+ def set_from_xpath(self, xpath):
+ self.name = ManoProject.from_xpath(xpath, self._log)
+
+ def add_project(self, xpath):
+ return ManoProject.prefix_project(xpath, log=self._log, project=self._name)
+
+ @abc.abstractmethod
+ @asyncio.coroutine
+ def delete_prepare(self):
+ self._log.debug("Delete prepare for project {}".format(self._name))
+ return (True, "True")
+
+ @abc.abstractmethod
+ @asyncio.coroutine
+ def register(self):
+ msg = "Register not implemented for project type {}". \
+ format(self.__class__.__name__)
+ self._log.error(msg)
+ raise NotImplementedError(msg)
+
+ @abc.abstractmethod
+ def deregister(self):
+ msg = "De-register not implemented for project type {}". \
+ format(self.__class__.__name__)
+ self._log.error(msg)
+ raise NotImplementedError(msg)
+
+ def rpc_check(self, msg, xact_info=None):
+ '''Check if the rpc is for this project'''
+ try:
+ project = msg.project_name
+ except AttributeError as e:
+ project = DEFAULT_PROJECT
+
+ if project != self.name:
+ self._log.debug("Project {}: RPC is for different project {}".
+ format(self.name, project))
+ if xact_info:
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ return False
+
+ return True
+
+ @asyncio.coroutine
+ def create_project(self, dts):
+ proj_xpath = "C,{}/config".format(self.prefix)
+ self._log.info("Creating project: {} with {}".
+ format(proj_xpath, self.config.as_dict()))
+
+ yield from dts.query_create(proj_xpath,
+ rwdts.XactFlag.ADVISE,
+ self.config)
+
+
+def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
+ #TODO: Check why this is getting called during project delete
+ if not dts_member_reg:
+ return [], [], []
+
+ # Unforunately, it is currently difficult to figure out what has exactly
+ # changed in this xact without Pbdelta support (RIFT-4916)
+ # As a workaround, we can fetch the pre and post xact elements and
+ # perform a comparison to figure out adds/deletes/updates
+ xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
+ curr_cfgs = list(dts_member_reg.elements)
+
+ xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
+ curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
+
+ # Find Adds
+ added_keys = set(xact_key_map) - set(curr_key_map)
+ added_cfgs = [xact_key_map[key] for key in added_keys]
+
+ # Find Deletes
+ deleted_keys = set(curr_key_map) - set(xact_key_map)
+ deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
+
+ # Find Updates
+ updated_keys = set(curr_key_map) & set(xact_key_map)
+ updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
+
+ return added_cfgs, deleted_cfgs, updated_cfgs
+
+
+class ProjectConfigCallbacks(object):
+ def __init__(self,
+ on_add_apply=None, on_add_prepare=None,
+ on_delete_apply=None, on_delete_prepare=None):
+
+ @asyncio.coroutine
+ def prepare_noop(*args, **kwargs):
+ pass
+
+ def apply_noop(*args, **kwargs):
+ pass
+
+ self.on_add_apply = on_add_apply
+ self.on_add_prepare = on_add_prepare
+ self.on_delete_apply = on_delete_apply
+ self.on_delete_prepare = on_delete_prepare
+
+ for f in ('on_add_apply', 'on_delete_apply'):
+ ref = getattr(self, f)
+ if ref is None:
+ setattr(self, f, apply_noop)
+ continue
+
+ if asyncio.iscoroutinefunction(ref):
+ raise ValueError('%s cannot be a coroutine' % (f,))
+
+ for f in ('on_add_prepare', 'on_delete_prepare'):
+ ref = getattr(self, f)
+ if ref is None:
+ setattr(self, f, prepare_noop)
+ continue
+
+ if not asyncio.iscoroutinefunction(ref):
+ raise ValueError("%s must be a coroutine" % f)
+
+
+class ProjectDtsHandler(object):
+ XPATH = "C,{}/project-config".format(XPATH)
+
+ def __init__(self, dts, log, callbacks, sub_config=True):
+ self._dts = dts
+ self._log = log
+ self._callbacks = callbacks
+
+ if sub_config:
+ self.xpath = ProjectDtsHandler.XPATH
+ self._key = 'name_ref'
+ else:
+ self.xpath = "C,{}".format(XPATH)
+ self._key = 'name'
+
+ self.reg = None
+ self.projects = []
+
+ @property
+ def log(self):
+ return self._log
+
+ @property
+ def dts(self):
+ return self._dts
+
+ def add_project(self, name):
+ self._log.info("Adding project: {}".format(name))
+
+ if name not in self.projects:
+ self._callbacks.on_add_apply(name)
+ self.projects.append(name)
+ else:
+ self._log.error("Project already present: {}".
+ format(name))
+
+ def delete_project(self, name):
+ self._log.info("Deleting project: {}".format(name))
+ if name in self.projects:
+ self._callbacks.on_delete_apply(name)
+ self.projects.remove(name)
+ else:
+ self._log.error("Unrecognized project: {}".
+ format(name))
+
+ def update_project(self, name):
+ """ Update an existing project
+
+ Currently, we do not take any action on MANO for this,
+ so no callbacks are defined
+
+ Arguments:
+ msg - The project config message
+ """
+ self._log.info("Updating project: {}".format(name))
+ if name in self.projects:
+ pass
+ else:
+ self.add_project(name)
+
+ def register(self):
+ def on_init(acg, xact, scratch):
+ self._log.debug("on_init")
+ scratch["projects"] = {
+ "added": [],
+ "deleted": [],
+ "updated": [],
+ }
+ return scratch
+
+ def readd_projects(xact):
+ self._log.info("Re-add projects")
+
+ for cfg, ks in self._reg.get_xact_elements(xact, include_keyspec=True):
+ xpath = ks.to_xpath(RwProjectManoYang.get_schema())
+ self._log.debug("Got ks {} for cfg {}".format(xpath, cfg.as_dict()))
+ name = ManoProject.from_xpath(xpath, self._log)
+ self._log.debug("Project to add: {}".format(name))
+ self.add_project(name)
+
+ @asyncio.coroutine
+ def apply_config(dts, acg, xact, action, scratch):
+ self._log.debug("Got project apply config (xact: %s) (action: %s): %s",
+ xact, action, scratch)
+
+ if xact.xact is None:
+ if action == rwdts.AppconfAction.INSTALL:
+ readd_projects(xact)
+ else:
+ self._log.debug("No xact handle. Skipping apply config")
+
+ return
+
+ try:
+ add_cfgs = scratch["projects"]["added"]
+ except KeyError:
+ add_cfgs = []
+
+ try:
+ del_cfgs = scratch["projects"]["deleted"]
+ except KeyError:
+ del_cfgs = []
+
+ try:
+ update_cfgs = scratch["projects"]["updated"]
+ except KeyError:
+ update_cfgs = []
+
+
+ # Handle Deletes
+ for name in del_cfgs:
+ self.delete_project(name)
+
+ # Handle Adds
+ for name, msg in add_cfgs:
+ self.add_project(name)
+
+ # Handle Updates
+ for name, msg in update_cfgs:
+ self.update_project(name)
+
+ try:
+ del scratch["projects"]
+ except KeyError:
+ pass
+
+ return RwTypes.RwStatus.SUCCESS
+
+ @asyncio.coroutine
+ def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+ """ Prepare callback from DTS for Project """
+
+ action = xact_info.query_action
+ xpath = ks_path.to_xpath(RwProjectManoYang.get_schema())
+ self._log.debug("Project xpath: {}".format(xpath))
+ name = ManoProject.from_xpath(xpath, self._log)
+
+ self._log.debug("Project %s on_prepare config received (action: %s): %s",
+ name, xact_info.query_action, msg)
+
+ if action == rwdts.QueryAction.CREATE:
+ if name in self.projects:
+ self._log.debug("Project {} already exists. Ignore request".
+ format(name))
+ else:
+ yield from self._callbacks.on_add_prepare(name)
+ scratch["projects"]["added"].append((name, msg))
+
+ elif action == rwdts.QueryAction.UPDATE:
+ if name in self.projects:
+ scratch["projects"]["updated"].append((name, msg))
+ else:
+ self._log.debug("Project {}: Invoking on_prepare add request".
+ format(name))
+ yield from self._callbacks.on_add_prepare(name)
+ scratch["projects"]["added"].append((name, msg))
+
+
+ elif action == rwdts.QueryAction.DELETE:
+ # Check if the entire project got deleted
+ fref = ProtobufC.FieldReference.alloc()
+ fref.goto_whole_message(msg.to_pbcm())
+ if fref.is_field_deleted():
+ if name in self.projects:
+ rc, delete_msg = yield from self._callbacks.on_delete_prepare(name)
+ if not rc:
+ self._log.error("Project {} should not be deleted. Reason : {}".
+ format(name, delete_msg))
+
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+ ProjectDtsHandler.XPATH,
+ delete_msg)
+
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
+
+ scratch["projects"]["deleted"].append(name)
+ else:
+ self._log.warning("Delete on unknown project: {}".
+ format(name))
+ else:
+ self._log.error("Action (%s) NOT SUPPORTED", action)
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ self._log.debug("Registering for project config using xpath: %s",
+ ProjectDtsHandler.XPATH,
+ )
+
+ acg_handler = rift.tasklets.AppConfGroup.Handler(
+ on_apply=apply_config,
+ on_init=on_init)
+
+ with self._dts.appconf_group_create(acg_handler) as acg:
+ self._reg = acg.register(
+ xpath=ProjectDtsHandler.XPATH,
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+ on_prepare=on_prepare,
+ )
+
+
+class ProjectHandler(object):
+ def __init__(self, tasklet, project_class, **kw):
+ self._tasklet = tasklet
+ self._log = tasklet.log
+ self._log_hdl = tasklet.log_hdl
+ self._dts = tasklet.dts
+ self._loop = tasklet.loop
+ self._class = project_class
+ self._kw = kw
+
+ self._log.debug("Creating project config handler")
+ self.project_cfg_handler = ProjectDtsHandler(
+ self._dts, self._log,
+ ProjectConfigCallbacks(
+ on_add_apply=self.on_project_added,
+ on_add_prepare=self.on_add_prepare,
+ on_delete_apply=self.on_project_deleted,
+ on_delete_prepare=self.on_delete_prepare,
+ )
+ )
+
+ def _get_tasklet_name(self):
+ return self._tasklet.tasklet_info.instance_name
+
+ def _get_project(self, name):
+ try:
+ proj = self._tasklet.projects[name]
+ except Exception as e:
+ self._log.exception("Project {} ({})not found for tasklet {}: {}".
+ format(name, list(self._tasklet.projects.keys()),
+ self._get_tasklet_name(), e))
+ raise e
+
+ return proj
+
+ def on_project_deleted(self, name):
+ self._log.debug("Project {} deleted".format(name))
+ try:
+ self._get_project(name).deregister()
+ except Exception as e:
+ self._log.exception("Project {} deregister for {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+
+ try:
+ proj = self._tasklet.projects.pop(name)
+ del proj
+ except Exception as e:
+ self._log.exception("Project {} delete for {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+
+ def on_project_added(self, name):
+ if name not in self._tasklet.projects:
+ try:
+ self._tasklet.projects[name] = \
+ self._class(name, self._tasklet, **(self._kw))
+ task = asyncio.ensure_future(self._get_project(name).register(),
+ loop=self._loop)
+
+ self._log.debug("Project {} register: {}".format(name, str(task)))
+
+ except Exception as e:
+ self._log.exception("Project {} create for {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+ raise e
+
+ self._log.debug("Project {} added to tasklet {}".
+ format(name, self._get_tasklet_name()))
+ self._get_project(name)._apply = True
+
+ @asyncio.coroutine
+ def on_add_prepare(self, name):
+ self._log.debug("Project {} to be added to {}".
+ format(name, self._get_tasklet_name()))
+ if name in self._tasklet.projects:
+ self._log.error("Project {} already exists for {}".
+ format(name, self._get_tasklet_name()))
+ return
+
+ try:
+ self._tasklet.projects[name] = \
+ self._class(name, self._tasklet, **(self._kw))
+ yield from self._get_project(name).register()
+
+ except Exception as e:
+ self._log.exception("Project {} create for {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+ raise e
+
+ @asyncio.coroutine
+ def on_delete_prepare(self, name):
+ self._log.debug("Project {} being deleted for tasklet {}".
+ format(name, self._get_tasklet_name()))
+ rc, delete_msg = yield from self._get_project(name).delete_prepare()
+ return rc, delete_msg
+
+ def register(self):
+ self.project_cfg_handler.register()
--- /dev/null
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Copyright 2016 RIFT.io Inc
+
+
+import argparse
+import logging
+import os
+import socket
+import stat
+import sys
+import tempfile
+
+from Crypto.PublicKey import RSA
+
+
+class ManoSshKey(object):
+ '''
+ Generate a SSH key pair and store them in a file
+ '''
+
+ def __init__(self, log, size=2048):
+ self._log = log
+ self._size = size
+
+ self._key = None
+ self._key_pem = None
+ self._pub_ssh = None
+ self._key_file = None
+ self._pub_file = None
+
+ @property
+ def log(self):
+ return self._log
+
+ @property
+ def size(self):
+ return self._size
+
+ @property
+ def private_key(self):
+ if self._key is None:
+ self._gen_keys()
+ return self._key_pem
+
+ @property
+ def public_key(self):
+ if self._key is None:
+ self._gen_keys()
+ return self._pub_ssh
+
+ @property
+ def private_key_file(self):
+ return self._key_file
+
+ @property
+ def public_key_file(self):
+ return self._pub_file
+
+ def _gen_keys(self):
+ if self._key:
+ return
+
+ self.log.info("Generating key of size: {}".format(self.size))
+
+ self._key = RSA.generate(self.size, os.urandom)
+ self._key_pem = self._key.exportKey('PEM').decode('utf-8')
+ self.log.debug("Private key PEM: {}".format(self._key_pem))
+
+ # Public key export as 'OpenSSH' has a bug
+ # (https://github.com/dlitz/pycrypto/issues/99)
+
+ username = None
+ try:
+ username = os.getlogin()
+ hostname = socket.getfqdn()
+ except OSError:
+ pass
+
+ pub = self._key.publickey().exportKey('OpenSSH').decode('utf-8')
+ if username:
+ self._pub_ssh = '{} {}@{}'.format(pub, username, hostname)
+ else:
+ self._pub_ssh = pub
+ self.log.debug("Public key SSH: {}".format(self._pub_ssh))
+
+ def write_to_disk(self,
+ name="id_rsa",
+ directory="."):
+ if self._key is None:
+ self._gen_keys()
+
+ path = os.path.abspath(directory)
+ self._pub_file = "{}/{}.pub".format(path, name)
+ self._key_file = "{}/{}.key".format(path, name)
+
+ with open(self._key_file, 'w') as content_file:
+ content_file.write(self.private_key)
+ os.chmod(self._key_file, stat.S_IREAD|stat.S_IWRITE)
+
+ with open(self._pub_file, 'w') as content_file:
+ content_file.write(self.public_key)
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description='Generate SSH key pair')
+ parser.add_argument("-s", "--size", type=int, default=2048, help="Key size")
+ parser.add_argument("-d", "--directory", help="Directory to store the keys")
+ parser.add_argument("-n", "--name", help="Name for the key file")
+ parser.add_argument("--debug", help="Enable debug logging",
+ action="store_true")
+ args = parser.parse_args()
+
+ fmt = logging.Formatter(
+ '%(asctime)-23s %(levelname)-5s (%(name)s@%(process)d:' \
+ '%(filename)s:%(lineno)d) - %(message)s')
+ stderr_handler = logging.StreamHandler(stream=sys.stderr)
+ stderr_handler.setFormatter(fmt)
+ if args.debug:
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+ log = logging.getLogger('rw-mano-ssh-keys')
+ log.addHandler(stderr_handler)
+
+ log.info("Args passed: {}".format(args))
+ if args.directory:
+ path = args.directory
+ else:
+ path = tempfile.mkdtemp()
+
+ kp = ManoSshKey(log, size=args.size)
+ kp.write_to_disk(directory=path)
+ log.info("Private Key: {}".format(kp.private_key))
+ log.info("Public key: {}".format(kp.public_key))
+ log.info("Key file: {}, Public file: {}".format(kp.private_key_file,
+ kp.public_key_file))
type: tosca.capabilities.nfv.riftio.hypervisor_epa
host_epa:
type: tosca.capabilities.nfv.riftio.host_epa
+ monitoring_param_2:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
+ monitoring_param_3:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
+ monitoring_param_4:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
+ monitoring_param_5:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
+ monitoring_param_6:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
+ monitoring_param_7:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
+ monitoring_param_8:
+ type: tosca.capabilities.nfv.riftio.monitoring_param
tosca.nodes.nfv.riftio.CP1:
derived_from: tosca.nodes.nfv.CP
properties:
description: >-
A user defined script
required: false
+ name:
+ type: string
+ description: >-
+ Name of primitive
+ required: false
tosca.policies.nfv.riftio.initial_config_primitive:
derived_from: tosca.policies.Root
properties:
# from REQUIRED_FIELDS below
NAME = 'name'
- REQUIRED_FIELDS = (DESC, VERSION, VENDOR, ID) = \
- ('description', 'version', 'vendor', 'id')
+ REQUIRED_FIELDS = (DESC, VERSION, VENDOR, ID, LOGO) = \
+ ('description', 'version', 'vendor', 'id', 'logo')
COMMON_FIELDS = (PATH, PORT, HOST, XPATH, TYPE, COUNT, FILE,
NFV_COMPUTE, HOST_EPA, VSWITCH_EPA, HYPERVISOR_EPA, GUEST_EPA) = \
T_ELAN,
T_VNFFG,
T_FP,
+ T_NS_PRIMITIVE,
) = \
('tosca.policies.nfv.riftio.vnf_configuration',
'tosca.capabilities.riftio.http_endpoint_type',
'tosca.nodes.nfv.riftio.ELAN',
'tosca.groups.nfv.VNFFG',
'tosca.nodes.nfv.riftio.FP1',
+ 'tosca.policies.nfv.riftio.ns_service_primitives',
)
SUPPORT_FILES = ( SRC, DEST, EXISTING) = \
('source', 'destination', 'existing')
- SUPPORT_DIRS = (IMAGE_DIR, SCRIPT_DIR, CLOUD_INIT_DIR) = \
- ('images', 'scripts','cloud_init')
+ SUPPORT_DIRS = (IMAGE_DIR, SCRIPT_DIR, CLOUD_INIT_DIR, ICON_DIR) = \
+ ('images', 'scripts','cloud_init', 'icons')
def __init__(self,
log,
# Add all types
types_list = [ToscaResource.DATA_TYPES, ToscaResource.CAPABILITY_TYPES,
- ToscaResource.NODE_TYPES,
+ ToscaResource.NODE_TYPES, ToscaResource.ARTIFACT_TYPES,
ToscaResource.GROUP_TYPES, ToscaResource.POLICY_TYPES]
for typ in types_list:
if typ in tosca:
for nsd in self.yangs[self.NSD]:
self.log.debug(_("Translate descriptor of type nsd: {}").
format(nsd))
+ node_name = nsd.pop(ToscaResource.NAME).replace(' ','_')
+ node_name = node_name if node_name.endswith('nsd') else ''.join([node_name, '_nsd'])
tosca_node = TranslateDescriptors. \
YANG_TO_TOSCA_TYPE[self.NSD](
self.log,
- nsd.pop(ToscaResource.NAME),
+ node_name,
self.NSD,
nsd,
self.vnfd_files)
self.tosca_resources.append(tosca_node)
+ vnfd_name_list = []
if self.VNFD in self.yangs:
for vnfd in self.yangs[self.VNFD]:
- self.log.debug(_("Translate descriptor of type vnfd: {}").
- format(vnfd))
- tosca_node = TranslateDescriptors. \
- YANG_TO_TOSCA_TYPE[self.VNFD](
- self.log,
- vnfd.pop(ToscaResource.NAME),
- self.VNFD,
- vnfd)
- self.tosca_resources.append(tosca_node)
+ if vnfd['name'] not in vnfd_name_list:
+ self.log.debug(_("Translate descriptor of type vnfd: {}").
+ format(vnfd))
+ vnfd_name_list.append(vnfd['name'])
+ tosca_node = TranslateDescriptors. \
+ YANG_TO_TOSCA_TYPE[self.VNFD](
+ self.log,
+ vnfd.pop(ToscaResource.NAME),
+ self.VNFD,
+ vnfd)
+ self.tosca_resources.append(tosca_node)
# First translate VNFDs
for node in self.tosca_resources:
import ToscaResource
from rift.mano.yang_translator.rwmano.yang.yang_vld import YangVld
from collections import OrderedDict
+import re
TARGET_CLASS_NAME = 'YangNsd'
INITIAL_CFG,) = \
('scaling_group_descriptor', 'service_primitive',
'user_defined_script', 'scaling_config_action',
- 'trigger', 'ns_config_primitive_name_ref',
+ 'trigger', 'ns_service_primitive_name_ref',
'constituent_vnfd', 'vnfd_member',
'min_instance_count', 'max_instance_count',
'input_parameter_xpath', 'config_actions',
- 'initial_config_primitive',)
+ 'initial_config_primitive', )
def __init__(self,
log,
self.conf_prims = []
self.scale_grps = []
self.initial_cfg = []
+ self.service_primitive = []
self.placement_groups = []
self.vnf_id_to_vnf_map = {}
self.vnfd_files = vnfd_files
self.forwarding_paths = {}
self.substitution_mapping_forwarder = []
self.vnfd_sfc_map = None
+ self.duplicate_vnfd_name_list = []
def handle_yang(self, vnfds):
self.log.debug(_("Process NSD desc {0}: {1}").
self.inputs.append({
self.NAME:
self.map_yang_name_to_tosca(
- val.replace('/nsd:nsd-catalog/nsd:nsd/nsd:', ''))})
+ val.replace('/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd/nsd:', ''))})
if len(param):
self.log.warn(_("{0}, Did not process the following for "
"input param {1}: {2}").
if key in dic:
icp[key] = dic.pop(key)
- params = {}
+ params = []
if self.PARAM in dic:
for p in dic.pop(self.PARAM):
if (self.NAME in p and
self.VALUE in p):
- params[p[self.NAME]] = p[self.VALUE]
+ params.append({self.NAME: p[self.NAME], self.VALUE:p[self.VALUE]})
else:
# TODO (pjoseph): Need to add support to read the
# config file and get the value from that
self.log.debug(_("{0}, Initial config {1}").format(self, icp))
self.initial_cfg.append({self.PROPERTIES : icp})
+ def process_service_primitive(dic):
+ prop = {}
+ params = []
+ for key in [self.NAME, self.USER_DEF_SCRIPT]:
+ if key in dic:
+ prop[key] = dic.pop(key)
+
+ if self.PARAM in dic:
+ for p in dic.pop(self.PARAM):
+ p_entry = {}
+ for name, value in p.items():
+ p_entry[name] = value
+ params.append(p_entry)
+
+ if len(params):
+ prop[self.PARAM] = params
+
+ conf_prim = {self.NAME: prop[self.NAME], self.DESC : 'TestDescription'}
+ if self.USER_DEF_SCRIPT in prop:
+ conf_prim[self.USER_DEF_SCRIPT] = prop[self.USER_DEF_SCRIPT]
+ self.conf_prims.append(conf_prim)
+
+ self.service_primitive.append({self.PROPERTIES : prop})
+
+
def process_vld(vld, dic):
vld_conf = {}
vld_prop = {}
dic = deepcopy(self.yang)
try:
for key in self.REQUIRED_FIELDS:
- self.props[key] = dic.pop(key)
+ if key in dic:
+ self.props[key] = dic.pop(key)
self.id = self.props[self.ID]
# Process constituent VNFDs
+
+ vnfd_name_list = []
+ member_vnf_index_list = []
if self.CONST_VNFD in dic:
for cvnfd in dic.pop(self.CONST_VNFD):
- process_const_vnfd(cvnfd)
+ if cvnfd[self.VNFD_ID_REF] not in member_vnf_index_list:
+ member_vnf_index_list.append(cvnfd[self.VNFD_ID_REF])
+ process_const_vnfd(cvnfd)
+ else:
+ self.duplicate_vnfd_name_list.append(self.vnf_id_to_vnf_map[cvnfd[self.VNFD_ID_REF]])
# Process VLDs
if self.VLD in dic:
process_vnffgd(dic[self.VNFFGD], dic)
- #if self.
+
+
+ # Process initial config primitives
+ if self.INITIAL_CFG in dic:
+ for icp_dic in dic.pop(self.INITIAL_CFG):
+ process_initial_config(icp_dic)
- # Process config primitives
+ # NS service prmitive
if self.CONF_PRIM in dic:
- for cprim in dic.pop(self.CONF_PRIM):
- conf_prim = {self.NAME: cprim.pop(self.NAME), self.DESC : 'TestDescription'}
- if self.USER_DEF_SCRIPT in cprim:
- conf_prim[self.USER_DEF_SCRIPT] = \
- cprim.pop(self.USER_DEF_SCRIPT)
- self.conf_prims.append(conf_prim)
- else:
- err_msg = (_("{0}, Only user defined script supported "
- "in config-primitive for now {}: {}").
- format(self, conf_prim, cprim))
- self.log.error(err_msg)
- raise ValidationError(message=err_msg)
+ for icp_dic in dic.pop(self.CONF_PRIM):
+ process_service_primitive(icp_dic)
# Process scaling group
if self.SCALE_GRP in dic:
for sg_dic in dic.pop(self.SCALE_GRP):
process_scale_grp(sg_dic)
- # Process initial config primitives
- if self.INITIAL_CFG in dic:
- for icp_dic in dic.pop(self.INITIAL_CFG):
- process_initial_config(icp_dic)
-
# Process the input params
if self.INPUT_PARAM_XPATH in dic:
for param in dic.pop(self.INPUT_PARAM_XPATH):
self.VENDOR: self.props[self.VENDOR],
self.VERSION: self.props[self.VERSION],
}
+ if self.LOGO in self.props:
+ tosca[self.METADATA][self.LOGO] = self.props[self.LOGO]
+
if len(self.vnfd_files) > 0:
tosca[self.IMPORT] = []
imports = []
- for vnfd_file in self.vnfd_files:
+ for vnfd_file in set(self.vnfd_files):
tosca[self.IMPORT].append('"{0}.yaml"'.format(vnfd_file))
tosca[self.TOPOLOGY_TMPL] = {}
tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL] = {}
# Add the VNFDs and VLDs
+ vnf_type_vld_list = []
for idx, vnfd in self.vnfds.items():
#vnfd.generate_vnf_template(tosca, idx)
node = {
if vnfd.name in self.vnf_to_vld_map:
vld_list = self.vnf_to_vld_map[vnfd.name]
node[self.REQUIREMENTS] = []
+
for vld_idx in range(0, len(vld_list)):
- vld_link_name = "{0}{1}".format("virtualLink", vld_idx + 1)
- vld_prop = {}
- vld_prop[vld_link_name] = vld_list[vld_idx]
- node[self.REQUIREMENTS].append(vld_prop)
- if vnfd.name in self._vnf_vld_conn_point_map:
- vnf_vld_list = self._vnf_vld_conn_point_map[vnfd.name]
- for vnf_vld in vnf_vld_list:
- vnfd.generate_vld_link(vld_link_name, vnf_vld[1])
+ if vnfd.vnf_type not in vnf_type_vld_list:
+ vld_link_name = "{0}{1}".format("virtualLink", vld_idx + 1)
+ vld_prop = {}
+ vld_prop[vld_link_name] = vld_list[vld_idx]
+ node[self.REQUIREMENTS].append(vld_prop)
+ if vnfd.vnf_type not in vnf_type_vld_list:
+ vnf_type_vld_list.append(vnfd.vnf_type)
+ if vnfd.name in self._vnf_vld_conn_point_map:
+ vnf_vld_list = set(self._vnf_vld_conn_point_map[vnfd.name])
+ for vnf_vld in vnf_vld_list:
+ vnfd.generate_vld_link(vld_link_name, vnf_vld[1])
for sub_mapping in self.substitution_mapping_forwarder:
if sub_mapping[0] == vnfd.name:
vnfd.generate_forwarder_sub_mapping(sub_mapping)
- for vnfd_name, cp_name in self.vnfd_sfc_map.items():
- if vnfd.name == vnfd_name:
- vnfd.generate_sfc_link(cp_name)
+ if self.vnfd_sfc_map:
+ for vnfd_name, cp_name in self.vnfd_sfc_map.items():
+ if vnfd.name == vnfd_name:
+ vnfd.generate_sfc_link(cp_name)
tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][vnfd.name] = node
+ v_idx = len(self.vnfds) + 1 + len(self.duplicate_vnfd_name_list)
+ for vnfd_name in self.duplicate_vnfd_name_list:
+ node = tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][vnfd_name]
+ new_node = deepcopy(node)
+ st = re.sub(r'\d+$', '', vnfd_name.rstrip('_vnfd'))
+
+ new_node[self.PROPERTIES][self.ID] = v_idx
+ node_name = "{}{}_vnfd".format(st, v_idx)
+ tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][node_name] = new_node
+ v_idx += 1
+
for vld_node_name in self.vlds:
tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][vld_node_name] = self.vlds[vld_node_name]
self.INITIAL_CFG: icpt
})
+ if len(self.service_primitive) > 0:
+ if self.POLICIES not in tosca[self.TOPOLOGY_TMPL]:
+ tosca[self.TOPOLOGY_TMPL][self.POLICIES] = []
+
+ for icp in self.service_primitive:
+ if len(tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL]) > 0:
+ node_name = list(tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL].keys())[0]
+ icpt = {
+ self.TYPE: self.T_NS_PRIMITIVE,
+ self.TARGETS : "[{0}]".format(node_name)
+ }
+ icpt.update(icp)
+ tosca[self.TOPOLOGY_TMPL][self.POLICIES].append({
+ 'ns_service_primitives': icpt
+ })
+
+
if len(self.placement_groups) > 0:
if self.POLICIES not in tosca[self.TOPOLOGY_TMPL]:
tosca[self.TOPOLOGY_TMPL][self.POLICIES] = []
self.DEST: "{}/{}".format(self.SCRIPT_DIR, script),
})
+ for prim in self.service_primitive:
+ if 'properties' in prim:
+ if 'user_defined_script' in prim['properties']:
+ script = os.path.basename(prim['properties']['user_defined_script'])
+ files.append({
+ self.TYPE: 'script',
+ self.NAME: script,
+ self.DEST: "{}/{}".format(self.SCRIPT_DIR, script),
+ })
+
+ if 'logo' in self.props:
+ icon = os.path.basename(self.props['logo'])
+ files.append({
+ self.TYPE: 'icons',
+ self.NAME: icon,
+ self.DEST: "{}/{}".format(self.ICON_DIR, icon),
+ })
+
+
# TODO (pjoseph): Add support for config scripts,
# charms, etc
('mgmt_interface', 'http_endpoint', 'monitoring_param')
vnf_prefix_type = 'tosca.nodes.nfv.riftio.'
+ VALUE_TYPE_CONVERSION_MAP = {
+ 'INTEGER' : 'integer',
+ 'INT' : 'integer',
+ 'STRING' : 'string',
+ 'DECIMAL' : 'float',
+ 'INTEGER': 'INTEGER',
+ 'DECIMAL' : 'float'
+
+ }
def __init__(self,
log,
for parameter in init_conf_prim['parameter']:
init_conf['parameter'].append({parameter['name']: parameter['value']})
init_config_prims.append(init_conf)
- vnf_conf['initial_config_primitive'] = init_config_prims
+ vnf_conf['initial_config'] = init_config_prims
self.vnf_configuration = vnf_conf
mon_param['url_path'] = param['http_endpoint_ref']
if 'json_query_method' in param:
mon_param['json_query_method'] = param['json_query_method'].lower()
+ #if 'value_type' in param:
+ # mon_param['constraints'] = {}
+ # mon_param['constraints']['value_type'] = YangVnfd.VALUE_TYPE_CONVERSION_MAP[param['value_type'].upper()]
if 'group_tag' in param:
ui_param['group_tag'] = param['group_tag']
if 'widget_type' in param:
dic = deepcopy(self.yang)
try:
for key in self.REQUIRED_FIELDS:
- self.props[key] = dic.pop(key)
+ if key in dic:
+ self.props[key] = dic.pop(key)
self.id = self.props[self.ID]
mon_param = {}
mon_param['properties'] = self.mon_param[0]
tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][vdu.get_name(self.name)][self.CAPABILITIES]['monitoring_param'] = mon_param #TEST
- if len(self.mon_param) == 2:
- mon_param = {}
- mon_param = {}
- mon_param['properties'] = self.mon_param[1]
- tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][vdu.get_name(self.name)][self.CAPABILITIES]['monitoring_param_1'] = mon_param
+ if len(self.mon_param) > 1:
+ for idx in range(1, len(self.mon_param)):
+ monitor_param_name = "monitoring_param_{}".format(idx)
+ mon_param = {}
+ mon_param = {}
+ mon_param['properties'] = self.mon_param[idx]
+ tosca[self.TOPOLOGY_TMPL][self.NODE_TMPL][vdu.get_name(self.name)][self.CAPABILITIES][monitor_param_name] = mon_param
node = {}
node[self.TYPE] = self.T_VNF1
for vdu in self.vdus:
if conn_point in vdu.cp_name_to_cp_node:
conn_point_node_name = vdu.cp_name_to_cp_node[conn_point]
- self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING][self.REQUIREMENTS].\
- append({virtualLink : "[{0}, {1}]".format(conn_point_node_name, "virtualLink")})
+ if {virtualLink : "[{0}, {1}]".format(conn_point_node_name, "virtualLink")} not in \
+ self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING][self.REQUIREMENTS]:
+ self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING][self.REQUIREMENTS].\
+ append({virtualLink : "[{0}, {1}]".format(conn_point_node_name, "virtualLink")})
if self.REQUIREMENTS not in self.tosca[self.NODE_TYPES][self.vnf_type]:
self.tosca[self.NODE_TYPES][self.vnf_type][self.REQUIREMENTS] = []
- self.tosca[self.NODE_TYPES][self.vnf_type][self.REQUIREMENTS].append({virtualLink : {
- "type": "tosca.nodes.nfv.VL"}})
+ if {virtualLink : {"type": "tosca.nodes.nfv.VL"}} not in self.tosca[self.NODE_TYPES][self.vnf_type][self.REQUIREMENTS]:
+ self.tosca[self.NODE_TYPES][self.vnf_type][self.REQUIREMENTS].append({virtualLink : {
+ "type": "tosca.nodes.nfv.VL"}})
+
def generate_forwarder_sub_mapping(self, sub_link):
if self.CAPABILITIES not in self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING]:
self.tosca[self.TOPOLOGY_TMPL][self.SUBSTITUTION_MAPPING][self.CAPABILITIES] = {}
import rift.package.cloud_init
import rift.package.script
import rift.package.store
-
+import rift.package.icon
class YangTranslator(object):
'''Invokes translation methods.'''
self.get_yangs()
else:
if 'nsd' in self.yangs:
- self.output_files['nsd'].append(self.yangs['nsd'][0]['short_name'])
+ self.output_files['nsd'].append(self.yangs['nsd'][0]['short_name'].replace(' ','_'))
if 'vnfd' in self.yangs:
for yang_vnfd in self.yangs['vnfd']:
- self.output_files['vnfd'].append(yang_vnfd['short_name'])
+ self.output_files['vnfd'].append(yang_vnfd['short_name'].replace(' ','_'))
self.node_translator = TranslateDescriptors(self.log,
self.yangs,
raise ValidationError(message="No NSD or VNFD uploaded")
else:
if 'nsd' in self.yangs:
- sub_folder_name = self.yangs['nsd'][0]['short_name']
+ sub_folder_name = self.yangs['nsd'][0]['short_name'].replace(' ','_')
elif 'vnfd' in self.yangs:
- sub_folder_name = self.yangs['vnfd'][0]['short_name']
+ sub_folder_name = self.yangs['vnfd'][0]['short_name'].replace(' ','_')
subdir = os.path.join(output_dir, sub_folder_name)
def_dir = os.path.join(subdir, 'Definitions')
os.makedirs(def_dir)
shutil.copy2(riftio_src_file, def_dir + "/riftiotypes.yaml")
+ tosca_meta_entry_file = None
for tmpl_key in tmpl_out:
tmpl = tmpl_out[tmpl_key]
- entry_file = os.path.join(def_dir, tmpl_key+'.yaml')
+ file_name = tmpl_key.replace(' ','_')
+ entry_file = os.path.join(def_dir, file_name+'.yaml')
+ if file_name.endswith('nsd'):
+ tosca_meta_entry_file = file_name
self.log.debug(_("Writing file {0}").
format(entry_file))
with open(entry_file, 'w+') as f:
f.write(tmpl[ToscaTemplate.TOSCA])
+ if tosca_meta_entry_file is None:
+ tosca_meta_entry_file = sub_folder_name
# Create the Tosca meta
meta_dir = os.path.join(subdir, 'TOSCA-Metadata')
os.makedirs(meta_dir)
CSAR-Version: 1.1
Created-By: RIFT.io
Entry-Definitions: Definitions/'''
- meta_data = "{}{}".format(meta, sub_folder_name+'.yaml')
+ meta_data = "{}{}".format(meta, tosca_meta_entry_file+'.yaml')
meta_file = os.path.join(meta_dir, 'TOSCA.meta')
self.log.debug(_("Writing file {0}:\n{1}").
format(meta_file, meta_data))
pkg.extract_file(script_file_map[fname],
dest_path)
break
+ elif ftype == 'icons':
+ icon_file_map = \
+ rift.package.icon.PackageIconExtractor.package_icon_files(pkg)
+ if fname in icon_file_map:
+ self.log.debug(_("Extracting script {0} to {1}").
+ format(fname, dest_path))
+ pkg.extract_file(icon_file_map[fname],
+ dest_path)
+ break
else:
self.log.warn(_("Unknown file type {0}: {1}").
os.chdir(subdir)
try:
- zip_file = key + '.zip'
+ zip_file = sub_folder_name + '.zip'
zip_path = os.path.join(output_dir, zip_file)
self.log.debug(_("Creating zip file {0}").format(zip_path))
zip_cmd = "zip -r {}.partial ."
data_types:
tosca.datatypes.network.riftio.vnf_configuration:
properties:
- config_delay:
- constraints:
- - greater_or_equal: 0
- default: 0
- required: no
- type: integer
config_details:
type: map
- config_priority:
- constraints:
- - greater_than: 0
- type: integer
- config_template:
- required: no
- type: string
config_type:
type: string
capability_types:
vendor: RIFT.io
version: 1.0
vnf_configuration:
- config_delay: 0
config_details:
script_type: bash
- config_priority: 2
- config_template: "\n#!/bin/bash\n\n# Rest API config\nping_mgmt_ip=<rw_mgmt_ip>\n\
- ping_mgmt_port=18888\n\n# VNF specific configuration\npong_server_ip=<rw_connection_point_name\
- \ pong_vnfd/cp0>\nping_rate=5\nserver_port=5555\n\n# Make rest API calls\
- \ to configure VNF\ncurl -D /dev/stdout \\\n -H \"Accept: application/vnd.yang.data+xml\"\
- \ \\\n -H \"Content-Type: application/vnd.yang.data+json\" \\\n \
- \ -X POST \\\n -d \"{\\\"ip\\\":\\\"$pong_server_ip\\\", \\\"port\\\
- \":$server_port}\" \\\n http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/server\n\
- rc=$?\nif [ $rc -ne 0 ]\nthen\n echo \"Failed to set server info for\
- \ ping!\"\n exit $rc\nfi\n\ncurl -D /dev/stdout \\\n -H \"Accept:\
- \ application/vnd.yang.data+xml\" \\\n -H \"Content-Type: application/vnd.yang.data+json\"\
- \ \\\n -X POST \\\n -d \"{\\\"rate\\\":$ping_rate}\" \\\n http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/rate\n\
- rc=$?\nif [ $rc -ne 0 ]\nthen\n echo \"Failed to set ping rate!\"\n\
- \ exit $rc\nfi\n\nexit 0\n"
config_type: script
capabilities:
http_endpoint:
from rift.mano.config_data import config
import gi
-gi.require_version('VnfdYang', '1.0')
+gi.require_version('ProjectVnfdYang', '1.0')
gi.require_version('RwYang', '1.0')
from gi.repository import (
- VnfdYang,
+ ProjectVnfdYang,
RwYang,
)
reader = config.VnfInitialConfigPrimitiveReader.from_yaml_file_hdl(yaml_hdl)
expected_primitives = [
- VnfdYang.InitialConfigPrimitive.from_dict({
+ VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_InitialConfigPrimitive.from_dict({
"name": "prim_1", "seq": 0, "parameter": [
{
"name": "hostname",
},
]
}),
- VnfdYang.InitialConfigPrimitive.from_dict({
+ VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_InitialConfigPrimitive.from_dict({
"name": "prim_2", "seq": 1
}),
]
--- /dev/null
+#!/usr/bin/env python3
+
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import argparse
+import asyncio
+import gi
+import logging
+import os
+import sys
+import unittest
+import xmlrunner
+
+from rift.mano.utils import project
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+
+NAME = 'test'
+XPATH = "/rw-project:project[rw-project:name={}]".format(quoted_key(NAME))
+
+class TestCase(unittest.TestCase):
+ log = None
+
+ @classmethod
+ def set_logger(cls, log):
+ cls.log = log
+
+ def setUp(self):
+ if not TestCase.log:
+ log = logging.getLogger()
+ log.setLevel( logging.ERROR)
+
+ def test_create_from_xpath(self):
+ """
+ Asserts:
+ 1. Instance of project from xpath
+ 2. project name in instance is correct
+ """
+ proj = project.ManoProject.create_from_xpath(XPATH, TestCase.log)
+ assert proj
+ assert NAME == proj.name
+ assert XPATH == proj.prefix
+
+ obj = project.ManoProject.create_from_xpath(proj.prefix, TestCase.log)
+ assert obj
+ assert NAME == obj.name
+ assert XPATH == obj.prefix
+
+ def test_create(self):
+ """
+ Asserts:
+ 1. Instance of project
+ 2. project name in instance is correct
+ """
+ proj = project.ManoProject(TestCase.log, name=NAME)
+ assert proj
+ assert NAME == proj.name
+ assert XPATH == proj.prefix
+
+ obj = project.ManoProject.create_from_xpath(proj.prefix, TestCase.log)
+ assert obj
+ assert NAME == obj.name
+ assert XPATH == obj.prefix
+
+ def test_create_update(self):
+ """
+ Asserts:
+ 1. Instance of project
+ 2. Set project name later
+ 3. project name in instance is correct
+ """
+ proj = project.ManoProject(TestCase.log)
+ assert proj
+ assert None == proj.name
+
+ proj.name = NAME
+ assert NAME == proj.name
+ assert XPATH == proj.prefix
+
+ try:
+ proj.name = 'testing'
+ except project.ManoProjNameSetErr as e:
+ TestCase.log.debug("Expected exception: {}".format(e))
+ else:
+ assert False
+
+ obj = project.ManoProject.create_from_xpath(proj.prefix, TestCase.log)
+ assert obj
+ assert NAME == obj.name
+ assert XPATH == obj.prefix
+
+ def test_update_from_xpath(self):
+ """
+ Asserts:
+ 1. Instance of project
+ 2. Update from XPATH
+ 2. project name in instance is correct
+ """
+ proj = project.ManoProject(TestCase.log)
+ assert proj
+ assert proj.name is None
+
+ proj.set_from_xpath(XPATH)
+ assert NAME == proj.name
+ assert XPATH == proj.prefix
+
+ try:
+ proj.set_from_xpath(XPATH)
+ except project.ManoProjNameSetErr as e:
+ TestCase.log.debug("Expected exception: {}".format(e))
+ else:
+ assert False
+
+ obj = project.ManoProject.create_from_xpath(proj.prefix, TestCase.log)
+ assert obj
+ assert NAME == obj.name
+ assert XPATH == obj.prefix
+
+ def test_create_from_xpath1(self):
+ """
+ Asserts:
+ 1. Instance of project from xpath
+ 2. project name in instance is correct
+ """
+ xpath = XPATH + '/rw:project/rw-project:project/rw-project:project/rw-project:project/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd[id=\'1232334\']'
+ proj = project.ManoProject.create_from_xpath(xpath, TestCase.log)
+ assert proj
+ assert NAME == proj.name
+ assert XPATH == proj.prefix
+
+ def test_create_from_xpath2(self):
+ """
+ Asserts:
+ 1. Instance of project from xpath
+ 2. project name in instance is correct
+ """
+ xpath = '/rw-project:project[name={}]'.format(quoted_key(NAME))
+ proj = project.ManoProject.create_from_xpath(xpath, TestCase.log)
+ assert proj
+ assert NAME == proj.name
+ assert XPATH == proj.prefix
+
+ def test_create_from_xpath_invalid(self):
+ """
+ Asserts:
+ 1. Exception due to invalid XPATH format for extracting project
+ """
+ xpath = '/'
+ try:
+ proj = project.ManoProject.create_from_xpath(xpath, TestCase.log)
+ except project.ManoProjXpathNoProjErr as e:
+ TestCase.log.debug("Expected exception: {}".format(e))
+ else:
+ assert False
+
+ def test_create_from_xpath_invalid1(self):
+ """
+ Asserts:
+ 1. Exception due to invalid XPATH format for extracting project
+ """
+ xpath = '/rw-project:project/{}'.format(NAME)
+ try:
+ proj = project.ManoProject.create_from_xpath(xpath, TestCase.log)
+ except project.ManoProjXpathKeyErr as e:
+ TestCase.log.debug("Expected exception: {}".format(e))
+ else:
+ assert False
+
+ def test_create_from_xpath_invalid2(self):
+ """
+ Asserts:
+ 1. Exception due to invalid XPATH format for extracting project
+ """
+ xpath = '/rw-project:project[id=test]'
+ try:
+ proj = project.ManoProject.create_from_xpath(xpath, TestCase.log)
+ except project.ManoProjXpathKeyErr as e:
+ TestCase.log.debug("Expected exception: {}".format(e))
+ else:
+ assert False
+
+ def tearDown(self):
+ pass
+
+
+def main(argv=sys.argv[1:]):
+ logging.basicConfig(format='TEST %(message)s')
+
+ runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-v', '--verbose', action='store_true')
+ parser.add_argument('-n', '--no-runner', action='store_true')
+
+ args, unknown = parser.parse_known_args(argv)
+ if args.no_runner:
+ runner = None
+
+ # Set the global logging level
+ log = logging.getLogger()
+ log.setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+ TestCase.set_logger(log)
+
+ # The unittest framework requires a program name, so use the name of this
+ # file instead (we do not want to have to pass a fake program name to main
+ # when this is called from the interpreter).
+ unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
+
+if __name__ == '__main__':
+ main()
from gi.repository import (
RwYang,
- NsdYang,
- RwNsdYang,
- VnfdYang,
- RwVnfdYang,
+ ProjectNsdYang as NsdYang,
+ RwProjectNsdYang as RwNsdYang,
+ ProjectVnfdYang as VnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
VldYang,
RwVldYang
)
def read_from_file(module_list, infile, input_format, descr_type):
- model = RwYang.Model.create_libncx()
+ model = RwYang.Model.create_libyang()
for module in module_list:
model.load_module(module)
descr = None
if descr_type == "nsd":
- descr = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ descr = RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
else:
- descr = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+ descr = VnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd()
if input_format == 'json':
json_str = open(infile).read()
1. Reserve and login to a VM a root
2. cd ${RIFT_ROOT}
-3. ./rift-shell -e
+3. ./rift-shell
4. cd modules/core/mc/confd_client
4. ./confd_client_opdata.sh (will measure the rate for fetching operational data)
5. ./confd_client_config.sh (will measure the rate of config writes and reads)
+++ /dev/null
-#!/bin/bash
-# install the launchpad systemd service
-# these files should work on both ub16 and fc20
-
-if [ $(whoami) != "root" ]; then
- echo must be root
- exit 1
-fi
-
-
-cat <<EOF >/etc/systemd/system/launchpad.service
-[Unit]
-Description=RIFT.ware Launchpad
-After=network-online.target
-
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-ExecStart=/bin/sh -c 'nohup sudo -b -H /usr/rift/rift-shell -r -i /usr/rift -a /usr/rift/.artifacts -- ./demos/launchpad.py --use-xml-mode'
-ExecStop=/bin/sh -c 'killall rwmain'
-
-[Install]
-WantedBy=default.target
-EOF
-
-chmod 664 /etc/systemd/system/launchpad.service
-
-if ! systemctl daemon-reload; then
- echo "WARNING: Not able to reload daemons: this must be run in a privileged container: sudo systemctl daemon-reload ; sudo systemctl enable launchpad.service"
-else
- # enable launchpad at boot - should always succeed in a privileged container
- systemctl enable launchpad.service
-fi
-
-# start launchpad?
-#sudo systemctl start launchpad.service
-
-echo
-echo "Launchpad service created and enabled. Run 'sudo systemctl start launchpad.service' to start the service."
-echo
cmake_minimum_required(VERSION 2.8)
-set(PKG_NAME rwmano_examples)
-set(PKG_VERSION 1.0)
-set(PKG_RELEASE 1)
-set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
##
#
# Author(s): Anil Gunturu
# Creation Date: 03/26/2014
-#
+
cmake_minimum_required(VERSION 2.8)
${CMAKE_CURRENT_BINARY_DIR}/pong_vnfd_aws.tar.gz
${CMAKE_CURRENT_BINARY_DIR}/ping_pong_nsd_with_epa.tar.gz
${CMAKE_CURRENT_BINARY_DIR}/ping_vnfd_with_epa.tar.gz
- ${CMAKE_CURRENT_BINARY_DIR}/pong_vnfd_with_epa.tar.gz)
+ ${CMAKE_CURRENT_BINARY_DIR}/ping_vnfd_with_vca.tar.gz
+ ${CMAKE_CURRENT_BINARY_DIR}/pong_vnfd_with_vca.tar.gz
+ ${CMAKE_CURRENT_BINARY_DIR}/ping_pong_nsd_with_vca.tar.gz
+ ${CMAKE_CURRENT_BINARY_DIR}/pong_vnfd_with_epa.tar.gz
+ ${CMAKE_CURRENT_BINARY_DIR}/ping_vnfd_with_vip.tar.gz
+ ${CMAKE_CURRENT_BINARY_DIR}/pong_vnfd_with_vip.tar.gz
+ ${CMAKE_CURRENT_BINARY_DIR}/ping_pong_nsd_with_vip.tar.gz
+ ${CMAKE_CURRENT_BINARY_DIR}/ping_vnfd_with_image.tar.gz
+ ${CMAKE_CURRENT_BINARY_DIR}/pong_vnfd_with_image.tar.gz
+ ${CMAKE_CURRENT_BINARY_DIR}/ping_vnfd_with_vca.tar.gz
+ ${CMAKE_CURRENT_BINARY_DIR}/pong_vnfd_with_vca.tar.gz
+ ${CMAKE_CURRENT_BINARY_DIR}/ping_pong_nsd_with_vca.tar.gz
+ ${CMAKE_CURRENT_BINARY_DIR}/ping_vnfd_with_image.tar.gz
+ ${CMAKE_CURRENT_BINARY_DIR}/pong_vnfd_with_image.tar.gz
+ ${CMAKE_CURRENT_BINARY_DIR}/ping_vnfd_with_vnf_input_parameters.tar.gz
+ ${CMAKE_CURRENT_BINARY_DIR}/pong_vnfd_with_vnf_input_parameters.tar.gz
+ ${CMAKE_CURRENT_BINARY_DIR}/ping_pong_nsd_with_vnf_input_parameters.tar.gz)
add_custom_command(
OUTPUT ${PACKAGE_OUTPUT}
COMMAND ${CMAKE_CURRENT_BINARY_DIR}/generate_packages.sh
- DEPENDS mano_yang rwcloud_yang ${CMAKE_CURRENT_SOURCE_DIR}/ping_pong_nsd.py
+ DEPENDS
+ mano_yang
+ rwcloud_yang
+ ${CMAKE_CURRENT_SOURCE_DIR}/ping_pong_nsd.py
+ ${CMAKE_CURRENT_SOURCE_DIR}/rift/mano/examples/ping_setup.py
+ ${CMAKE_CURRENT_SOURCE_DIR}/rift/mano/examples/ping_rate.py
+ ${CMAKE_CURRENT_SOURCE_DIR}/rift/mano/examples/ping_start_stop.py
+ ${CMAKE_CURRENT_SOURCE_DIR}/rift/mano/examples/pong_setup.py
+ ${CMAKE_CURRENT_SOURCE_DIR}/rift/mano/examples/pong_start_stop.py
+ ${CMAKE_CURRENT_SOURCE_DIR}/rift/mano/examples/ping_initial_config.py
+ ${CMAKE_CURRENT_SOURCE_DIR}/rift/mano/examples/pong_initial_config.py
+ ${CMAKE_CURRENT_SOURCE_DIR}/rift/mano/examples/start_traffic.py
+ ${CMAKE_CURRENT_SOURCE_DIR}/rift/mano/examples/stop_traffic.py
+ ${CMAKE_CURRENT_SOURCE_DIR}/rift/mano/examples/primitive_test.py
)
add_custom_target(ping_pong_pkg_gen ALL
FILES ${PACKAGE_OUTPUT}
DESTINATION
usr/rift/mano/examples/ping_pong_ns
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
)
rift_python_install_tree(
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
FILES
rift/mano/examples/ping_pong_nsd.py
+ rift/mano/examples/ping_setup.py
+ rift/mano/examples/ping_start_stop.py
+ rift/mano/examples/pong_setup.py
+ rift/mano/examples/pong_start_stop.py
rift/mano/examples/start_traffic.py
rift/mano/examples/ping_set_rate.py
+ rift/mano/examples/stop_traffic.py
+ rift/mano/examples/ping_initial_config.py
+ rift/mano/examples/pong_initial_config.py
+ rift/mano/examples/ping_set_rate.py
+ rift/mano/examples/primitive_test.py
)
install(
PROGRAMS
- rift/mano/examples/ping_config.py
+ rift/mano/examples/ping_scale.py
stand_up_ping_pong
DESTINATION usr/bin
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
)
#!/usr/bin/env python3
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
import gi
gi.require_version('RwYang', '1.0')
-from gi.repository import NsdYang, VldYang, VnfdYang, RwYang
+# TODO (Philip): Relook at this code
+
+from gi.repository import (
+ NsdYang,
+ VldYang,
+ VnfdYang,
+ RwYang
+ )
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
-model = RwYang.Model.create_libncx()
+model = RwYang.Model.create_libyang()
model.load_schema_ypbc(VldYang.get_schema())
model.load_schema_ypbc(NsdYang.get_schema())
model.load_schema_ypbc(VnfdYang.get_schema())
def configure_vld(proxy, vld_xml_hdl):
vld_xml = vld_xml_hdl.read()
logger.debug("Attempting to deserialize XML into VLD protobuf: %s", vld_xml)
- vld = VldYang.YangData_Vld_VldCatalog_Vld()
+ vld = VldYang.YangData_RwProject_Project_VldCatalog_Vld()
vld.from_xml_v2(model, vld_xml)
logger.debug("Sending VLD to netconf: %s", vld)
def configure_vnfd(proxy, vnfd_xml_hdl):
vnfd_xml = vnfd_xml_hdl.read()
logger.debug("Attempting to deserialize XML into VNFD protobuf: %s", vnfd_xml)
- vnfd = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+ vnfd = VnfdYang.YangData_VnfdCatalog_Vnfd()
vnfd.from_xml_v2(model, vnfd_xml)
logger.debug("Sending VNFD to netconf: %s", vnfd)
def configure_nsd(proxy, nsd_xml_hdl):
nsd_xml = nsd_xml_hdl.read()
logger.debug("Attempting to deserialize XML into NSD protobuf: %s", nsd_xml)
- nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ nsd = NsdYang.YangData_NsdCatalog_Nsd()
nsd.from_xml_v2(model, nsd_xml)
logger.debug("Sending NSD to netconf: %s", nsd)
action="append",
default=[],
type=argparse.FileType(),
- help="VLD XML File Path",
+ #help="VLD XML File Path",
+ # We do not support uploading VLD separately
+ help=argparse.SUPRESS,
)
parser.add_argument(
rm -rf ${BINARY_DIR}/pong_vnfd_with_epa
rm -rf ${BINARY_DIR}/ping_pong_nsd_with_epa
+rm -rf ${BINARY_DIR}/ping_vnfd_with_vca
+rm -rf ${BINARY_DIR}/pong_vnfd_with_vca
+rm -rf ${BINARY_DIR}/ping_pong_nsd_with_vca
+
+rm -rf ${BINARY_DIR}/ping_vnfd_with_vip
+rm -rf ${BINARY_DIR}/pong_vnfd_with_vip
+rm -rf ${BINARY_DIR}/ping_pong_nsd_with_vip
+
+rm -rf ${BINARY_DIR}/ping_vnfd_with_scaling
+rm -rf ${BINARY_DIR}/pong_vnfd_with_scaling
+rm -rf ${BINARY_DIR}/ping_pong_nsd_with_scaling
+
+rm -rf ${BINARY_DIR}/ping_vnfd_with_vnf_input_parameters
+rm -rf ${BINARY_DIR}/pong_vnfd_with_vnf_input_parameters
+rm -rf ${BINARY_DIR}/ping_pong_nsd_with_vnf_input_parameters
+
# Generate image md5sum
ping_md5sum="$(md5sum ${PING_QCOW_IMAGE} | cut -f1 -d" ")"
cp -r ${BINARY_DIR}/pong_vnfd ${BINARY_DIR}/pong_vnfd_with_image
mkdir -p ${BINARY_DIR}/ping_vnfd_with_image/images
mkdir -p ${BINARY_DIR}/pong_vnfd_with_image/images
-
### Generate descriptors for AWS
-${SOURCE_DIR}/ping_pong_nsd.py --outdir=${BINARY_DIR}/aws --format=json --aws
+${SOURCE_DIR}/ping_pong_nsd.py --outdir=${BINARY_DIR}/aws --format=yaml --aws
### Move the generated artifacts to appropriate directories
mv ${BINARY_DIR}/aws/ping_vnfd ${BINARY_DIR}/ping_vnfd_aws
### ReMove the original directories
rm -rf ${BINARY_DIR}/aws
+### Generate descriptors for VNF Input Parameters
+${SOURCE_DIR}/ping_pong_nsd.py --outdir=${BINARY_DIR}/vnf_input_parameters --format=yaml --ping-image-md5=${ping_md5sum} --pong-image-md5=${pong_md5sum} --pong-cloud-init=pong_cloud_init.cfg --ping-cloud-init=ping_cloud_init.cfg --vnf-input-parameter
+
+### Move the generated artifacts to appropriate directories
+mv ${BINARY_DIR}/vnf_input_parameters/ping_vnfd ${BINARY_DIR}/ping_vnfd_with_vnf_input_parameters
+mv ${BINARY_DIR}/vnf_input_parameters/pong_vnfd ${BINARY_DIR}/pong_vnfd_with_vnf_input_parameters
+mv ${BINARY_DIR}/vnf_input_parameters/ping_pong_nsd ${BINARY_DIR}/ping_pong_nsd_with_vnf_input_parameters
+
+### ReMove the original directories
+rm -rf ${BINARY_DIR}/vnf_input_parameters
+
### Generate descriptors with EPA
-${SOURCE_DIR}/ping_pong_nsd.py --outdir=${BINARY_DIR}/with_epa --format=json --epa --ping-image-md5=${ping_md5sum} --pong-image-md5=${pong_md5sum}
+${SOURCE_DIR}/ping_pong_nsd.py --outdir=${BINARY_DIR}/with_epa --format=yaml --epa --ping-image-md5=${ping_md5sum} --pong-image-md5=${pong_md5sum}
### Move the generated artifacts to appropriate directories
mv ${BINARY_DIR}/with_epa/ping_vnfd ${BINARY_DIR}/ping_vnfd_with_epa
### ReMove the original directories
rm -rf ${BINARY_DIR}/with_epa
+### Generate descriptors with VCA conf
+${SOURCE_DIR}/ping_pong_nsd.py --outdir=${BINARY_DIR}/with_vca --format=yaml --vca_conf --ping-image-md5=${ping_md5sum} --pong-image-md5=${pong_md5sum}
+
+### Move the generated artifacts to appropriate directories
+mv ${BINARY_DIR}/with_vca/ping_vnfd ${BINARY_DIR}/ping_vnfd_with_vca
+mv ${BINARY_DIR}/with_vca/pong_vnfd ${BINARY_DIR}/pong_vnfd_with_vca
+mv ${BINARY_DIR}/with_vca/ping_pong_nsd ${BINARY_DIR}/ping_pong_nsd_with_vca
+
+### ReMove the original directories
+rm -rf ${BINARY_DIR}/with_vca
+
+### Generate descriptors with Virtual-IP
+${SOURCE_DIR}/ping_pong_nsd.py --outdir=${BINARY_DIR}/with_vip --format=yaml --virtual-ip --ping-image-md5=${ping_md5sum} --pong-image-md5=${pong_md5sum}
+
+### Move the generated artifacts to appropriate directories
+mv ${BINARY_DIR}/with_vip/ping_vnfd ${BINARY_DIR}/ping_vnfd_with_vip
+mv ${BINARY_DIR}/with_vip/pong_vnfd ${BINARY_DIR}/pong_vnfd_with_vip
+mv ${BINARY_DIR}/with_vip/ping_pong_nsd ${BINARY_DIR}/ping_pong_nsd_with_vip
+
+### ReMove the original directories
+rm -rf ${BINARY_DIR}/with_vip
+
+### Generate descriptors with scaling
+${SOURCE_DIR}/ping_pong_nsd.py --outdir=${BINARY_DIR}/with_scaling --format=yaml --scale --ping-image-md5=${ping_md5sum} --pong-image-md5=${pong_md5sum}
+
+### Move the generated artifacts to appropriate directories
+mv ${BINARY_DIR}/with_scaling/ping_vnfd ${BINARY_DIR}/ping_vnfd_with_scaling
+mv ${BINARY_DIR}/with_scaling/pong_vnfd ${BINARY_DIR}/pong_vnfd_with_scaling
+mv ${BINARY_DIR}/with_scaling/ping_pong_nsd ${BINARY_DIR}/ping_pong_nsd_with_scaling
+
+### ReMove the original directories
+rm -rf ${BINARY_DIR}/with_scaling
+
+
# copy a dummy image for now
if [ -e ${PING_QCOW_IMAGE} ]; then
# Add RIFT Logos
${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} pong_vnfd_aws
${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_pong_nsd_aws
+# Add RIFT Logos
+mkdir -p ${BINARY_DIR}/ping_vnfd_with_vnf_input_parameters/icons
+mkdir -p ${BINARY_DIR}/pong_vnfd_with_vnf_input_parameters/icons
+mkdir -p ${BINARY_DIR}/ping_pong_nsd_with_vnf_input_parameters/icons
+
+cp ${PING_VNFD_LOGO} ${BINARY_DIR}/ping_vnfd_with_vnf_input_parameters/icons/
+cp ${PONG_VNFD_LOGO} ${BINARY_DIR}/pong_vnfd_with_vnf_input_parameters/icons/
+cp ${PING_PONG_NSD_LOGO} ${BINARY_DIR}/ping_pong_nsd_with_vnf_input_parameters/icons/
+
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_vnfd_with_vnf_input_parameters
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} pong_vnfd_with_vnf_input_parameters
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_pong_nsd_with_vnf_input_parameters
+
# Add RIFT Logos
mkdir -p ${BINARY_DIR}/ping_vnfd_with_epa/icons
mkdir -p ${BINARY_DIR}/pong_vnfd_with_epa/icons
${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_vnfd_with_epa
${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} pong_vnfd_with_epa
${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_pong_nsd_with_epa
+
+# Add RIFT Logos
+mkdir -p ${BINARY_DIR}/ping_vnfd_with_vca/icons
+mkdir -p ${BINARY_DIR}/pong_vnfd_with_vca/icons
+mkdir -p ${BINARY_DIR}/ping_pong_nsd_with_vca/icons
+
+cp ${PING_VNFD_LOGO} ${BINARY_DIR}/ping_vnfd_with_vca/icons/
+cp ${PONG_VNFD_LOGO} ${BINARY_DIR}/pong_vnfd_with_vca/icons/
+cp ${PING_PONG_NSD_LOGO} ${BINARY_DIR}/ping_pong_nsd_with_vca/icons/
+
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_vnfd_with_vca
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} pong_vnfd_with_vca
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_pong_nsd_with_vca
+
+
+mkdir -p ${BINARY_DIR}/ping_vnfd_with_vip/icons
+mkdir -p ${BINARY_DIR}/pong_vnfd_with_vip/icons
+mkdir -p ${BINARY_DIR}/ping_pong_nsd_with_vip/icons
+
+cp ${PING_VNFD_LOGO} ${BINARY_DIR}/ping_vnfd_with_vip/icons/
+cp ${PONG_VNFD_LOGO} ${BINARY_DIR}/pong_vnfd_with_vip/icons/
+cp ${PING_PONG_NSD_LOGO} ${BINARY_DIR}/ping_pong_nsd_with_vip/icons/
+
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_vnfd_with_vip
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} pong_vnfd_with_vip
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_pong_nsd_with_vip
+
+mkdir -p ${BINARY_DIR}/ping_vnfd_with_scaling/icons
+mkdir -p ${BINARY_DIR}/pong_vnfd_with_scaling/icons
+mkdir -p ${BINARY_DIR}/ping_pong_nsd_with_scaling/icons
+
+cp ${PING_VNFD_LOGO} ${BINARY_DIR}/ping_vnfd_with_scaling/icons/
+cp ${PONG_VNFD_LOGO} ${BINARY_DIR}/pong_vnfd_with_scaling/icons/
+cp ${PING_PONG_NSD_LOGO} ${BINARY_DIR}/ping_pong_nsd_with_scaling/icons/
+
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_vnfd_with_scaling
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} pong_vnfd_with_scaling
+${RIFT_INSTALL}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh ${BINARY_DIR} ping_pong_nsd_with_scaling
+++ /dev/null
-#!/usr/bin/env python3
-
-#
-# Copyright 2016 RIFT.IO Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import argparse
-import logging
-import os
-import stat
-import subprocess
-import sys
-import time
-import yaml
-
-def ping_config(run_dir, mgmt_ip, mgmt_port, pong_cp, logger, dry_run):
- sh_file = "{}/ping_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
- logger.debug("Creating script file %s" % sh_file)
- f = open(sh_file, "w")
- f.write(r'''
-#!/bin/bash
-
-# Rest API config
-ping_mgmt_ip='{}'
-ping_mgmt_port={}
-
-# VNF specific configuration
-pong_server_ip='{}'
-ping_rate=5
-server_port=5555
-'''.format(mgmt_ip, mgmt_port, pong_cp))
-
- f.write(r'''
-# Check if the port is open
-DELAY=1
-MAX_TRIES=60
-COUNT=0
-while true; do
- COUNT=$(expr $COUNT + 1)
- timeout 1 bash -c "cat < /dev/null > /dev/tcp/${ping_mgmt_ip}/${ping_mgmt_port}"
- rc=$?
- if [ $rc -ne 0 ]
- then
- echo "Failed to connect to server ${ping_mgmt_ip}:${ping_mgmt_port} for ping with $rc!"
- if [ ${COUNT} -gt ${MAX_TRIES} ]; then
- exit $rc
- fi
- sleep ${DELAY}
- else
- break
- fi
-done
-
-# Make rest API calls to configure VNF
-curl -D /dev/stdout \
- -H "Accept: application/vnd.yang.data+xml" \
- -H "Content-Type: application/vnd.yang.data+json" \
- -X POST \
- -d "{\"ip\":\"$pong_server_ip\", \"port\":$server_port}" \
- http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/server
-rc=$?
-if [ $rc -ne 0 ]
-then
- echo "Failed to set server info for ping!"
- exit $rc
-fi
-
-curl -D /dev/stdout \
- -H "Accept: application/vnd.yang.data+xml" \
- -H "Content-Type: application/vnd.yang.data+json" \
- -X POST \
- -d "{\"rate\":$ping_rate}" \
- http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/rate
-rc=$?
-if [ $rc -ne 0 ]
-then
- echo "Failed to set ping rate!"
- exit $rc
-fi
-
-output=$(curl -D /dev/stdout \
- -H "Accept: application/vnd.yang.data+xml" \
- -H "Content-Type: application/vnd.yang.data+json" \
- -X POST \
- -d "{\"enable\":true}" \
- http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/adminstatus/state)
-if [[ $output == *"Internal Server Error"* ]]
-then
- echo $output
- exit 3
-else
- echo $output
-fi
-
-exit 0
-''')
- f.close()
- os.chmod(sh_file, stat.S_IRWXU)
- if not dry_run:
- rc = subprocess.call(sh_file, shell=True)
- if rc:
- logger.error("Config failed: {}".format(rc))
- return False
- return True
-
-
-
-def main(argv=sys.argv[1:]):
- try:
- parser = argparse.ArgumentParser()
- parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
- parser.add_argument("--dry-run", action="store_true")
- parser.add_argument("--quiet", "-q", dest="verbose", action="store_false")
- args = parser.parse_args()
-
- run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
- if not os.path.exists(run_dir):
- os.makedirs(run_dir)
- log_file = "{}/rift_ping_config-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
- logging.basicConfig(filename=log_file, level=logging.DEBUG)
- logger = logging.getLogger()
-
- ch = logging.StreamHandler()
- if args.verbose:
- ch.setLevel(logging.DEBUG)
- else:
- ch.setLevel(logging.INFO)
-
- # create formatter and add it to the handlers
- formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
- ch.setFormatter(formatter)
- logger.addHandler(ch)
-
- except Exception as e:
- print("Got exception:{}".format(e))
- raise e
-
- try:
- dry_run = args.dry_run
-
- yaml_str = args.yaml_cfg_file.read()
- logger.debug("Input YAML file: {}".format(yaml_str))
- yaml_cfg = yaml.load(yaml_str)
- logger.debug("Input YAML: {}".format(yaml_cfg))
-
- # Check if this is post scale out trigger
- if yaml_cfg['trigger'] != 'post_scale_out':
- logger.error("Unexpected trigger {}".
- format(yaml_cfg['trigger']))
- raise
-
- pong_cp = ""
- for vnfr in yaml_cfg['vnfrs_others']:
- # Find the pong VNFR, assuming vnfr name will
- # have pong_vnfd as a substring
- if 'pong_vnfd' in vnfr['name']:
- for cp in vnfr['connection_points']:
- logger.debug("Connection point {}".format(cp))
- if 'cp0' in cp['name']:
- pong_cp = cp['ip_address']
- break
- if not len(pong_cp):
- logger.error("Did not get Pong cp0 IP")
- raise
-
- for vnfr in yaml_cfg['vnfrs_in_group']:
- mgmt_ip = vnfr['rw_mgmt_ip']
- mgmt_port = vnfr['rw_mgmt_port']
- if ping_config(run_dir, mgmt_ip, mgmt_port, pong_cp, logger, dry_run):
- logger.info("Successfully configured Ping {} at {}".
- format(vnfr['name'], mgmt_ip))
- else:
- logger.error("Config of ping {} with {} failed".
- format(vnfr['name'], mgmt_ip))
- raise
-
- except Exception as e:
- logger.error("Got exception {}".format(e))
- logger.exception(e)
- raise e
-
-if __name__ == "__main__":
- main()
--- /dev/null
+#!/usr/bin/env python3
+
+############################################################################
+# Copyright 2016 RIFT.IO Inc #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+############################################################################
+
+
+import argparse
+import logging
+import os
+import subprocess
+import sys
+import time
+
+import yaml
+
+
+def ping_initial_config(yaml_cfg, logger):
+ '''Use curl to configure ping and set the ping rate'''
+
+ def find_vnfr(vnfr_dict, name):
+ try:
+ for k, v in vnfr_dict.items():
+ if v['name'] == name:
+ return v
+ except KeyError:
+ logger.warn("Could not find vnfr for name : %s", name)
+
+ def find_vnfr_by_substring(vnfr_dict, name):
+ try:
+ for k, v in vnfr_dict.items():
+ if name in v['name']:
+ return v
+ except KeyError:
+ logger.warn("Could not find vnfr by name : %s", name)
+
+ def find_cp_ip(vnfr, cp_name):
+ for cp in vnfr['connection_point']:
+ logger.debug("Connection point: %s", format(cp))
+ if cp_name in cp['name']:
+ return cp['ip_address']
+
+ raise ValueError("Could not find vnfd %s connection point %s", cp_name)
+
+ def find_vnfr_mgmt_ip(vnfr):
+ return vnfr['mgmt_ip_address']
+
+ def get_vnfr_name(vnfr):
+ return vnfr['name']
+
+ def find_vdur_mgmt_ip(vnfr):
+ return vnfr['vdur'][0]['vm_management_ip']
+
+ def find_param_value(param_list, input_param):
+ for item in param_list:
+ logger.debug("Parameter: %s", format(item))
+ if item['name'] == input_param:
+ return item['value']
+
+ def set_ping_destination(mgmt_ip, port, pong_ip, pong_port):
+ curl_cmd = 'curl -D /dev/null -H "Accept: application/vnd.yang.data' \
+ '+xml" -H "Content-Type: application/vnd.yang.data+json" ' \
+ '-X POST -d "{{\\"ip\\":\\"{pong_ip}\\", \\"port\\":{pong_port}}}" ' \
+ 'http://{mgmt_ip}:{mgmt_port}/api/v1/ping/server'. \
+ format(
+ mgmt_ip=mgmt_ip,
+ mgmt_port=port,
+ pong_ip=pong_ip,
+ pong_port=pong_port)
+
+ logger.debug("Executing set-server cmd: %s", curl_cmd)
+ count = 0
+ delay = 20
+ max_tries = 12
+ rc = 0
+ while True:
+ count += 1
+ proc = subprocess.Popen(curl_cmd, shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ proc.wait()
+ logger.debug("Process: {}".format(proc))
+
+ if proc.returncode == 0:
+ logger.info("Success response ")
+ rc = 0
+ break
+
+ elif proc.returncode == 7:
+ # Connection timeout
+ if count >= max_tries:
+ logger.error("Connect failed for {}. Failing".format(count))
+ rc = 7
+ break
+ # Try after delay
+ time.sleep(delay)
+
+ return rc
+
+ # Get the required and optional parameters
+ ping_vnfr = find_vnfr(yaml_cfg['vnfr'], yaml_cfg['vnfr_name'])
+ ping_vnf_mgmt_ip = find_vnfr_mgmt_ip(ping_vnfr)
+ pong_vnfr = yaml_cfg['vnfr'][2]
+ pong_svc_ip = find_cp_ip(pong_vnfr, 'pong_vnfd/cp0')
+
+ # Get the required and optional parameters
+ mgmt_ip = ping_vnf_mgmt_ip
+ mgmt_port = 18888
+ rate = 5
+
+ rc = set_ping_destination(mgmt_ip, mgmt_port, pong_svc_ip, 5555)
+ if rc != 0:
+ return rc
+
+ cmd = 'curl -D /dev/null -H "Accept: application/vnd.yang.data' \
+ '+xml" -H "Content-Type: application/vnd.yang.data+json" ' \
+ '-X POST -d "{{\\"rate\\":{rate}}}" ' \
+ 'http://{mgmt_ip}:{mgmt_port}/api/v1/ping/rate'. \
+ format(
+ mgmt_ip=mgmt_ip,
+ mgmt_port=mgmt_port,
+ rate=rate)
+
+ logger.debug("Executing set-rate cmd: %s", cmd)
+ count = 0
+ delay = 10
+ max_tries = 12
+ rc = 0
+
+ while True:
+ count += 1
+ proc = subprocess.Popen(cmd, shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc.wait()
+
+ logger.debug("Process: {}".format(proc))
+
+ if proc.returncode == 0:
+ rc = 0
+ break
+
+ elif proc.returncode == 7:
+ # Connection timeout
+ if count >= max_tries:
+ logger.error("Connect failed for {}. Failing".format(count))
+ rc = 7
+ break
+ # Try after delay
+ time.sleep(delay)
+
+ return rc
+
+def main(argv=sys.argv[1:]):
+ try:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
+ parser.add_argument("-q", "--quiet", dest="verbose", action="store_false")
+ args = parser.parse_args()
+
+ run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
+ if not os.path.exists(run_dir):
+ os.makedirs(run_dir)
+ log_file = "{}/ping_initial_config-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
+
+ # logging.basicConfig(filename=log_file, level=logging.DEBUG)
+ logger = logging.getLogger('ping-initial-config')
+ logger.setLevel(logging.DEBUG)
+
+ fh = logging.FileHandler(log_file)
+ fh.setLevel(logging.DEBUG)
+
+ ch = logging.StreamHandler()
+ if args.verbose:
+ ch.setLevel(logging.DEBUG)
+ else:
+ ch.setLevel(logging.INFO)
+
+ # create formatter and add it to the handlers
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ fh.setFormatter(formatter)
+ ch.setFormatter(formatter)
+ logger.addHandler(fh)
+ logger.addHandler(ch)
+
+ except Exception as e:
+ logger.exception("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+ try:
+ logger.debug("Input file: {}".format(args.yaml_cfg_file.name))
+ yaml_str = args.yaml_cfg_file.read()
+ yaml_cfg = yaml.load(yaml_str)
+ logger.debug("Input YAML: {}".format(yaml_cfg))
+
+ rc = ping_initial_config(yaml_cfg, logger)
+ logger.info("Return code: {}".format(rc))
+ sys.exit(rc)
+
+ except Exception as e:
+ logger.exception("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+if __name__ == "__main__":
+ main()
#!/usr/bin/env python3
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
import argparse
+import simplejson
import os
+import yaml
import shutil
import sys
import uuid
+import random
+
+from xml.dom.minidom import parseString
import gi
gi.require_version('RwYang', '1.0')
gi.require_version('RwVnfdYang', '1.0')
gi.require_version('VnfdYang', '1.0')
gi.require_version('RwNsdYang', '1.0')
-gi.require_version('NsdYang', '1.0')
-
from gi.repository import (
- RwNsdYang,
- NsdYang,
- RwVnfdYang,
- VnfdYang,
+ RwNsdYang as RwNsdYang,
+ NsdYang as NsdYang,
+ RwVnfdYang as RwVnfdYang,
+ VnfdYang as VnfdYang,
RwYang,
)
self.descriptor = None
def write_to_file(self, module_list, outdir, output_format):
- model = RwYang.Model.create_libncx()
+ model = RwYang.Model.create_libyang()
for module in module_list:
model.load_module(module)
else:
raise Exception("Invalid output format for the descriptor")
- def get_json(self, module_list):
- model = RwYang.Model.create_libncx()
- for module in module_list:
- model.load_module(module)
- print(self.descriptor.to_json(model))
-
class VirtualNetworkFunction(ManoDescriptor):
def __init__(self, name, instance_count=1):
self.vnfd_catalog = None
self.vnfd = None
+ self.mano_ut = False
+ self.use_ns_init_conf = False
+ self.use_vca_conf = False
+ self.use_charm = False
self.instance_count = instance_count
self._placement_groups = []
+ self.config_files = []
self.use_vnf_init_conf = False
super(VirtualNetworkFunction, self).__init__(name)
def add_placement_group(self, group):
self._placement_groups.append(group)
- def compose(self, image_name, cloud_init="", cloud_init_file="", endpoint=None, mon_params=[],
- mon_port=8888, mgmt_port=8888, num_vlr_count=1, num_ivlr_count=1,
- num_vms=1, image_md5sum=None, mano_ut=False):
+ def add_vnf_conf_param_charm(self):
+ vnfd = self.descriptor.vnfd[0]
+ confparam = vnfd.config_parameter
+
+ src = confparam.create_config_parameter_source()
+ src.from_dict({
+ "name": "mgmt_ip",
+ "description": "Management IP address",
+ "attribute": "../../../mgmt-interface, ip-address",
+ "parameter" : [{
+ "config_primitive_name_ref": "config",
+ "config_primitive_parameter_ref": "ssh-hostname"
+ }]
+ })
+ confparam.config_parameter_source.append(src)
+
+ src = confparam.create_config_parameter_source()
+ src.from_dict({
+ "name": "username",
+ "description": "SSH username",
+ "value": "fedora",
+ "parameter" : [{
+ "config_primitive_name_ref": "config",
+ "config_primitive_parameter_ref": "ssh-username"
+ }]
+ })
+ confparam.config_parameter_source.append(src)
+
+ src = confparam.create_config_parameter_source()
+ src.from_dict({
+ "name": "ssh_key",
+ "description": "SSH private key file",
+ "attribute": "../../../mgmt-interface/ssh-key, private-key-file",
+ "parameter" : [{
+ "config_primitive_name_ref": "config",
+ "config_primitive_parameter_ref": "ssh-private-key"
+ }]
+ })
+ confparam.config_parameter_source.append(src)
+
+ # Check if pong
+ if 'pong_' in self.name:
+ src = confparam.create_config_parameter_source()
+ src.from_dict({
+ "name": "service_ip",
+ "description": "IP on which Pong service is listening",
+ "attribute": "../../../connection-point[name='pong_vnfd/cp0'], ip-address",
+ "parameter" : [
+ {
+ "config_primitive_name_ref": "set-server",
+ "config_primitive_parameter_ref": "server-ip"
+ },
+ ]
+ })
+ confparam.config_parameter_source.append(src)
+ src = confparam.create_config_parameter_source()
+ src.from_dict({
+ "name": "service_port",
+ "description": "Port on which server listens for incoming data packets",
+ "value": "5555",
+ "parameter" : [
+ {
+ "config_primitive_name_ref": "set-server",
+ "config_primitive_parameter_ref": "server-port"
+ },
+ ]
+ })
+ confparam.config_parameter_source.append(src)
+
+ else:
+ src = confparam.create_config_parameter_source()
+ src.from_dict({
+ "name": "rate",
+ "description": "Rate of packet generation",
+ "value": "5",
+ "parameter" : [
+ {
+ "config_primitive_name_ref": "set-rate",
+ "config_primitive_parameter_ref": "rate"
+ },
+ ]
+ })
+ confparam.config_parameter_source.append(src)
+
+ req = confparam.create_config_parameter_request()
+ req.from_dict({
+ "name": "pong_ip",
+ "description": "IP on which Pong service is listening",
+ "parameter" : [
+ {
+ "config_primitive_name_ref": "set-server",
+ "config_primitive_parameter_ref": "server-ip"
+ },
+ ]
+ })
+ confparam.config_parameter_request.append(req)
+ req = confparam.create_config_parameter_request()
+ req.from_dict({
+ "name": "pong_port",
+ "description": "Port on which Pong service is listening",
+ "parameter" : [
+ {
+ "config_primitive_name_ref": "set-server",
+ "config_primitive_parameter_ref": "server-port"
+ },
+ ]
+ })
+ confparam.config_parameter_request.append(req)
+
+ def add_vnf_conf_param(self):
+ vnfd = self.descriptor.vnfd[0]
+ confparam = vnfd.config_parameter
+
+ def get_params(param):
+ # Check if pong
+ if 'pong_' in self.name:
+ params = [
+ {
+ "config_primitive_name_ref": "config",
+ "config_primitive_parameter_ref": param
+ },
+ {
+ "config_primitive_name_ref": "start-stop",
+ "config_primitive_parameter_ref": param
+ },
+ ]
+ else:
+ params = [
+ {
+ "config_primitive_name_ref": "config",
+ "config_primitive_parameter_ref": param
+ },
+ {
+ "config_primitive_name_ref": "set-rate",
+ "config_primitive_parameter_ref": param
+ },
+ {
+ "config_primitive_name_ref": "start-stop",
+ "config_primitive_parameter_ref": param
+ },
+ ]
+ return params
+
+ src = confparam.create_config_parameter_source()
+ src.from_dict({
+ "name": "mgmt_ip",
+ "description": "Management address",
+ "attribute": "../../../mgmt-interface, ip-address",
+ "parameter" : get_params("mgmt_ip")
+ })
+ confparam.config_parameter_source.append(src)
+ src = confparam.create_config_parameter_source()
+ src.from_dict({
+ "name": "mgmt_port",
+ "description": "Management port",
+ "descriptor": "../../../mgmt-interface/port",
+ "parameter" : get_params("mgmt_port")
+ })
+ confparam.config_parameter_source.append(src)
+ src = confparam.create_config_parameter_source()
+ src.from_dict({
+ "name": "username",
+ "description": "Management username",
+ "value": "admin",
+ "parameter" : get_params("username")
+ })
+ confparam.config_parameter_source.append(src)
+ src = confparam.create_config_parameter_source()
+ src.from_dict({
+ "name": "password",
+ "description": "Management password",
+ "value": "admin",
+ "parameter" : get_params("password")
+ })
+ confparam.config_parameter_source.append(src)
+
+ # Check if pong
+ if 'pong_' in self.name:
+ src = confparam.create_config_parameter_source()
+ src.from_dict({
+ "name": "service_ip",
+ "description": "IP on which Pong service is listening",
+ "attribute": "../../../connection-point[name='pong_vnfd/cp0'], ip-address",
+ "parameter" : [
+ {
+ "config_primitive_name_ref": "config",
+ "config_primitive_parameter_ref": "service_ip"
+ },
+ ]
+ })
+ confparam.config_parameter_source.append(src)
+ src = confparam.create_config_parameter_source()
+ src.from_dict({
+ "name": "service_port",
+ "description": "Port on which server listens for incoming data packets",
+ "value": "5555",
+ "parameter" : [
+ {
+ "config_primitive_name_ref": "config",
+ "config_primitive_parameter_ref": "service_port"
+ },
+ ]
+ })
+ confparam.config_parameter_source.append(src)
+
+ else:
+ src = confparam.create_config_parameter_source()
+ src.from_dict({
+ "name": "rate",
+ "description": "Rate of packet generation",
+ "value": "5",
+ "parameter" : [
+ {
+ "config_primitive_name_ref": "set-rate",
+ "config_primitive_parameter_ref": "rate"
+ },
+ ]
+ })
+ confparam.config_parameter_source.append(src)
+
+ req = confparam.create_config_parameter_request()
+ req.from_dict({
+ "name": "pong_ip",
+ "description": "IP on which Pong service is listening",
+ "parameter" : [
+ {
+ "config_primitive_name_ref": "config",
+ "config_primitive_parameter_ref": "pong_ip"
+ },
+ ]
+ })
+ confparam.config_parameter_request.append(req)
+ req = confparam.create_config_parameter_request()
+ req.from_dict({
+ "name": "pong_port",
+ "description": "Port on which Pong service is listening",
+ "parameter" : [
+ {
+ "config_primitive_name_ref": "config",
+ "config_primitive_parameter_ref": "pong_port"
+ },
+ ]
+ })
+ confparam.config_parameter_request.append(req)
+
+ def add_ping_vca_config(self):
+ vnfd = self.descriptor.vnfd[0]
+ # Add vnf configuration
+ vnf_config = vnfd.vnf_configuration
+
+ # vnf_config.config_attributes.config_delay = 10
+
+ # Select "script" configuration
+ vnf_config.script.script_type = 'rift'
+
+ # Add config primitive
+ prim = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_ConfigPrimitive.from_dict({
+ "name": "config",
+ "parameter": [
+ {"name": "mgmt_ip", "data_type": "STRING", "read_only": "true"},
+ {"name": "mgmt_port", "data_type": "INTEGER", "read_only": "true"},
+ {"name": "username", "data_type": "STRING", "read_only": "true"},
+ {"name": "password", "data_type": "STRING", "read_only": "true"},
+ {"name": "pong_ip", "data_type": "STRING", "read_only": "true"},
+ {"name": "pong_port", "data_type": "INTEGER","read_only": "true",
+ "default_value": "5555"},
+ ],
+ "user_defined_script": "ping_setup.py",
+ })
+ vnf_config.config_primitive.append(prim)
+
+ prim = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_ConfigPrimitive.from_dict({
+ "name": "set-rate",
+ "parameter": [
+ {"name": "mgmt_ip", "data_type": "STRING", "read_only": "true"},
+ {"name": "mgmt_port", "data_type": "INTEGER", "read_only": "true"},
+ {"name": "username", "data_type": "STRING", "read_only": "true"},
+ {"name": "password", "data_type": "STRING", "read_only": "true"},
+ {"name": "rate", "data_type": "INTEGER",
+ "default_value": "5"},
+ ],
+ "user_defined_script": "ping_rate.py",
+ })
+ vnf_config.config_primitive.append(prim)
+
+ prim = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_ConfigPrimitive.from_dict({
+ "name": "start-stop",
+ "parameter": [
+ {"name": "mgmt_ip", "data_type": "STRING", "read_only": "true"},
+ {"name": "mgmt_port", "data_type": "INTEGER", "read_only": "true"},
+ {"name": "username", "data_type": "STRING", "read_only": "true"},
+ {"name": "password", "data_type": "STRING", "read_only": "true"},
+ {"name": "start", "data_type": "BOOLEAN",
+ "default_value": "true"}
+ ],
+ "user_defined_script": "ping_start_stop.py",
+ })
+ vnf_config.config_primitive.append(prim)
+
+ # Add initial config primitive
+ init_config = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_InitialConfigPrimitive.from_dict(
+ {
+ "seq": 1,
+ "config_primitive_ref": "config",
+ }
+ )
+ vnf_config.initial_config_primitive.append(init_config)
+
+ init_config = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_InitialConfigPrimitive.from_dict(
+ {
+ "seq": 2,
+ "config_primitive_ref": "set-rate",
+ },
+ )
+ vnf_config.initial_config_primitive.append(init_config)
+
+ if self.use_ns_init_conf is False:
+ init_config = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_InitialConfigPrimitive.from_dict(
+ {
+ "seq": 3,
+ "config_primitive_ref": "start-stop",
+ },
+ )
+ vnf_config.initial_config_primitive.append(init_config)
+
+ def add_pong_vca_config(self):
+ vnfd = self.descriptor.vnfd[0]
+ # Add vnf configuration
+ vnf_config = vnfd.vnf_configuration
+
+ # Select "script" configuration
+ vnf_config.script.script_type = 'rift'
+
+ # Add config primitive
+ prim = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_ConfigPrimitive.from_dict({
+ "name": "config",
+ "parameter": [
+ {"name": "mgmt_ip", "data_type": "STRING", "read_only": "true"},
+ {"name": "mgmt_port", "data_type": "INTEGER", "read_only": "true"},
+ {"name": "username", "data_type": "STRING", "read_only": "true"},
+ {"name": "password", "data_type": "STRING", "read_only": "true"},
+ {"name": "service_ip", "data_type": "STRING", "read_only": "true"},
+ {"name": "service_port", "data_type": "INTEGER", "read_only": "true"},
+ ],
+ "user_defined_script": "pong_setup.py",
+ })
+ vnf_config.config_primitive.append(prim)
+
+ prim = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_ConfigPrimitive.from_dict({
+ "name": "start-stop",
+ "parameter": [
+ {"name": "mgmt_ip", "data_type": "STRING", "read_only": "true"},
+ {"name": "mgmt_port", "data_type": "INTEGER", "read_only": "true"},
+ {"name": "username", "data_type": "STRING", "read_only": "true"},
+ {"name": "password", "data_type": "STRING", "read_only": "true"},
+ {"name": "start", "data_type": "BOOLEAN",
+ "default_value": "true"}
+ ],
+ "user_defined_script": "pong_start_stop.py",
+ })
+ vnf_config.config_primitive.append(prim)
+
+ # Add initial config primitive
+ init_config = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_InitialConfigPrimitive.from_dict(
+ {
+ "seq": 1,
+ "config_primitive_ref": "config",
+ }
+ )
+ vnf_config.initial_config_primitive.append(init_config)
+
+ if self.use_ns_init_conf is False:
+ init_config = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_InitialConfigPrimitive.from_dict(
+ {
+ "seq": 2,
+ "config_primitive_ref": "start-stop",
+ },
+ )
+ vnf_config.initial_config_primitive.append(init_config)
+
+ def add_charm_config(self):
+ vnfd = self.descriptor.vnfd[0]
+ # Add vnf configuration
+ vnf_config = vnfd.vnf_configuration
+
+ if 'pong_' in self.name:
+ mode = "pong"
+ else:
+ mode = "ping"
+
+ # Select "script" configuration
+ vnf_config.juju.charm = 'pingpong'
+
+ # Add config primitive
+ vnf_config.create_config_primitive()
+ prim = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_ConfigPrimitive.from_dict({
+ "name": "start",
+ })
+ vnf_config.config_primitive.append(prim)
+
+ prim = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_ConfigPrimitive.from_dict({
+ "name": "stop",
+ })
+ vnf_config.config_primitive.append(prim)
+
+ prim = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_ConfigPrimitive.from_dict({
+ "name": "restart",
+ })
+ vnf_config.config_primitive.append(prim)
+
+ prim = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_ConfigPrimitive.from_dict({
+ "name": "config",
+ "parameter": [
+ {"name": "ssh-hostname", "data_type": "STRING"},
+ {"name": "ssh-username", "data_type": "STRING"},
+ {"name": "ssh-private-key", "data_type": "STRING"},
+ {"name": "mode", "data_type": "STRING",
+ "default_value": "{}".format(mode),
+ "read_only": "true"},
+ ],
+ })
+ vnf_config.config_primitive.append(prim)
+
+ prim = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_ConfigPrimitive.from_dict({
+ "name": "set-server",
+ "parameter": [
+ {"name": "server-ip", "data_type": "STRING"},
+ {"name": "server-port", "data_type": "INTEGER"},
+ ],
+ })
+ vnf_config.config_primitive.append(prim)
+
+ if mode == 'ping':
+ prim = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_ConfigPrimitive.from_dict({
+ "name": "set-rate",
+ "parameter": [
+ {"name": "rate", "data_type": "INTEGER",
+ "default_value": "5"},
+ ],
+ })
+ vnf_config.config_primitive.append(prim)
+
+ prim = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_ConfigPrimitive.from_dict({
+ "name": "start-traffic",
+ })
+ vnf_config.config_primitive.append(prim)
+
+ prim = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_ConfigPrimitive.from_dict({
+ "name": "stop-traffic",
+ })
+ vnf_config.config_primitive.append(prim)
+
+ # Add initial config primitive
+ vnf_config.create_initial_config_primitive()
+ init_config = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_InitialConfigPrimitive.from_dict(
+ {
+ "seq": 1,
+ "config_primitive_ref": "config",
+ }
+ )
+ vnf_config.initial_config_primitive.append(init_config)
+
+ init_config = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_InitialConfigPrimitive.from_dict(
+ {
+ "seq": 2,
+ "config_primitive_ref": "start",
+ }
+ )
+ vnf_config.initial_config_primitive.append(init_config)
+
+ init_config = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_InitialConfigPrimitive.from_dict(
+ {
+ "seq": 3,
+ "config_primitive_ref": "set-server",
+ },
+ )
+ vnf_config.initial_config_primitive.append(init_config)
+
+ if mode == 'ping':
+ init_config = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_InitialConfigPrimitive.from_dict(
+ {
+ "seq": 4,
+ "config_primitive_ref": "set-rate",
+ },
+ )
+ vnf_config.initial_config_primitive.append(init_config)
+
+ if self.use_ns_init_conf is False:
+ init_config = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_InitialConfigPrimitive.from_dict(
+ {
+ "seq": 5,
+ "config_primitive_ref": "start-traffic",
+ },
+ )
+ vnf_config.initial_config_primitive.append(init_config)
+
+ def compose(self, image_name, vnf_descriptor_message, cloud_init="", cloud_init_file="",
+ endpoint=None, mon_params=[], mon_port=8888, mgmt_port=8888, num_vlr_count=1,
+ num_ivlr_count=1, num_vms=1, image_md5sum=None, mano_ut=False,
+ use_ns_init_conf=False, use_vca_conf=False, use_charm=False, use_static_ip=False,
+ multidisk=None, port_security=None, metadata_vdud=None, use_ipv6=False,
+ use_virtual_ip=False, vnfd_input_params=None, script_input_params=None, explicit_port_seq=False, mgmt_net=True):
+
+ self.mano_ut = mano_ut
+ self.use_ns_init_conf = use_ns_init_conf
+ self.use_vca_conf = use_vca_conf
+ self.use_charm = use_charm
+
self.descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog()
self.id = str(uuid.uuid1())
vnfd = self.descriptor.vnfd.add()
vnfd.short_name = self.name
vnfd.vendor = 'RIFT.io'
vnfd.logo = 'rift_logo.png'
- vnfd.description = 'This is an example RIFT.ware VNF'
+ vnfd.description = vnf_descriptor_message
vnfd.version = '1.0'
self.vnfd = vnfd
- if mano_ut is True:
+ if explicit_port_seq:
+ # ping and pong vnfds will have 2 and 5 internal interfaces respectively
+ num_ivlr_count = 2
+ if 'pong' in vnfd.name:
+ num_ivlr_count = 5
+
+ if mano_ut or use_virtual_ip or explicit_port_seq:
internal_vlds = []
for i in range(num_ivlr_count):
internal_vld = vnfd.internal_vld.add()
internal_vlds.append(internal_vld)
for i in range(num_vlr_count):
+ index = i+1 if mgmt_net else i
cp = vnfd.connection_point.add()
cp.type_yang = 'VPORT'
- cp.name = '%s/cp%d' % (self.name, i)
-
+ cp.name = '%s/cp%d' % (self.name, index)
+ if port_security is not None:
+ cp.port_security_enabled = port_security
+
+ if mgmt_net:
+ cp = vnfd.connection_point.add()
+ cp.type_yang = 'VPORT'
+ cp.name = '%s/cp0' % (self.name)
+
if endpoint is not None:
- endp = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_HttpEndpoint(
+ endp = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_HttpEndpoint(
path=endpoint, port=mon_port, polling_interval_secs=2
)
vnfd.http_endpoint.append(endp)
monp.http_endpoint_ref = endpoint
vnfd.monitoring_param.append(monp)
-
for i in range(num_vms):
# VDU Specification
vdu = vnfd.vdu.add()
mgmt_intf.dashboard_params.path = endpoint
mgmt_intf.dashboard_params.port = mgmt_port
- if cloud_init_file and len(cloud_init_file):
- vdu.cloud_init_file = cloud_init_file
- else:
- vdu.cloud_init = cloud_init
- if aws:
- vdu.cloud_init += " - [ systemctl, restart, --no-block, elastic-network-interfaces.service ]\n"
+ if use_charm:
+ mgmt_intf.ssh_key = True
+
+ if not self.use_charm:
+ if cloud_init_file and len(cloud_init_file):
+ vdu.cloud_init_file = cloud_init_file
+ else:
+ vdu.cloud_init = cloud_init
+ if aws:
+ vdu.cloud_init += " - [ systemctl, restart, --no-block, elastic-network-interfaces.service ]\n"
# sepcify the guest EPA
if use_epa:
if aws:
vdu.image = 'rift-ping-pong'
+ elif multidisk:
+ ping_test_data, pong_test_data = multidisk
+ test_data = ping_test_data
+ if 'pong' in vnfd.name:
+ test_data = pong_test_data
+ for vol_name, vol_attrs in test_data.items():
+ vol = vdu.volumes.add()
+ vol.name = vol_name
+ vol.device_type = vol_attrs[0]
+ vol.device_bus = vol_attrs[1]
+ vol.size = vol_attrs[2]
+ if vol_attrs[3]:
+ vol.image = vol_attrs[3]
+ # Bug RIFT-15165. Will comment out later once the bug is fixed
+ #else:
+ # vol.ephemeral = True
+
+ if vol_attrs[4] is not None:
+ vol.boot_priority = vol_attrs[4]
else:
vdu.image = image_name
if image_md5sum is not None:
vdu.image_checksum = image_md5sum
- if mano_ut is True:
+ if explicit_port_seq:
+ # pong vnfd will have 3 ordered interfaces out of 7 and all interfaces of ping vnfd are ordered
+ ordered_interfaces_count = num_vlr_count + num_ivlr_count
+ if 'pong' in vnfd.name:
+ ordered_interfaces_count = 3
+ interface_positions_list = random.sample(range(1, 2**32-1), ordered_interfaces_count-1)
+ random.shuffle(interface_positions_list)
+
+ if mano_ut or use_virtual_ip or explicit_port_seq:
+ vip_internal_intf_pool_start = 51
for i in range(num_ivlr_count):
internal_cp = vdu.internal_connection_point.add()
if vnfd.name.find("ping") >= 0:
- cp_name = "ping"
+ cp_name = "ping_vnfd"
else:
- cp_name = "pong"
+ cp_name = "pong_vnfd"
internal_cp.name = cp_name + "/icp{}".format(i)
internal_cp.id = cp_name + "/icp{}".format(i)
internal_cp.type_yang = 'VPORT'
- ivld_cp = internal_vlds[i].internal_connection_point_ref.add()
+ ivld_cp = internal_vlds[i].internal_connection_point.add()
ivld_cp.id_ref = internal_cp.id
-
- internal_interface = vdu.internal_interface.add()
+ if use_virtual_ip:
+ vcp = internal_vlds[i].virtual_connection_points.add()
+ if 'ping' in vnfd.name:
+ vcp.name = 'ivcp-0'
+ else:
+ vcp.name = 'ivcp-1'
+ vcp.type_yang = 'VPORT'
+ vcp.associated_cps.append(internal_cp.id)
+ int_interface_positon_set = False
+ internal_interface = vdu.interface.add()
internal_interface.name = 'fab%d' % i
- internal_interface.vdu_internal_connection_point_ref = internal_cp.id
+ internal_interface.type_yang = 'INTERNAL'
+ internal_interface.internal_connection_point_ref = internal_cp.id
internal_interface.virtual_interface.type_yang = 'VIRTIO'
-
+ if explicit_port_seq and interface_positions_list:
+ internal_interface.position = interface_positions_list.pop()
+ int_interface_positon_set = True
# internal_interface.virtual_interface.vpci = '0000:00:1%d.0'%i
-
+ if use_virtual_ip and int_interface_positon_set is False:
+ internal_interface.position = vip_internal_intf_pool_start
+ vip_internal_intf_pool_start += 1
+
+ if mgmt_net:
+ #adding a vlr for management network
+ num_vlr_count = num_vlr_count + 1
+
+ vip_external_intf_pool_start = 1
for i in range(num_vlr_count):
- external_interface = vdu.external_interface.add()
- external_interface.name = 'eth%d' % i
- external_interface.vnfd_connection_point_ref = '%s/cp%d' % (self.name, i)
- if use_epa:
- external_interface.virtual_interface.type_yang = 'VIRTIO'
- else:
- external_interface.virtual_interface.type_yang = 'VIRTIO'
+ ext_interface_positon_set = False
+ external_interface = vdu.interface.add()
+ external_interface.name = 'eth%d' % (i)
+ external_interface.type_yang = 'EXTERNAL'
+ external_interface.external_connection_point_ref = '%s/cp%d' % (self.name, i)
+ # The first external interface need to be set as the packets use this
+ # and we bring up only the eth0 (mgmt interface) and eth1 in the ping and
+ # pong VMs
+ if explicit_port_seq and (i == 0):
+ external_interface.position = 1
+ elif explicit_port_seq and interface_positions_list:
+ external_interface.position = interface_positions_list.pop()
+ ext_interface_positon_set = True
+
+ external_interface.virtual_interface.type_yang = 'VIRTIO'
# external_interface.virtual_interface.vpci = '0000:00:2%d.0'%i
+ if use_virtual_ip and ext_interface_positon_set is False:
+ external_interface.position = vip_external_intf_pool_start
+ vip_external_intf_pool_start += 1
+
+ if use_static_ip and not(mgmt_net and i == 0):
+ if 'pong_' in self.name:
+ external_interface.static_ip_address = '31.31.31.31'
+ if use_ipv6:
+ external_interface.static_ip_address = '3fee:1111:1111::1234'
+ else:
+ external_interface.static_ip_address = '31.31.31.32'
+ if use_ipv6:
+ external_interface.static_ip_address = '3fee:1111:1111::1235'
+
+
+ if metadata_vdud:
+ # Metadata for VDU
+ # Add config files, custom-meta-data for both ping, pong VNFs. Enable 'boot data drive' only for ping VNF
+ meta_data = {'EMS_IP':'10.1.2.3', 'Licenseserver_IP':'192.168.1.1'}
+ for i in range(2):
+ self.config_files.append('test_cfg_file_{}.txt'.format(random.randint(1,1000)))
+
+ supplemental_boot_data = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_Vdu_SupplementalBootData()
+
+ # Add config files
+ for cfg_file in self.config_files:
+ config_file = supplemental_boot_data.config_file.add()
+ config_file.source = cfg_file
+ config_file.dest = os.path.join('/tmp',cfg_file)
+
+ # enable 'boot data drive' only for ping VNF
+ if 'ping_' in vnfd.name:
+ supplemental_boot_data.boot_data_drive = True
+ # Add custom metadata
+ for name, value in meta_data.items():
+ custom_meta_data = supplemental_boot_data.custom_meta_data.add()
+ custom_meta_data.name = name
+ custom_meta_data.value = value
+
+ vdu.supplemental_boot_data = supplemental_boot_data
+
+ if vnfd_input_params:
+ # Input parameters for vnfd
+ supplemental_boot_data = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_Vdu_SupplementalBootData()
+
+ if 'ping_' in vnfd.name or 'pong_' in vnfd.name:
+ cloud_init_data = supplemental_boot_data.custom_meta_data.add()
+ cloud_init_data.destination = 'CLOUD_INIT'
+ cloud_init_data.name = 'custom_cloud_init_data'
+ cloud_init_data.value = 'cc_init_data'
+ cloud_init_data.data_type = 'STRING'
+
+ cloud_meta_data = supplemental_boot_data.custom_meta_data.add()
+ cloud_meta_data.destination = 'CLOUD_METADATA'
+ cloud_meta_data.name = 'custom_cloud_meta_data'
+ cloud_meta_data.value = 'cc_meta_data'
+ cloud_meta_data.data_type = 'STRING'
+
+ vdu.supplemental_boot_data = supplemental_boot_data
+
+ if script_input_params:
+ # Input parameters for vnfd
+ supplemental_boot_data = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_Vdu_SupplementalBootData()
+
+ if 'ping_' in vnfd.name or 'pong_' in vnfd.name:
+ cloud_init_data = supplemental_boot_data.custom_meta_data.add()
+ cloud_init_data.destination = 'CLOUD_METADATA'
+ cloud_init_data.name = 'CI-script-init-data'
+ cloud_init_data.value = 'default_script_init_data'
+ cloud_init_data.data_type = 'STRING'
+
+ vdu.supplemental_boot_data = supplemental_boot_data
for group in self._placement_groups:
placement_group = vnfd.placement_groups.add()
member_vdu = placement_group.member_vdus.add()
member_vdu.member_vdu_ref = vdu.id
+ # Add VNF access point
+ if use_vca_conf:
+ if use_charm:
+ self.add_vnf_conf_param_charm()
+ self.add_charm_config()
+ else:
+ self.add_vnf_conf_param()
+ if 'pong_' in self.name:
+ self.add_pong_vca_config()
+ else:
+ self.add_ping_vca_config()
+ else:
+ if 'pong_' in self.name:
+ self.add_pong_config()
+ else:
+ self.add_ping_config()
+
+ def add_ping_config(self):
+ vnfd = self.descriptor.vnfd[0]
+ # Add vnf configuration
+ vnf_config = vnfd.vnf_configuration
+ vnf_config.script.script_type = 'rift'
+
+ # Add initial config primitive
+ init_config = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_InitialConfigPrimitive.from_dict(
+ {
+ "seq": 1,
+ "name": "Ping config",
+ "user_defined_script": "ping_initial_config.py",
+ }
+ )
+ vnf_config.initial_config_primitive.append(init_config)
+
+ def add_pong_config(self):
+ vnfd = self.descriptor.vnfd[0]
+ # Add vnf configuration
+ vnf_config = vnfd.vnf_configuration
+ vnf_config.script.script_type = 'rift'
+
+ # Add initial config primitive
+ init_config =RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_VnfConfiguration_InitialConfigPrimitive.from_dict(
+ {
+ "seq": 1,
+ "name": "Pong config",
+ "user_defined_script": "pong_initial_config.py",
+ }
+ )
+ vnf_config.initial_config_primitive.append(init_config)
def write_to_file(self, outdir, output_format):
dirpath = "%s/%s" % (outdir, self.name)
output_format)
self.add_scripts(outdir)
- def add_scripts(self, outdir):
+ def add_cloud_init(self, outdir):
script_dir = os.path.join(outdir, self.name, 'cloud_init')
try:
os.makedirs(script_dir)
if not os.path.isdir(script_dir):
raise
- if 'ping' in self.name:
+ if 'ping_' in self.name:
script_file = os.path.join(script_dir, 'ping_cloud_init.cfg')
cfg = PING_USERDATA_FILE
else:
with open(script_file, "w") as f:
f.write("{}".format(cfg))
- # Copy the vnf_init_config script
- if self.use_vnf_init_conf and ('ping' in self.name):
- script_name = 'ping_set_rate.py'
+ # Create the config files in script_dir
+ for cfg_file in self.config_files:
+ with open(os.path.join(script_dir, cfg_file), 'w') as f:
+ f.write('metadata-vdud test')
+
+ def add_scripts(self, outdir):
+ if not self.use_charm:
+ self.add_cloud_init(outdir)
+
+ if not self.use_charm:
+ if self.use_vca_conf:
+ self.add_vca_scripts(outdir)
+ else:
+ self.add_config_scripts(outdir)
+
+ def add_config_scripts(self, outdir):
+ dest_path = os.path.join(outdir, self.name, 'scripts')
+ try:
+ os.makedirs(dest_path)
+ except OSError:
+ if not os.path.isdir(dest_path):
+ raise
- src_path = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
+ if 'pong_' in self.name:
+ scripts = ['pong_initial_config.py']
+ else:
+ scripts = ['ping_initial_config.py']
+
+ for script_name in scripts:
+ src_path = os.path.dirname(os.path.abspath(
+ os.path.realpath(__file__)))
script_src = os.path.join(src_path, script_name)
if not os.path.exists(script_src):
src_path = os.path.join(os.environ['RIFT_ROOT'],
- 'modules/core/mano/examples/ping_pong_ns/rift/mano/examples')
+ 'modules/core/mano/examples/'
+ 'ping_pong_ns/rift/mano/examples')
script_src = os.path.join(src_path, script_name)
- dest_path = os.path.join(outdir, self.name, 'scripts')
- os.makedirs(dest_path, exist_ok=True)
+ shutil.copy2(script_src, dest_path)
+
+ def add_vca_scripts(self, outdir):
+ dest_path = os.path.join(outdir, self.name, 'scripts')
+ try:
+ os.makedirs(dest_path)
+ except OSError:
+ if not os.path.isdir(dest_path):
+ raise
+
+ if 'pong_' in self.name:
+ scripts = ['pong_setup.py', 'pong_start_stop.py']
+ else:
+ scripts = ['ping_setup.py', 'ping_rate.py', 'ping_start_stop.py']
+
+ for script_name in scripts:
+ src_path = os.path.dirname(os.path.abspath(
+ os.path.realpath(__file__)))
+ script_src = os.path.join(src_path, script_name)
+ if not os.path.exists(script_src):
+ src_path = os.path.join(os.environ['RIFT_ROOT'],
+ 'modules/core/mano/examples/'
+ 'ping_pong_ns/rift/mano/examples')
+ script_src = os.path.join(src_path, script_name)
shutil.copy2(script_src, dest_path)
self.vnfd_config = {}
self._placement_groups = []
- def ping_config(self, mano_ut, use_ns_init_conf, use_vnf_init_conf):
- suffix = ''
- if mano_ut:
- ping_cfg = r'''
-#!/bin/bash
-
-echo "!!!!!!!! Executed ping Configuration !!!!!!!!!"
- '''
- else:
- ping_cfg = r'''
-#!/bin/bash
-
-# Rest API config
-ping_mgmt_ip='<rw_mgmt_ip>'
-ping_mgmt_port=18888
-
-# VNF specific configuration
-pong_server_ip='<rw_connection_point_name pong_vnfd%s/cp0>'
-ping_rate=5
-server_port=5555
-
-# Make rest API calls to configure VNF
-curl -D /dev/stdout \
- -H "Accept: application/vnd.yang.data+xml" \
- -H "Content-Type: application/vnd.yang.data+json" \
- -X POST \
- -d "{\"ip\":\"$pong_server_ip\", \"port\":$server_port}" \
- http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/server
-rc=$?
-if [ $rc -ne 0 ]
-then
- echo "Failed to set server info for ping!"
- exit $rc
-fi
-''' % suffix
-
- if use_vnf_init_conf is False:
- ping_cfg +='''
-curl -D /dev/stdout \
- -H "Accept: application/vnd.yang.data+xml" \
- -H "Content-Type: application/vnd.yang.data+json" \
- -X POST \
- -d "{\"rate\":$ping_rate}" \
- http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/rate
-rc=$?
-if [ $rc -ne 0 ]
-then
- echo "Failed to set ping rate!"
- exit $rc
-fi
-
-'''
- if use_ns_init_conf:
- ping_cfg += "exit 0\n"
- else:
- ping_cfg +='''
-output=$(curl -D /dev/stdout \
- -H "Accept: application/vnd.yang.data+xml" \
- -H "Content-Type: application/vnd.yang.data+json" \
- -X POST \
- -d "{\"enable\":true}" \
- http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/adminstatus/state)
-if [[ $output == *"Internal Server Error"* ]]
-then
- echo $output
- exit 3
-else
- echo $output
-fi
-
-exit 0
-'''
- return ping_cfg
+ def default_config(self, constituent_vnfd, vnfd, mano_ut, use_ns_init_conf, use_vnf_init_conf):
+ vnf_config = vnfd.vnfd.vnf_configuration
- def pong_config(self, mano_ut, use_ns_init_conf):
- suffix = ''
- if mano_ut:
- pong_cfg = r'''
-#!/bin/bash
-
-echo "!!!!!!!! Executed pong Configuration !!!!!!!!!"
- '''
- else:
- pong_cfg = r'''
-#!/bin/bash
-
-# Rest API configuration
-pong_mgmt_ip='<rw_mgmt_ip>'
-pong_mgmt_port=18889
-# username=<rw_username>
-# password=<rw_password>
-
-# VNF specific configuration
-pong_server_ip='<rw_connection_point_name pong_vnfd%s/cp0>'
-server_port=5555
-
-# Make Rest API calls to configure VNF
-curl -D /dev/stdout \
- -H "Accept: application/vnd.yang.data+xml" \
- -H "Content-Type: application/vnd.yang.data+json" \
- -X POST \
- -d "{\"ip\":\"$pong_server_ip\", \"port\":$server_port}" \
- http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/server
-rc=$?
-if [ $rc -ne 0 ]
-then
- echo "Failed to set server(own) info for pong!"
- exit $rc
-fi
-
-''' % suffix
-
- if use_ns_init_conf:
- pong_cfg += "exit 0\n"
- else:
- pong_cfg +='''
-curl -D /dev/stdout \
- -H "Accept: application/vnd.yang.data+xml" \
- -H "Content-Type: application/vnd.yang.data+json" \
- -X POST \
- -d "{\"enable\":true}" \
- http://${pong_mgmt_ip}:${pong_mgmt_port}/api/v1/pong/adminstatus/state
-rc=$?
-if [ $rc -ne 0 ]
-then
- echo "Failed to enable pong service!"
- exit $rc
-fi
-
-exit 0
-'''
- return pong_cfg
-
- def pong_fake_juju_config(self, vnf_config):
-
- if vnf_config:
- # Select "script" configuration
- vnf_config.juju.charm = 'clearwater-aio-proxy'
-
- # Set the initital-config
- vnf_config.create_initial_config_primitive()
- init_config = VnfdYang.InitialConfigPrimitive.from_dict({
- "seq": 1,
- "name": "config",
- "parameter": [
- {"name": "proxied_ip", "value": "<rw_mgmt_ip>"},
- ]
- })
- vnf_config.initial_config_primitive.append(init_config)
-
- init_config_action = VnfdYang.InitialConfigPrimitive.from_dict({
- "seq": 2,
- "name": "action1",
- "parameter": [
- {"name": "Pong Connection Point", "value": "pong_vnfd/cp0"},
- ]
- })
- vnf_config.initial_config_primitive.append(init_config_action)
- init_config_action = VnfdYang.InitialConfigPrimitive.from_dict({
- "seq": 3,
- "name": "action2",
- "parameter": [
- {"name": "Ping Connection Point", "value": "ping_vnfd/cp0"},
- ]
- })
- vnf_config.initial_config_primitive.append(init_config_action)
-
- # Config parameters can be taken from config.yaml and
- # actions from actions.yaml in the charm
- # Config to set the home domain
- vnf_config.create_service_primitive()
- config = VnfdYang.ServicePrimitive.from_dict({
- "name": "config",
- "parameter": [
- {"name": "home_domain", "data_type": "STRING"},
- {"name": "base_number", "data_type": "STRING"},
- {"name": "number_count", "data_type": "INTEGER"},
- {"name": "password", "data_type": "STRING"},
- ]
- })
- vnf_config.service_primitive.append(config)
-
- config = VnfdYang.ServicePrimitive.from_dict({
- "name": "create-update-user",
- # "user-defined-script":"/tmp/test.py",
- "parameter": [
- {"name": "number", "data_type": "STRING", "mandatory": True},
- {"name": "password", "data_type": "STRING", "mandatory": True},
- ]
- })
- vnf_config.service_primitive.append(config)
-
- config = VnfdYang.ServicePrimitive.from_dict({
- "name": "delete-user",
- "parameter": [
- {"name": "number", "data_type": "STRING", "mandatory": True},
- ]
- })
- vnf_config.service_primitive.append(config)
-
- def default_config(self, const_vnfd, vnfd, mano_ut,
- use_ns_init_conf,
- use_vnf_init_conf):
- vnf_config = vnfd.vnfd.vnf_configuration
-
- vnf_config.config_attributes.config_priority = 0
- vnf_config.config_attributes.config_delay = 0
-
- # Select "script" configuration
- vnf_config.script.script_type = 'bash'
-
- if vnfd.name == 'pong_vnfd' or vnfd.name == 'pong_vnfd_with_epa' or vnfd.name == 'pong_vnfd_aws':
- vnf_config.config_attributes.config_priority = 1
- vnf_config.config_template = self.pong_config(mano_ut, use_ns_init_conf)
- # First priority config delay will delay the entire NS config delay
- if mano_ut is False:
- vnf_config.config_attributes.config_delay = 60
- else:
- # This is PONG and inside mano_ut
- # This is test only
- vnf_config.config_attributes.config_delay = 10
- # vnf_config.config_template = self.pong_config(vnf_config, use_ns_init_conf)
-
- if vnfd.name == 'ping_vnfd' or vnfd.name == 'ping_vnfd_with_epa' or vnfd.name == 'ping_vnfd_aws':
- vnf_config.config_attributes.config_priority = 2
- vnf_config.config_template = self.ping_config(mano_ut,
- use_ns_init_conf,
- use_vnf_init_conf)
- if use_vnf_init_conf:
- vnf_config.initial_config_primitive.add().from_dict(
- {
- "seq": 1,
- "name": "set ping rate",
- "user_defined_script": "ping_set_rate.py",
- "parameter": [
- {
- 'name': 'rate',
- 'value': '5',
- },
- ],
- }
- )
def ns_config(self, nsd, vnfd_list, mano_ut):
# Used by scale group
if mano_ut:
nsd.service_primitive.add().from_dict(
{
- "name": "ping config",
+ "name": "ping scale",
"user_defined_script": "{}".format(os.path.join(
os.environ['RIFT_ROOT'],
'modules/core/mano',
else:
nsd.service_primitive.add().from_dict(
{
- "name": "ping config",
- "user_defined_script": "ping_config.py"
+ "name": "ping scale",
+ "user_defined_script": "ping_scale.py"
})
+ def ns_xconfig(self, nsd):
+ """Used for a testcase."""
+ nsd.service_primitive.add().from_dict(
+ {
+ "name": "primitive_test",
+ "user_defined_script": "primitive_test.py"
+ }
+ )
+
def ns_initial_config(self, nsd):
- nsd.initial_config_primitive.add().from_dict(
+ nsd.initial_service_primitive.add().from_dict(
{
"seq": 1,
"name": "start traffic",
],
}
)
+ nsd.terminate_service_primitive.add().from_dict(
+ {
+ "seq": 1,
+ "name": "stop traffic",
+ "user_defined_script": "stop_traffic.py",
+ "parameter": [
+ {
+ 'name': 'userid',
+ 'value': 'rift',
+ },
+ ],
+ }
+ )
def add_scale_group(self, scale_group):
self._scale_groups.append(scale_group)
'value_type': mon_param.value_type,
'vnfd_monitoring_param': [
{'vnfd_id_ref': vnfd_obj.vnfd.id,
- 'vnfd_monitoring_param_ref': mon_param.id}]
+ 'vnfd_monitoring_param_ref': mon_param.id,
+ 'member_vnf_index_ref': self.get_member_vnf_index(vnfd_obj.vnfd.id)}],
})
self.nsd.monitoring_param.append(nsd_monp)
param_id += 1
-
-
+ def get_vnfd_id(self, index):
+ for cv in self.nsd.constituent_vnfd:
+ if cv.member_vnf_index == index:
+ return cv.vnfd_id_ref
+
+ def get_member_vnf_index(self, vnfd_id):
+ for cv in self.nsd.constituent_vnfd:
+ if cv.vnfd_id_ref == vnfd_id:
+ return cv.member_vnf_index
+
+ def add_conf_param_map(self):
+ nsd = self.nsd
+
+ confparam_map = nsd.config_parameter_map.add()
+ confparam_map.id = '1'
+ confparam_map.config_parameter_source.member_vnf_index_ref = 2
+ confparam_map.config_parameter_source.vnfd_id_ref = self.get_vnfd_id(2)
+ confparam_map.config_parameter_source.config_parameter_source_ref = 'service_ip'
+ confparam_map.config_parameter_request.member_vnf_index_ref = 1
+ confparam_map.config_parameter_request.vnfd_id_ref = self.get_vnfd_id(1)
+ confparam_map.config_parameter_request.config_parameter_request_ref = 'pong_ip'
+
+ confparam_map = nsd.config_parameter_map.add()
+ confparam_map.id = '2'
+ confparam_map.config_parameter_source.member_vnf_index_ref = 2
+ confparam_map.config_parameter_source.vnfd_id_ref = self.get_vnfd_id(2)
+ confparam_map.config_parameter_source.config_parameter_source_ref = 'service_port'
+ confparam_map.config_parameter_request.member_vnf_index_ref = 1
+ confparam_map.config_parameter_request.vnfd_id_ref = self.get_vnfd_id(1)
+ confparam_map.config_parameter_request.config_parameter_request_ref = 'pong_port'
def compose(self, vnfd_list, cpgroup_list, mano_ut,
+ ns_descriptor_message,
use_ns_init_conf=True,
- use_vnf_init_conf=True,):
+ use_vnf_init_conf=True,
+ use_vca_conf=False,
+ use_ipv6=False,
+ port_security = None,
+ use_virtual_ip=False,
+ primitive_test=False,
+ vnfd_input_params=False,
+ script_input_params=False,
+ mgmt_net=True):
if mano_ut:
# Disable NS initial config primitive
nsd.short_name = self.name
nsd.vendor = 'RIFT.io'
nsd.logo = 'rift_logo.png'
- nsd.description = 'Toy NS'
+ nsd.description = ns_descriptor_message
nsd.version = '1.0'
nsd.input_parameter_xpath.append(
NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
- xpath="/nsd:nsd-catalog/nsd:nsd/nsd:vendor",
+ xpath="/nsd-catalog/nsd/vendor",
)
)
+ if vnfd_input_params:
+ nsd.input_parameter_xpath.append(
+ NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ xpath="/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vendor",
+ )
+ )
+
ip_profile = nsd.ip_profiles.add()
ip_profile.name = "InterVNFLink"
ip_profile.description = "Inter VNF Link"
ip_profile.ip_profile_params.ip_version = "ipv4"
ip_profile.ip_profile_params.subnet_address = "31.31.31.0/24"
ip_profile.ip_profile_params.gateway_address = "31.31.31.210"
+ if use_ipv6:
+ ip_profile.ip_profile_params.ip_version = "ipv6"
+ ip_profile.ip_profile_params.subnet_address = "3fee:1111:1111::/64"
+ ip_profile.ip_profile_params.gateway_address = "3fee:1111:1111::1"
vld_id = 1
for cpgroup in cpgroup_list:
vld.version = '1.0'
vld.type_yang = 'ELAN'
vld.ip_profile_ref = 'InterVNFLink'
- for cp in cpgroup:
+ for i, cp in enumerate(cpgroup):
cpref = vld.vnfd_connection_point_ref.add()
cpref.member_vnf_index_ref = cp[0]
cpref.vnfd_id_ref = cp[1]
cpref.vnfd_connection_point_ref = cp[2]
-
+ if use_virtual_ip:
+ vcp = vld.virtual_connection_points.add()
+ vcp.name = 'vcp-{}'.format(i)
+ vcp.type_yang = 'VPORT'
+ if port_security is not None:
+ vcp.port_security_enabled = port_security
+ vcp.associated_cps.append(cpref.vnfd_connection_point_ref)
+
vnfd_index_map = {}
member_vnf_index = 1
for vnfd in vnfd_list:
constituent_vnfd.start_by_default = False
constituent_vnfd.vnfd_id_ref = vnfd.descriptor.vnfd[0].id
- self.default_config(constituent_vnfd, vnfd, mano_ut,
- use_ns_init_conf, use_vnf_init_conf)
+ if use_vca_conf is False:
+ self.default_config(constituent_vnfd, vnfd, mano_ut,
+ use_ns_init_conf, use_vnf_init_conf)
member_vnf_index += 1
+ if vnfd_input_params:
+ nsd.input_parameter_xpath.append(
+ NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ xpath="/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vendor" % (constituent_vnfd.vnfd_id_ref),
+ )
+ )
+ nsd.input_parameter_xpath.append(
+ NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ xpath=(
+ "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vdu"
+ "/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name='custom_cloud_init_data']/vnfd:value"
+ )
+ )
+ )
+ nsd.input_parameter_xpath.append(
+ NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ xpath=(
+ "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vdu[vnfd:id='%s']"
+ "/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name='custom_cloud_init_data']/vnfd:value"
+ ) % (constituent_vnfd.vnfd_id_ref, vnfd.descriptor.vnfd[0].vdu[0].id)
+ )
+ )
+ nsd.input_parameter_xpath.append(
+ NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ xpath=(
+ "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vdu"
+ "/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name='custom_cloud_meta_data']/vnfd:value"
+ )
+ )
+ )
+ nsd.input_parameter_xpath.append(
+ NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ xpath=(
+ "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vdu[vnfd:id='%s']"
+ "/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name='custom_cloud_meta_data']/vnfd:value"
+ ) % (constituent_vnfd.vnfd_id_ref, vnfd.descriptor.vnfd[0].vdu[0].id)
+ )
+ )
+
+ if script_input_params:
+ nsd.input_parameter_xpath.append(
+ NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ xpath=(
+ "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vdu[vnfd:id='%s']"
+ "/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name='CI-script-init-data']/vnfd:value"
+ ) % (constituent_vnfd.vnfd_id_ref, vnfd.descriptor.vnfd[0].vdu[0].id)
+ )
+ )
+
+ if mgmt_net:
+ vld = nsd.vld.add()
+ vld.id = 'mgmt_vld'
+ vld.name = 'mgmt_vld'
+ vld.type_yang = 'ELAN'
+ vld.mgmt_network = "true"
+ vld.vim_network_name = "private"
+
+ ping_cpref = vld.vnfd_connection_point_ref.add()
+ ping_cpref.member_vnf_index_ref = 1
+ ping_cpref.vnfd_id_ref = nsd.constituent_vnfd[0].vnfd_id_ref
+ ping_cpref.vnfd_connection_point_ref = 'ping_vnfd/cp0'
+
+ pong_cpref = vld.vnfd_connection_point_ref.add()
+ pong_cpref.member_vnf_index_ref = 2
+ pong_cpref.vnfd_id_ref = nsd.constituent_vnfd[1].vnfd_id_ref
+ pong_cpref.vnfd_connection_point_ref = 'pong_vnfd/cp0'
+
# Enable config primitives if either mano_ut or
# scale groups are enabled
if mano_ut or len(self._scale_groups):
if use_ns_init_conf:
self.ns_initial_config(nsd)
+ if primitive_test:
+ self.ns_xconfig(nsd)
+
for scale_group in self._scale_groups:
group_desc = nsd.scaling_group_descriptor.add()
group_desc.name = scale_group.name
config_action = group_desc.scaling_config_action.add()
config_action.trigger = trigger
config = scale_group.config_action[trigger]
- config_action.ns_config_primitive_name_ref = config['ns-config-primitive-name-ref']
+ config_action.ns_service_primitive_name_ref = config['ns-service-primitive-name-ref']
for placement_group in self._placement_groups:
group = nsd.placement_groups.add()
member.vnfd_id_ref = member_vnfd.descriptor.vnfd[0].id
member.member_vnf_index_ref = vnfd_index_map[member_vnfd]
- # self.create_mon_params(vnfd_list)
+ self.create_mon_params(vnfd_list)
+ if use_vca_conf:
+ self.add_conf_param_map()
def write_config(self, outdir, vnfds):
with open('%s/%s__%s.yaml' % (vnf_config_dir, vnfd.id, i), "w") as fh:
fh.write(yaml_data)
- def write_initial_config_script(self, outdir):
- script_name = 'start_traffic.py'
-
+ def write_config_scripts(self, outdir, script_name):
src_path = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
script_src = os.path.join(src_path, script_name)
if not os.path.exists(script_src):
dirpath,
output_format)
- # Write the initial config script
- self.write_initial_config_script(dirpath)
+ # Write the config scripts
+ self.write_config_scripts(dirpath, 'start_traffic.py')
+ self.write_config_scripts(dirpath, 'stop_traffic.py')
+ self.write_config_scripts(dirpath, 'primitive_test.py')
+
+ if len(self._scale_groups):
+ self.write_config_scripts(dirpath, 'ping_scale.py')
def get_ping_mon_params(path):
self.vnfd_count_map[vnfd] = vnfd_count
def add_config(self):
- self.config_action['post_scale_out']= {'ns-config-primitive-name-ref':
- 'ping config'}
+ self.config_action['post_scale_out']= {'ns-service-primitive-name-ref':
+ 'ping scale'}
class PlacementGroup(object):
def __init__(self, name):
def add_member(self, vdu):
self.vdu_list.append(vdu)
-
-
+def generate_vnf_and_ns_description_message(descriptor_type,
+ aws=False,
+ epa=False,
+ charm=False,
+ vca=False,
+ vip=False):
+ # Helper Function to generate a description message for
+ # VNFD/NSD based on type
+
+ suffix_list = []
+ if aws:
+ suffix_list.append(" for AWS ")
+ else:
+ suffix_list.append(' ')
+
+ if epa:
+ suffix_list.append("EPA")
+ if charm:
+ suffix_list.append("Charm")
+ if vca:
+ suffix_list.append("VCA Conf")
+ if vip:
+ suffix_list.append("VIP")
+ message = "Toy Rift.ware " + descriptor_type + 'with '.join(filter(None, [suffix_list[0], ', '.join(suffix_list[1:])]))
+ return message
def generate_ping_pong_descriptors(fmt="json",
write_to_file=False,
use_placement_group=True,
use_ns_init_conf=True,
use_vnf_init_conf=True,
- ):
+ use_vca_conf=False,
+ use_charm=False,
+ use_static_ip=False,
+ port_security=None,
+ metadata_vdud=None,
+ vnfd_input_params=None,
+ script_input_params=None,
+ multidisk=None,
+ explicit_port_seq=False,
+ use_ipv6=False,
+ primitive_test=False,
+ use_virtual_ip=False,
+ mgmt_net=True,
+ nsd_name=None):
+
# List of connection point groups
# Each connection point group refers to a virtual link
# the CP group consists of tuples of connection points
+ if explicit_port_seq:
+ # ping and pong each will have two external interfaces.
+ external_vlr_count = 2
cpgroup_list = []
for i in range(external_vlr_count):
cpgroup_list.append([])
+ if use_charm:
+ use_vca_conf = True
+
+ if use_vca_conf:
+ use_ns_init_conf = True
+ use_vnf_init_conf = False
+
suffix = ''
ping = VirtualNetworkFunction("ping_vnfd%s" % (suffix), pingcount)
ping.use_vnf_init_conf = use_vnf_init_conf
ping_userdata=ping_userdata,
ex_ping_userdata=ex_ping_userdata
)
+ ns_descriptor_message = generate_vnf_and_ns_description_message("NS", aws, use_epa,
+ use_charm, use_vca_conf,
+ use_virtual_ip)
+ vnf_descriptor_message = generate_vnf_and_ns_description_message("VNF", aws, use_epa,
+ use_charm, use_vca_conf,
+ use_virtual_ip)
ping.compose(
"Fedora-x86_64-20-20131211.1-sda-ping.qcow2",
+ vnf_descriptor_message,
ping_userdata,
use_ping_cloud_init_file,
"api/v1/ping/stats",
num_vms=num_vnf_vms,
image_md5sum=ping_md5sum,
mano_ut=mano_ut,
- )
+ use_ns_init_conf=use_ns_init_conf,
+ use_vca_conf=use_vca_conf,
+ use_charm=use_charm,
+ use_static_ip=use_static_ip,
+ port_security=port_security,
+ metadata_vdud=metadata_vdud,
+ vnfd_input_params=vnfd_input_params,
+ script_input_params=script_input_params,
+ multidisk=multidisk,
+ explicit_port_seq=explicit_port_seq,
+ use_ipv6=use_ipv6,
+ use_virtual_ip=use_virtual_ip,
+ mgmt_net=mgmt_net)
pong = VirtualNetworkFunction("pong_vnfd%s" % (suffix))
pong.compose(
"Fedora-x86_64-20-20131211.1-sda-pong.qcow2",
+ vnf_descriptor_message,
pong_userdata,
use_pong_cloud_init_file,
"api/v1/pong/stats",
num_vms=num_vnf_vms,
image_md5sum=pong_md5sum,
mano_ut=mano_ut,
- )
+ use_ns_init_conf=use_ns_init_conf,
+ use_vca_conf=use_vca_conf,
+ use_charm=use_charm,
+ use_static_ip=use_static_ip,
+ port_security=False if port_security else port_security,
+ metadata_vdud=metadata_vdud,
+ vnfd_input_params=vnfd_input_params,
+ script_input_params=script_input_params,
+ multidisk=multidisk,
+ explicit_port_seq=explicit_port_seq,
+ use_ipv6=use_ipv6,
+ use_virtual_ip=use_virtual_ip,
+ mgmt_net=mgmt_net)
# Initialize the member VNF index
member_vnf_index = 1
# define the connection point groups
for index, cp_group in enumerate(cpgroup_list):
+ if explicit_port_seq:
+ member_vnf_index = 1
desc_id = ping.descriptor.vnfd[0].id
- filename = 'ping_vnfd{}/cp{}'.format(suffix, index)
+ filename = 'ping_vnfd{}/cp{}'.format(suffix, index+1)
for idx in range(pingcount):
cp_group.append((
member_vnf_index += 1
desc_id = pong.descriptor.vnfd[0].id
- filename = 'pong_vnfd{}/cp{}'.format(suffix, index)
+ filename = 'pong_vnfd{}/cp{}'.format(suffix, index+1)
cp_group.append((
member_vnf_index,
vnfd_list = [ping, pong]
- nsd_catalog = NetworkService("ping_pong_nsd%s" % (suffix))
+ if nsd_name is None:
+ nsd_name = "ping_pong_nsd%s" % (suffix)
+
+ nsd_catalog = NetworkService(nsd_name)
if use_scale_group:
group = ScaleGroup("ping_group", max_count=10)
nsd_catalog.compose(vnfd_list,
cpgroup_list,
mano_ut,
+ ns_descriptor_message,
use_ns_init_conf=use_ns_init_conf,
- use_vnf_init_conf=use_vnf_init_conf,)
+ use_vnf_init_conf=use_vnf_init_conf,
+ use_vca_conf=use_vca_conf,
+ use_ipv6=use_ipv6,
+ port_security=port_security,
+ use_virtual_ip=use_virtual_ip,
+ primitive_test=primitive_test,
+ vnfd_input_params=vnfd_input_params,
+ script_input_params=script_input_params)
if write_to_file:
ping.write_to_file(out_dir, ping_fmt if ping_fmt is not None else fmt)
pong.write_to_file(out_dir, pong_fmt if ping_fmt is not None else fmt)
nsd_catalog.write_config(out_dir, vnfd_list)
nsd_catalog.write_to_file(out_dir, ping_fmt if nsd_fmt is not None else fmt)
-
return (ping, pong, nsd_catalog)
parser.add_argument('-f', '--format', default='json')
parser.add_argument('-e', '--epa', action="store_true", default=False)
parser.add_argument('-a', '--aws', action="store_true", default=False)
+ parser.add_argument('--vnf-input-parameter', action="store_true", default=False)
parser.add_argument('-n', '--pingcount', default=NUM_PING_INSTANCES)
parser.add_argument('--ping-image-md5')
parser.add_argument('--pong-image-md5')
parser.add_argument('--ping-cloud-init', default=None)
parser.add_argument('--pong-cloud-init', default=None)
+ parser.add_argument('--charm', action="store_true", default=False)
+ parser.add_argument('-v', '--vca_conf', action="store_true", default=False)
+ parser.add_argument('--virtual-ip', action="store_true", default=False)
+ parser.add_argument('--static-ip', action="store_true", default=False)
+ parser.add_argument('--scale', action="store_true", default=False)
+ parser.add_argument('--primitive-test', action="store_true", default=False)
+
args = parser.parse_args()
outdir = args.outdir
output_format = args.format
use_epa = args.epa
+ use_vnf_input_params = args.vnf_input_parameter
aws = args.aws
- pingcount = args.pingcount
+ pingcount = int(args.pingcount)
use_ping_cloud_init_file = args.ping_cloud_init
use_pong_cloud_init_file = args.pong_cloud_init
generate_ping_pong_descriptors(args.format, True, args.outdir, pingcount,
- ping_md5sum=args.ping_image_md5, pong_md5sum=args.pong_image_md5,
+ ping_md5sum=args.ping_image_md5,
+ pong_md5sum=args.pong_image_md5,
mano_ut=False,
- use_scale_group=False,)
+ use_scale_group=args.scale,
+ use_charm=args.charm,
+ use_vca_conf=args.vca_conf,
+ use_virtual_ip=args.virtual_ip,
+ use_static_ip=args.static_ip,
+ primitive_test=args.primitive_test,
+ vnfd_input_params=use_vnf_input_params
+ )
if __name__ == "__main__":
main()
--- /dev/null
+#!/usr/bin/env python3
+
+############################################################################
+# Copyright 2016 RIFT.IO Inc #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+############################################################################
+
+
+import argparse
+import logging
+import os
+import subprocess
+import sys
+import time
+
+import yaml
+
+
+def ping_rate(yaml_cfg, logger):
+ '''Use curl to configure ping and set the ping rate'''
+
+ # Get the required and optional parameters
+ params = yaml_cfg['parameters']
+ mgmt_ip = params['mgmt_ip']
+ mgmt_port = 18888
+ if 'mgmt_port' in params:
+ mgmt_port = params['mgmt_port']
+ rate = 1
+ if 'rate' in params:
+ rate = params['rate']
+
+ cmd = 'curl -D /dev/stdout -H "Accept: application/json" ' \
+ '-H "Content-Type: application/json" ' \
+ '-X POST -d "{{\\"rate\\":{rate}}}" ' \
+ 'http://{mgmt_ip}:{mgmt_port}/api/v1/ping/rate'. \
+ format(
+ mgmt_ip=mgmt_ip,
+ mgmt_port=mgmt_port,
+ rate=rate)
+
+ logger.debug("Executing cmd: %s", cmd)
+ proc = subprocess.Popen(cmd, shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ proc.wait()
+
+ logger.debug("Process: {}".format(proc))
+
+ return proc.returncode
+
+
+def main(argv=sys.argv[1:]):
+ try:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
+ parser.add_argument("-q", "--quiet", dest="verbose", action="store_false")
+ args = parser.parse_args()
+
+ run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
+ if not os.path.exists(run_dir):
+ os.makedirs(run_dir)
+ log_file = "{}/ping_rate-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
+
+ # logging.basicConfig(filename=log_file, level=logging.DEBUG)
+ logger = logging.getLogger('ping-rate')
+ logger.setLevel(logging.DEBUG)
+
+ fh = logging.FileHandler(log_file)
+ fh.setLevel(logging.DEBUG)
+
+ ch = logging.StreamHandler()
+ if args.verbose:
+ ch.setLevel(logging.DEBUG)
+ else:
+ ch.setLevel(logging.INFO)
+
+ # create formatter and add it to the handlers
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ fh.setFormatter(formatter)
+ ch.setFormatter(formatter)
+ logger.addHandler(fh)
+ logger.addHandler(ch)
+
+ except Exception as e:
+ logger.exception("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+ try:
+ logger.debug("Input file: {}".format(args.yaml_cfg_file.name))
+ yaml_str = args.yaml_cfg_file.read()
+ yaml_cfg = yaml.load(yaml_str)
+ logger.debug("Input YAML: {}".format(yaml_cfg))
+
+ rc = ping_rate(yaml_cfg, logger)
+ logger.info("Return code: {}".format(rc))
+ sys.exit(rc)
+
+ except Exception as e:
+ logger.exception("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+if __name__ == "__main__":
+ main()
--- /dev/null
+#!/usr/bin/env python3
+
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import argparse
+import logging
+import os
+import stat
+import subprocess
+import sys
+import time
+import yaml
+
+def ping_config(run_dir, mgmt_ip, mgmt_port, pong_cp, logger, dry_run):
+ sh_file = "{}/ping_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
+ logger.debug("Creating script file %s" % sh_file)
+ f = open(sh_file, "w")
+ f.write(r'''
+#!/bin/bash
+
+# Rest API config
+ping_mgmt_ip='{}'
+ping_mgmt_port={}
+
+# VNF specific configuration
+pong_server_ip='{}'
+ping_rate=5
+server_port=5555
+'''.format(mgmt_ip, mgmt_port, pong_cp))
+
+ f.write(r'''
+# Check if the port is open
+DELAY=1
+MAX_TRIES=60
+COUNT=0
+while true; do
+ COUNT=$(expr $COUNT + 1)
+ timeout 1 bash -c "cat < /dev/null > /dev/tcp/${ping_mgmt_ip}/${ping_mgmt_port}"
+ rc=$?
+ if [ $rc -ne 0 ]
+ then
+ echo "Failed to connect to server ${ping_mgmt_ip}:${ping_mgmt_port} for ping with $rc!"
+ if [ ${COUNT} -gt ${MAX_TRIES} ]; then
+ exit $rc
+ fi
+ sleep ${DELAY}
+ else
+ break
+ fi
+done
+
+# Make rest API calls to configure VNF
+curl -D /dev/null \
+ -H "Accept: application/json" \
+ -H "Content-Type: application/json" \
+ -X POST \
+ -d "{\"ip\":\"$pong_server_ip\", \"port\":$server_port}" \
+ http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/server
+rc=$?
+if [ $rc -ne 0 ]
+then
+ echo "Failed to set server info for ping!"
+ exit $rc
+fi
+
+curl -D /dev/null \
+ -H "Accept: application/json" \
+ -H "Content-Type: application/json" \
+ -X POST \
+ -d "{\"rate\":$ping_rate}" \
+ http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/rate
+rc=$?
+if [ $rc -ne 0 ]
+then
+ echo "Failed to set ping rate!"
+ exit $rc
+fi
+
+output=$(curl -D /dev/null \
+ -H "Accept: application/json" \
+ -H "Content-Type: application/json" \
+ -X POST \
+ -d "{\"enable\":true}" \
+ http://${ping_mgmt_ip}:${ping_mgmt_port}/api/v1/ping/adminstatus/state)
+if [[ $output == *"Internal Server Error"* ]]
+then
+ echo $output
+ exit 3
+else
+ echo $output
+fi
+
+exit 0
+''')
+ f.close()
+ os.chmod(sh_file, stat.S_IRWXU)
+ if not dry_run:
+ rc = subprocess.call(sh_file, shell=True)
+ if rc:
+ logger.error("Config failed: {}".format(rc))
+ return False
+ return True
+
+
+
+def main(argv=sys.argv[1:]):
+ try:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
+ parser.add_argument("--dry-run", action="store_true")
+ parser.add_argument("--quiet", "-q", dest="verbose", action="store_false")
+ args = parser.parse_args()
+
+ run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
+ if not os.path.exists(run_dir):
+ os.makedirs(run_dir)
+ log_file = "{}/rift_ping_scale_config-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
+ logging.basicConfig(filename=log_file, level=logging.DEBUG)
+ logger = logging.getLogger()
+
+ ch = logging.StreamHandler()
+ if args.verbose:
+ ch.setLevel(logging.DEBUG)
+ else:
+ ch.setLevel(logging.INFO)
+
+ # create formatter and add it to the handlers
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ ch.setFormatter(formatter)
+ logger.addHandler(ch)
+
+ except Exception as e:
+ print("Got exception:{}".format(e))
+ raise e
+
+ try:
+ dry_run = args.dry_run
+
+ yaml_str = args.yaml_cfg_file.read()
+ logger.debug("Input YAML file: {}".format(yaml_str))
+ yaml_cfg = yaml.load(yaml_str)
+ logger.debug("Input YAML: {}".format(yaml_cfg))
+
+ # Check if this is post scale out trigger
+ if yaml_cfg['trigger'] != 'post_scale_out':
+ logger.error("Unexpected trigger {}".
+ format(yaml_cfg['trigger']))
+ raise
+
+ pong_cp = ""
+ for vnfr in yaml_cfg['vnfrs_others']:
+ # Find the pong VNFR, assuming vnfr name will
+ # have pong_vnfd as a substring
+ if 'pong_vnfd' in vnfr['name']:
+ for cp in vnfr['connection_points']:
+ logger.debug("Connection point {}".format(cp))
+ if 'cp0' in cp['name']:
+ pong_cp = cp['ip_address']
+ break
+ if not len(pong_cp):
+ logger.error("Did not get Pong cp0 IP")
+ raise
+
+ for vnfr in yaml_cfg['vnfrs_in_group']:
+ mgmt_ip = vnfr['rw_mgmt_ip']
+ mgmt_port = vnfr['rw_mgmt_port']
+ if ping_config(run_dir, mgmt_ip, mgmt_port, pong_cp, logger, dry_run):
+ logger.info("Successfully configured Ping {} at {}".
+ format(vnfr['name'], mgmt_ip))
+ else:
+ logger.error("Config of ping {} with {} failed".
+ format(vnfr['name'], mgmt_ip))
+ raise
+
+ except Exception as e:
+ logger.error("Got exception {}".format(e))
+ logger.exception(e)
+ raise e
+
+if __name__ == "__main__":
+ main()
def set_rate(mgmt_ip, port, rate):
curl_cmd = '''curl -D /dev/stdout \
- -H "Accept: application/vnd.yang.data+xml" \
- -H "Content-Type: application/vnd.yang.data+json" \
+ -H "Accept: application/json" \
+ -H "Content-Type: application/json" \
-X POST \
-d "{{ \\"rate\\":{ping_rate} }}" \
http://{ping_mgmt_ip}:{ping_mgmt_port}/api/v1/ping/rate
# Check if it is pong vnf
if 'ping_vnfd' in vnfr['name']:
- vnf_type = 'ping'
port = 18888
set_rate(vnfr['mgmt_ip_address'], port, rate)
break
--- /dev/null
+#!/usr/bin/env python3
+
+############################################################################
+# Copyright 2016 RIFT.IO Inc #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+############################################################################
+
+
+import argparse
+import logging
+import os
+import subprocess
+import sys
+import time
+
+import yaml
+
+
+def ping_setup(yaml_cfg, logger):
+ '''Use curl to configure ping and set the ping rate'''
+
+ # Get the required and optional parameters
+ params = yaml_cfg['parameters']
+ mgmt_ip = params['mgmt_ip']
+ mgmt_port = 18888
+ if 'mgmt_port' in params:
+ mgmt_port = params['mgmt_port']
+ pong_ip = params['pong_ip']
+ pong_port = 5555
+ if 'pong_port' in params:
+ pong_port = params['pong_port']
+ rate = 1
+ if 'rate' in params:
+ rate = params['rate']
+
+ cmd = 'curl -D /dev/stdout -H "Accept: application/json" ' \
+ '-H "Content-Type: application/json" ' \
+ '-X POST -d "{{\\"ip\\":\\"{pong_ip}\\", \\"port\\":{pong_port}}}" ' \
+ 'http://{mgmt_ip}:{mgmt_port}/api/v1/ping/server'. \
+ format(
+ mgmt_ip=mgmt_ip,
+ mgmt_port=mgmt_port,
+ pong_ip=pong_ip,
+ pong_port=pong_port)
+
+ logger.debug("Executing cmd: %s", cmd)
+ count = 0
+ delay = 5
+ max_tries = 12
+ rc = 0
+
+ while True:
+ count += 1
+ proc = subprocess.Popen(cmd, shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc.wait()
+
+ logger.debug("Process rc: {}".format(proc.returncode))
+
+ if proc.returncode == 0:
+ # Check if response is 200 OK
+ resp = proc.stdout.read().decode()
+ if 'HTTP/1.1 200 OK' in resp:
+ rc = 0
+ break
+ logger.error("Got error response: {}".format(resp))
+ rc = 1
+ break
+
+ elif proc.returncode == 7:
+ # Connection timeout
+ if count >= max_tries:
+ logger.error("Connect failed for {}. Failing".format(count))
+ rc = 7
+ break
+ # Try after delay
+ time.sleep(delay)
+ else:
+ #Exit the loop incase of errors other than connection timeout and response ok
+ err_resp = proc.stderr.read().decode()
+ logger.error("Got error response: {}".format(err_resp))
+ return proc.returncode
+
+ return rc
+
+def main(argv=sys.argv[1:]):
+ try:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
+ parser.add_argument("-q", "--quiet", dest="verbose", action="store_false")
+ args = parser.parse_args()
+
+ run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
+ if not os.path.exists(run_dir):
+ os.makedirs(run_dir)
+ log_file = "{}/ping_setup-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
+
+ # logging.basicConfig(filename=log_file, level=logging.DEBUG)
+ logger = logging.getLogger('ping-setup')
+ logger.setLevel(logging.DEBUG)
+
+ fh = logging.FileHandler(log_file)
+ fh.setLevel(logging.DEBUG)
+
+ ch = logging.StreamHandler()
+ if args.verbose:
+ ch.setLevel(logging.DEBUG)
+ else:
+ ch.setLevel(logging.INFO)
+
+ # create formatter and add it to the handlers
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ fh.setFormatter(formatter)
+ ch.setFormatter(formatter)
+ logger.addHandler(fh)
+ logger.addHandler(ch)
+
+ except Exception as e:
+ logger.exception("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+ try:
+ logger.debug("Input file: {}".format(args.yaml_cfg_file.name))
+ yaml_str = args.yaml_cfg_file.read()
+ yaml_cfg = yaml.load(yaml_str)
+ logger.debug("Input YAML: {}".format(yaml_cfg))
+
+ rc = ping_setup(yaml_cfg, logger)
+ logger.info("Return code: {}".format(rc))
+ sys.exit(rc)
+
+ except Exception as e:
+ logger.exception("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+if __name__ == "__main__":
+ main()
--- /dev/null
+#!/usr/bin/env python3
+
+############################################################################
+# Copyright 2016 RIFT.IO Inc #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+############################################################################
+
+
+import argparse
+import logging
+import os
+import subprocess
+import sys
+import time
+
+import yaml
+
+
+def ping_start_stop(yaml_cfg, logger):
+ '''Use curl to configure ping and set the ping rate'''
+
+ # Get the required and optional parameters
+ params = yaml_cfg['parameters']
+ mgmt_ip = params['mgmt_ip']
+ mgmt_port = 18888
+ if 'mgmt_port' in params:
+ mgmt_port = params['mgmt_port']
+ start = 'true'
+ if 'start' in params:
+ if not params['start']:
+ start = 'false'
+
+ cmd = 'curl -D /dev/stdout -H "Accept: application/json" ' \
+ '-H "Content-Type: application/json" ' \
+ '-X POST -d "{{\\"enable\\":{start}}}" ' \
+ 'http://{mgmt_ip}:{mgmt_port}/api/v1/ping/adminstatus/state'. \
+ format(
+ mgmt_ip=mgmt_ip,
+ mgmt_port=mgmt_port,
+ start=start)
+
+ logger.debug("Executing cmd: %s", cmd)
+ proc = subprocess.Popen(cmd, shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ proc.wait()
+ logger.debug("Process: {}".format(proc))
+
+ rc = proc.returncode
+
+ if rc == 0:
+ # Check if we got 200 OK
+ resp = proc.stdout.read().decode()
+ if 'HTTP/1.1 200 OK' not in resp:
+ logger.error("Got error response: {}".format(resp))
+ rc = 1
+
+ return rc
+
+
+def main(argv=sys.argv[1:]):
+ try:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
+ parser.add_argument("-q", "--quiet", dest="verbose", action="store_false")
+ args = parser.parse_args()
+
+ run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
+ if not os.path.exists(run_dir):
+ os.makedirs(run_dir)
+ log_file = "{}/ping_start_stop-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
+
+ # logging.basicConfig(filename=log_file, level=logging.DEBUG)
+ logger = logging.getLogger('ping-start-stop')
+ logger.setLevel(logging.DEBUG)
+
+ fh = logging.FileHandler(log_file)
+ fh.setLevel(logging.DEBUG)
+
+ ch = logging.StreamHandler()
+ if args.verbose:
+ ch.setLevel(logging.DEBUG)
+ else:
+ ch.setLevel(logging.INFO)
+
+ # create formatter and add it to the handlers
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ fh.setFormatter(formatter)
+ ch.setFormatter(formatter)
+ logger.addHandler(fh)
+ logger.addHandler(ch)
+
+ except Exception as e:
+ logger.exception("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+ try:
+ logger.debug("Input file: {}".format(args.yaml_cfg_file.name))
+ yaml_str = args.yaml_cfg_file.read()
+ yaml_cfg = yaml.load(yaml_str)
+ logger.debug("Input YAML: {}".format(yaml_cfg))
+
+ rc = ping_start_stop(yaml_cfg, logger)
+ logger.info("Return code: {}".format(rc))
+ sys.exit(rc)
+
+ except Exception as e:
+ logger.exception("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+if __name__ == "__main__":
+ main()
--- /dev/null
+#!/usr/bin/env python3
+
+############################################################################
+# Copyright 2016 RIFT.IO Inc #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+############################################################################
+
+
+import argparse
+import logging
+import os
+import subprocess
+import sys
+import time
+
+import yaml
+
+
+def pong_initial_config(yaml_cfg, logger):
+ '''Use curl to configure ping and set the ping rate'''
+ def find_vnfr(vnfr_dict, name):
+ try:
+ for k, v in vnfr_dict.items():
+ if v['name'] == name:
+ return v
+ except KeyError:
+ logger.warn("Could not find vnfr for name : %s", name)
+
+ def find_cp_ip(vnfr, cp_name):
+ for cp in vnfr['connection_point']:
+ logger.debug("Connection point: %s", format(cp))
+ if cp_name in cp['name']:
+ return cp['ip_address']
+
+ raise ValueError("Could not find vnfd %s connection point %s", cp_name)
+
+ def find_vnfr_mgmt_ip(vnfr):
+ return vnfr['mgmt_ip_address']
+
+ def get_vnfr_name(vnfr):
+ return vnfr['name']
+
+ def find_vdur_mgmt_ip(vnfr):
+ return vnfr['vdur'][0]['vm_management_ip']
+
+ def find_param_value(param_list, input_param):
+ for item in param_list:
+ logger.debug("Parameter: %s", format(item))
+ if item['name'] == input_param:
+ return item['value']
+
+ # Get the required and optional parameters
+ pong_vnfr = find_vnfr(yaml_cfg['vnfr'], yaml_cfg['vnfr_name'])
+ pong_vnf_mgmt_ip = find_vnfr_mgmt_ip(pong_vnfr)
+ pong_vnf_svc_ip = find_cp_ip(pong_vnfr, "pong_vnfd/cp0")
+
+
+ # Get the required and optional parameters
+ mgmt_ip = pong_vnf_mgmt_ip
+ mgmt_port = 18889
+ service_ip = pong_vnf_svc_ip
+ service_port = 5555
+
+ config_cmd = 'curl -D /dev/null -H "Accept: application/vnd.yang.data' \
+ '+xml" -H "Content-Type: application/vnd.yang.data+json" ' \
+ '-X POST -d "{{\\"ip\\":\\"{service_ip}\\", \\"port\\":{service_port}}}" ' \
+ 'http://{mgmt_ip}:{mgmt_port}/api/v1/pong/server'. \
+ format(
+ mgmt_ip=mgmt_ip,
+ mgmt_port=mgmt_port,
+ service_ip=service_ip,
+ service_port=service_port)
+
+ logger.debug("Executing cmd: %s", config_cmd)
+ count = 0
+ delay = 20
+ max_tries = 12
+ rc = 0
+
+ while True:
+ count += 1
+ proc = subprocess.Popen(config_cmd, shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ proc.wait()
+ logger.debug("Process: {}".format(proc))
+
+ if proc.returncode == 0:
+ # Check if response is 200 OK
+ logger.info("Success response ")
+ rc = 0
+ break
+ elif proc.returncode == 7:
+ # Connection timeout
+ if count >= max_tries:
+ logger.error("Connect failed for {}. Failing".format(count))
+ rc = 7
+ break
+ # Try after delay
+ time.sleep(delay)
+
+ return rc
+
+
+def main(argv=sys.argv[1:]):
+ try:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
+ parser.add_argument("-q", "--quiet", dest="verbose", action="store_false")
+ args = parser.parse_args()
+
+ run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
+ if not os.path.exists(run_dir):
+ os.makedirs(run_dir)
+ log_file = "{}/pong_initial_config-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
+
+ # logging.basicConfig(filename=log_file, level=logging.DEBUG)
+ logger = logging.getLogger('pong-initial-config')
+ logger.setLevel(logging.DEBUG)
+
+ fh = logging.FileHandler(log_file)
+ fh.setLevel(logging.DEBUG)
+
+ ch = logging.StreamHandler()
+ if args.verbose:
+ ch.setLevel(logging.DEBUG)
+ else:
+ ch.setLevel(logging.INFO)
+
+ # create formatter and add it to the handlers
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ fh.setFormatter(formatter)
+ ch.setFormatter(formatter)
+ logger.addHandler(fh)
+ logger.addHandler(ch)
+
+ except Exception as e:
+ logger.exception("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+ try:
+ logger.debug("Input file: {}".format(args.yaml_cfg_file.name))
+ yaml_str = args.yaml_cfg_file.read()
+ yaml_cfg = yaml.load(yaml_str)
+ logger.debug("Input YAML: {}".format(yaml_cfg))
+
+ rc = pong_initial_config(yaml_cfg, logger)
+ logger.info("Return code: {}".format(rc))
+ sys.exit(rc)
+
+ except Exception as e:
+ logger.exception("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+if __name__ == "__main__":
+ main()
--- /dev/null
+#!/usr/bin/env python3
+
+############################################################################
+# Copyright 2016 RIFT.IO Inc #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+############################################################################
+
+
+import argparse
+import logging
+import os
+import subprocess
+import sys
+import time
+
+import yaml
+
+
+def pong_setup(yaml_cfg, logger):
+ '''Use curl to configure ping and set the ping rate'''
+
+ # Get the required and optional parameters
+ params = yaml_cfg['parameters']
+ mgmt_ip = params['mgmt_ip']
+ mgmt_port = 18889
+ if 'mgmt_port' in params:
+ mgmt_port = params['mgmt_port']
+ service_ip = params['service_ip']
+ service_port = 5555
+ if 'service_port' in params:
+ service_port = params['service_port']
+ rate = 1
+ if 'rate' in params:
+ rate = params['rate']
+
+ config_cmd = 'curl -D /dev/stdout -H "Accept: application/json" ' \
+ '-H "Content-Type: application/json" ' \
+ '-X POST -d "{{\\"ip\\":\\"{service_ip}\\", \\"port\\":{service_port}}}" ' \
+ 'http://{mgmt_ip}:{mgmt_port}/api/v1/pong/server'. \
+ format(
+ mgmt_ip=mgmt_ip,
+ mgmt_port=mgmt_port,
+ service_ip=service_ip,
+ service_port=service_port)
+
+ logger.debug("Executing cmd: %s", config_cmd)
+ count = 0
+ delay = 5
+ max_tries = 12
+ rc = 0
+
+ while True:
+ count += 1
+ proc = subprocess.Popen(config_cmd, shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ proc.wait()
+ logger.debug("Process rc: {}".format(proc.returncode))
+
+ if proc.returncode == 0:
+ # Check if response is 200 OK
+ resp = proc.stdout.read().decode()
+ if 'HTTP/1.1 200 OK' in resp:
+ rc = 0
+ break
+ logger.error("Got error response: {}".format(resp))
+ rc = 1
+ break
+
+ elif proc.returncode == 7:
+ # Connection timeout
+ if count >= max_tries:
+ logger.error("Connect failed for {}. Failing".format(count))
+ rc = 7
+ break
+ # Try after delay
+ time.sleep(delay)
+ else:
+ #Exit the loop incase of errors other than connection timeout and response ok
+ err_resp = proc.stderr.read().decode()
+ logger.error("Got error response: {}".format(err_resp))
+ return proc.returncode
+
+ return rc
+
+
+def main(argv=sys.argv[1:]):
+ try:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
+ parser.add_argument("-q", "--quiet", dest="verbose", action="store_false")
+ args = parser.parse_args()
+
+ run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
+ if not os.path.exists(run_dir):
+ os.makedirs(run_dir)
+ log_file = "{}/pong_setup-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
+
+ # logging.basicConfig(filename=log_file, level=logging.DEBUG)
+ logger = logging.getLogger('pong-setup')
+ logger.setLevel(logging.DEBUG)
+
+ fh = logging.FileHandler(log_file)
+ fh.setLevel(logging.DEBUG)
+
+ ch = logging.StreamHandler()
+ if args.verbose:
+ ch.setLevel(logging.DEBUG)
+ else:
+ ch.setLevel(logging.INFO)
+
+ # create formatter and add it to the handlers
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ fh.setFormatter(formatter)
+ ch.setFormatter(formatter)
+ logger.addHandler(fh)
+ logger.addHandler(ch)
+
+ except Exception as e:
+ logger.exception("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+ try:
+ logger.debug("Input file: {}".format(args.yaml_cfg_file.name))
+ yaml_str = args.yaml_cfg_file.read()
+ yaml_cfg = yaml.load(yaml_str)
+ logger.debug("Input YAML: {}".format(yaml_cfg))
+
+ rc = pong_setup(yaml_cfg, logger)
+ logger.info("Return code: {}".format(rc))
+ sys.exit(rc)
+
+ except Exception as e:
+ logger.exception("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+if __name__ == "__main__":
+ main()
--- /dev/null
+#!/usr/bin/env python3
+
+############################################################################
+# Copyright 2016 RIFT.IO Inc #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+############################################################################
+
+
+import argparse
+import logging
+import os
+import subprocess
+import sys
+import time
+
+import yaml
+
+
+def pong_start_stop(yaml_cfg, logger):
+ '''Use curl to configure ping and set the ping rate'''
+
+ # Get the required and optional parameters
+ params = yaml_cfg['parameters']
+ mgmt_ip = params['mgmt_ip']
+ mgmt_port = 18889
+ if 'mgmt_port' in params:
+ mgmt_port = params['mgmt_port']
+ start = 'true'
+ if 'start' in params:
+ if not params['start']:
+ start = 'false'
+
+ cmd = 'curl -D /dev/stdout -H "Accept: application/json" ' \
+ '-H "Content-Type: application/json" ' \
+ '-X POST -d "{{\\"enable\\":{start}}}" ' \
+ 'http://{mgmt_ip}:{mgmt_port}/api/v1/pong/adminstatus/state'. \
+ format(
+ mgmt_ip=mgmt_ip,
+ mgmt_port=mgmt_port,
+ start=start)
+
+ logger.debug("Executing cmd: %s", cmd)
+ proc = subprocess.Popen(cmd, shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ proc.wait()
+ logger.debug("Process: {}".format(proc))
+
+ rc = proc.returncode
+
+ if rc == 0:
+ # Check if we got 200 OK
+ resp = proc.stdout.read().decode()
+ if 'HTTP/1.1 200 OK' not in resp:
+ logger._log.error("Got error response: {}".format(resp))
+ rc = 1
+
+ return rc
+
+
+def main(argv=sys.argv[1:]):
+ try:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
+ parser.add_argument("-q", "--quiet", dest="verbose", action="store_false")
+ args = parser.parse_args()
+
+ run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
+ if not os.path.exists(run_dir):
+ os.makedirs(run_dir)
+ log_file = "{}/pong_start_stop-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
+
+ # logging.basicConfig(filename=log_file, level=logging.DEBUG)
+ logger = logging.getLogger('pong-start-stop')
+ logger.setLevel(logging.DEBUG)
+
+ fh = logging.FileHandler(log_file)
+ fh.setLevel(logging.DEBUG)
+
+ ch = logging.StreamHandler()
+ if args.verbose:
+ ch.setLevel(logging.DEBUG)
+ else:
+ ch.setLevel(logging.INFO)
+
+ # create formatter and add it to the handlers
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ fh.setFormatter(formatter)
+ ch.setFormatter(formatter)
+ logger.addHandler(fh)
+ logger.addHandler(ch)
+
+ except Exception as e:
+ logger.exception("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+ try:
+ logger.debug("Input file: {}".format(args.yaml_cfg_file.name))
+ yaml_str = args.yaml_cfg_file.read()
+ yaml_cfg = yaml.load(yaml_str)
+ logger.debug("Input YAML: {}".format(yaml_cfg))
+
+ rc = pong_start_stop(yaml_cfg, logger)
+ logger.info("Return code: {}".format(rc))
+ sys.exit(rc)
+
+ except Exception as e:
+ logger.exception("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+if __name__ == "__main__":
+ main()
--- /dev/null
+#!/usr/bin/env python3
+
+############################################################################
+# Copyright 2017 RIFT.IO Inc #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+############################################################################
+
+
+import argparse
+import os
+import sys
+import random
+import paramiko
+import yaml
+from glob import glob
+
+
+def copy_file_ssh_sftp(server, remote_dir, remote_file, local_file):
+ """Copy file to VM."""
+ sshclient = paramiko.SSHClient()
+ sshclient.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ sshclient.load_system_host_keys(filename="/dev/null")
+ sshclient.connect(server, username="fedora", password="fedora")
+ sftpclient = sshclient.open_sftp()
+ sftpclient.put(local_file, remote_dir + '/' + remote_file)
+ sshclient.close()
+
+
+def get_full_path(file_name, production_launchpad=True):
+ """Return the full path for the init cfg file."""
+ mpath = os.path.join(
+ os.getenv('RIFT_INSTALL'), 'var', 'rift')
+ if not production_launchpad:
+ launchpad_folder = glob('{}/*mgmt-vm-lp-2'.format(mpath))[0]
+ else:
+ launchpad_folder = ''
+ mpath = os.path.join(
+ os.getenv('RIFT_INSTALL'), 'var', 'rift', launchpad_folder,
+ 'launchpad', 'packages', 'vnfd', 'default')
+ vnfd_folder = random.choice(
+ [x for x in os.listdir(mpath) if os.path.isdir(
+ os.path.join(mpath, x))])
+ full_path = glob(
+ '{}/{}/cloud_init/{}'.format(mpath, vnfd_folder, file_name))[0]
+ file_name = os.path.basename(os.path.normpath(full_path))
+ return full_path, file_name
+
+
+def exists_remote(host, path):
+ """Test if a file exists at path on a host accessible with SSH."""
+ ssh = paramiko.SSHClient()
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ try:
+ ssh.connect(host, username="fedora", password="fedora")
+ sftp = ssh.open_sftp()
+ try:
+ sftp.stat(path)
+ except Exception:
+ raise Exception('Transfered file not found on the remote host')
+ ssh.close()
+ except paramiko.SSHException:
+ print("Connection Error")
+
+
+def primitive_test(yaml_cfg):
+ """Transfer a cloud init file from the vnfd descriptor package.
+
+ Verify that the file is transfered.
+ """
+ for index, vnfr in yaml_cfg['vnfr_data_map'].items():
+ vnfd_ip = vnfr['mgmt_interface']['ip_address']
+ file_name = '*_cloud_init.cfg'
+ local_file, file_name = get_full_path(file_name)
+ copy_file_ssh_sftp(vnfd_ip, '/tmp/', file_name, local_file)
+ remote_file_path = os.path.join(
+ '/'
+ 'tmp',
+ file_name)
+ exists_remote(vnfd_ip, remote_file_path)
+
+
+def main(argv=sys.argv[1:]):
+ """Main."""
+ try:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
+ parser.add_argument(
+ "-q", "--quiet", dest="verbose",
+ action="store_false")
+ args = parser.parse_args()
+
+ except Exception as e:
+ print("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+ try:
+ yaml_str = args.yaml_cfg_file.read()
+ yaml_cfg = yaml.load(yaml_str)
+
+ primitive_test(yaml_cfg)
+
+ except Exception as e:
+ print("Exception: {}".format(e))
+ raise e
+
+if __name__ == "__main__":
+ main()
'''Use curl and set admin status to enable on pong and ping vnfs'''
def enable_service(mgmt_ip, port, vnf_type):
- curl_cmd = 'curl -D /dev/stdout -H "Accept: application/vnd.yang.data' \
- '+xml" -H "Content-Type: application/vnd.yang.data+json" ' \
+ curl_cmd = 'curl -D /dev/null -H "Accept: application/json" ' \
+ '-H "Content-Type: application/json" ' \
'-X POST -d "{{\\"enable\\":true}}" http://{mgmt_ip}:' \
'{mgmt_port}/api/v1/{vnf_type}/adminstatus/state'. \
format(
mgmt_port=port,
vnf_type=vnf_type)
- logger.debug("Executing cmd: %s", curl_cmd)
- subprocess.check_call(curl_cmd, shell=True)
+ count = 0
+ delay = 10
+ max_tries = 10
+ while True:
+ count += 1
+
+ logger.debug("Executing cmd: %s", curl_cmd)
+ proc = subprocess.Popen(curl_cmd, shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ proc.wait()
+ logger.debug("Process: {}".format(proc))
+
+ if proc.returncode == 0:
+ # Check if response is 200 OK
+ logger.info("Got success response")
+ break
+
+ elif proc.returncode == 7:
+ # Connection timeout
+ if count >= max_tries:
+ logger.error("Connect failed for {}. Failing".format(count))
+ break
+ # Try after delay
+ time.sleep(delay)
+ else:
+ #Exit the loop incase of errors other than connection timeout and response ok
+ err_resp = proc.stderr.read().decode()
+ logger.error("Got error response: {}".format(err_resp))
+ return proc.returncode
+
+ return proc.returncode
# Enable pong service first
for index, vnfr in yaml_cfg['vnfr'].items():
if 'pong_vnfd' in vnfr['name']:
vnf_type = 'pong'
port = 18889
- enable_service(vnfr['mgmt_ip_address'], port, vnf_type)
+ rc = enable_service(vnfr['mgmt_ip_address'], port, vnf_type)
+ if rc != 0:
+ logger.error("Enable service for pong failed: {}".
+ format(rc))
+ return rc
break
+
# Add a delay to provide pong port to come up
- time.sleep(0.1)
+ time.sleep(1)
# Enable ping service next
for index, vnfr in yaml_cfg['vnfr'].items():
if 'ping_vnfd' in vnfr['name']:
vnf_type = 'ping'
port = 18888
- enable_service(vnfr['mgmt_ip_address'], port, vnf_type)
+ rc = enable_service(vnfr['mgmt_ip_address'], port, vnf_type)
break
+ return rc
+
+
def main(argv=sys.argv[1:]):
try:
parser = argparse.ArgumentParser()
if not os.path.exists(run_dir):
os.makedirs(run_dir)
log_file = "{}/ping_pong_start_traffic-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
- logging.basicConfig(filename=log_file, level=logging.DEBUG)
- logger = logging.getLogger()
- except Exception as e:
- print("Exception in {}: {}".format(__file__, e))
- sys.exit(1)
+ # logging.basicConfig(filename=log_file, level=logging.DEBUG)
+ logger = logging.getLogger('ping-pong-start-traffic')
+ logger.setLevel(logging.DEBUG)
+
+ fh = logging.FileHandler(log_file)
+ fh.setLevel(logging.DEBUG)
- try:
ch = logging.StreamHandler()
if args.verbose:
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ fh.setFormatter(formatter)
ch.setFormatter(formatter)
+ logger.addHandler(fh)
logger.addHandler(ch)
except Exception as e:
- logger.exception(e)
- raise e
+ logger.exception("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
try:
+ logger.debug("Input file: {}".format(args.yaml_cfg_file.name))
yaml_str = args.yaml_cfg_file.read()
- # logger.debug("Input YAML file:\n{}".format(yaml_str))
yaml_cfg = yaml.load(yaml_str)
logger.debug("Input YAML: {}".format(yaml_cfg))
- start_traffic(yaml_cfg, logger)
+ rc = start_traffic(yaml_cfg, logger)
+ logger.info("Return code: {}".format(rc))
+ sys.exit(rc)
except Exception as e:
- logger.exception(e)
- raise e
+ logger.exception("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
if __name__ == "__main__":
main()
--- /dev/null
+#!/usr/bin/env python3
+
+############################################################################
+# Copyright 2016 RIFT.IO Inc #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+############################################################################
+
+
+import argparse
+import logging
+import os
+import subprocess
+import sys
+import time
+
+import yaml
+
+
+def stop_traffic(yaml_cfg, logger):
+ '''Use curl and set admin status to enable on pong and ping vnfs'''
+
+ def disable_service(mgmt_ip, port, vnf_type):
+ curl_cmd = 'curl -D /dev/null -H "Accept: application/json" ' \
+ '-H "Content-Type: application/json" ' \
+ '-X POST -d "{{\\"enable\\":false}}" http://{mgmt_ip}:' \
+ '{mgmt_port}/api/v1/{vnf_type}/adminstatus/state'. \
+ format(
+ mgmt_ip=mgmt_ip,
+ mgmt_port=port,
+ vnf_type=vnf_type)
+
+ logger.debug("Executing cmd: %s", curl_cmd)
+ subprocess.check_call(curl_cmd, shell=True)
+
+ # Disable ping service first
+ for index, vnfr in yaml_cfg['vnfr'].items():
+ logger.debug("VNFR {}: {}".format(index, vnfr))
+
+ # Check if it is pong vnf
+ if 'ping_vnfd' in vnfr['name']:
+ vnf_type = 'ping'
+ port = 18888
+ disable_service(vnfr['mgmt_ip_address'], port, vnf_type)
+ break
+
+ # Add a delay
+ time.sleep(0.1)
+
+ # Disable pong service next
+ for index, vnfr in yaml_cfg['vnfr'].items():
+ logger.debug("VNFR {}: {}".format(index, vnfr))
+
+ # Check if it is pong vnf
+ if 'pong_vnfd' in vnfr['name']:
+ vnf_type = 'pong'
+ port = 18889
+ disable_service(vnfr['mgmt_ip_address'], port, vnf_type)
+ break
+
+def main(argv=sys.argv[1:]):
+ try:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
+ parser.add_argument("-q", "--quiet", dest="verbose", action="store_false")
+ args = parser.parse_args()
+
+ run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
+ if not os.path.exists(run_dir):
+ os.makedirs(run_dir)
+ log_file = "{}/ping_pong_stop_traffic-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
+ logging.basicConfig(filename=log_file, level=logging.DEBUG)
+ logger = logging.getLogger()
+
+ except Exception as e:
+ print("Exception in {}: {}".format(__file__, e))
+ sys.exit(1)
+
+ try:
+ ch = logging.StreamHandler()
+ if args.verbose:
+ ch.setLevel(logging.DEBUG)
+ else:
+ ch.setLevel(logging.INFO)
+
+ # create formatter and add it to the handlers
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ ch.setFormatter(formatter)
+ logger.addHandler(ch)
+
+ except Exception as e:
+ logger.exception(e)
+ raise e
+
+ try:
+ yaml_str = args.yaml_cfg_file.read()
+ # logger.debug("Input YAML file:\n{}".format(yaml_str))
+ yaml_cfg = yaml.load(yaml_str)
+ logger.debug("Input YAML: {}".format(yaml_cfg))
+
+ stop_traffic(yaml_cfg, logger)
+
+ except Exception as e:
+ logger.exception(e)
+ raise e
+
+if __name__ == "__main__":
+ main()
fi
}
-echo -n "wait for system"
-while ! system_is_up
-do
- echo -n "."
- sleep 5s
-done
+#echo -n "wait for system"
+#while ! system_is_up
+#do
+# echo -n "."
+# sleep 5s
+#done
echo "system is up"
echo "must supply transaction id to wait for"
exit -1
fi
-
- response=$(curl --silent --insecure https://${lp_ip}:4567/api/upload/${transaction_id}/state)
+
+ project="default"
+ response=$(curl --silent --insecure https://${lp_ip}:8008/api/operational/project/${project}/create-jobs/job/${transaction_id})
transaction_state=$(echo ${response} | awk -F "status" '{print $2}' | awk '{print $2}')
transaction_state=${transaction_state:1:-2}
cmake_minimum_required(VERSION 2.8)
-set(PKG_NAME models)
-set(PKG_VERSION 1.0)
-set(PKG_RELEASE 1)
-set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
-
set(subdirs
plugins
openmano
openmano
openmano_cleanup.sh
DESTINATION usr/bin
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
)
rift/openmano/__init__.py
rift/openmano/rift2openmano.py
rift/openmano/openmano_client.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY)
return None
+ def vnfs(self):
+ url = "http://{host}:{port}/openmano/{tenant}/vnfs".format(
+ host=self._host,
+ port=self._port,
+ tenant=self._tenant
+ )
+ resp = self._session.get(url, headers={'content-type': 'application/json'})
+
+ try:
+ resp.raise_for_status()
+ except requests.exceptions.HTTPError as e:
+ raise InstanceStatusError(e)
+
+ return resp.json()
+
+ def vnf(self, vnf_id):
+ vnf_uuid = None
+ try:
+ vnfs = self.vnfs()
+ for vnf in vnfs["vnfs"]:
+ # Rift vnf ID gets mapped to osm_id in OpenMano
+ if vnf_id == vnf["osm_id"]:
+ vnf_uuid = vnf["uuid"]
+ break
+ except Exception as e:
+ raise e
+
+ if not vnf_uuid:
+ return None
+ else:
+ url = "http://{host}:{port}/openmano/{tenant}/vnfs/{uuid}".format(
+ host=self._host,
+ port=self._port,
+ tenant=self._tenant,
+ uuid=vnf_uuid
+ )
+ resp = self._session.get(url, headers={'content-type': 'application/json'})
+
+ try:
+ resp.raise_for_status()
+ except requests.exceptions.HTTPError as e:
+ raise InstanceStatusError(e)
+
+ return resp.json()['vnf']
+
+ def scenarios(self):
+ url = "http://{host}:{port}/openmano/{tenant}/scenarios".format(
+ host=self._host,
+ port=self._port,
+ tenant=self._tenant
+ )
+ resp = self._session.get(url, headers={'content-type': 'application/json'})
+
+ try:
+ resp.raise_for_status()
+ except requests.exceptions.HTTPError as e:
+ raise InstanceStatusError(e)
+
+ return resp.json()
+
+ def scenario(self, scenario_id):
+ scenario_uuid = None
+ try:
+ scenarios = self.scenarios()
+ for scenario in scenarios["scenarios"]:
+ # Rift NS ID gets mapped to osm_id in OpenMano
+ if scenario_id == scenario["osm_id"]:
+ scenario_uuid = scenario["uuid"]
+ break
+ except Exception as e:
+ raise e
+
+ if not scenario_uuid:
+ return None
+ else:
+ url = "http://{host}:{port}/openmano/{tenant}/scenarios/{uuid}".format(
+ host=self._host,
+ port=self._port,
+ tenant=self._tenant,
+ uuid=scenario_uuid
+ )
+ resp = self._session.get(url, headers={'content-type': 'application/json'})
+
+ try:
+ resp.raise_for_status()
+ except requests.exceptions.HTTPError as e:
+ raise InstanceStatusError(e)
+
+ return resp.json()['scenario']
+
+ def post_vnfd_v3(self, vnfd_body):
+ # Check if the VNF is present at the RO
+ vnf_rift_id = vnfd_body["vnfd:vnfd-catalog"]["vnfd"][0]["id"]
+ vnf_check = self.vnf(vnf_rift_id)
+
+ if not vnf_check:
+ url = "http://{host}:{port}/openmano/v3/{tenant}/vnfd".format(
+ host=self._host,
+ port=self._port,
+ tenant=self._tenant
+ )
+ payload_data = json.dumps(vnfd_body)
+ resp = self._session.post(url, headers={'content-type': 'application/json'},
+ data=payload_data)
+ try:
+ resp.raise_for_status()
+ except requests.exceptions.HTTPError as e:
+ raise InstanceStatusError(e)
+
+ return resp.json()['vnfd'][0]
+
+ else:
+ return vnf_check
+
+ def post_nsd_v3(self, nsd_body):
+ # Check if the NS (Scenario) is present at the RO
+ scenario_rift_id = nsd_body["nsd:nsd-catalog"]["nsd"][0]["id"]
+ scenario_check = self.scenario(scenario_rift_id)
+
+ if not scenario_check:
+ url = "http://{host}:{port}/openmano/v3/{tenant}/nsd".format(
+ host=self._host,
+ port=self._port,
+ tenant=self._tenant
+ )
+ payload_data = json.dumps(nsd_body)
+ resp = self._session.post(url, headers={'content-type': 'application/json'},
+ data=payload_data)
+ try:
+ resp.raise_for_status()
+ except requests.exceptions.HTTPError as e:
+ raise InstanceStatusError(e)
+
+ return resp.json()['nsd'][0]
+ else:
+ return scenario_check
+
class OpenmanoCliAPI(object):
""" This class implements the necessary funtionality to interact with """
import sys
import tempfile
import yaml
+import ast
+import json
import gi
gi.require_version('RwYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
-gi.require_version('RwNsdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
+gi.require_version('NsdYang', '1.0')
+gi.require_version('VnfdYang', '1.0')
from gi.repository import (
RwYang,
- RwVnfdYang,
- RwNsdYang,
+ RwProjectVnfdYang as RwVnfdYang,
+ RwProjectNsdYang as RwNsdYang,
+ NsdYang as NsdYang,
+ VnfdYang as VnfdYang
)
import rift.package.store
class RiftNSD(object):
- model = RwYang.Model.create_libncx()
+ model = RwYang.Model.create_libyang()
model.load_module('nsd')
- model.load_module('rw-nsd')
-
+
def __init__(self, descriptor):
self._nsd = descriptor
@classmethod
def from_xml_file_hdl(cls, hdl):
hdl.seek(0)
- descriptor = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ descriptor = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
descriptor.from_xml_v2(RiftNSD.model, hdl.read())
return cls(descriptor)
@classmethod
def from_yaml_file_hdl(cls, hdl):
hdl.seek(0)
- descriptor = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ descriptor = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
descriptor.from_yaml(RiftNSD.model, hdl.read())
return cls(descriptor)
- @classmethod
- def from_dict(cls, nsd_dict):
- descriptor = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd.from_dict(nsd_dict)
- return cls(descriptor)
+ def from_dict(self):
+ descriptor = NsdYang.YangData_Nsd_NsdCatalog_Nsd.from_dict(self._nsd.as_dict(), ignore_missing_keys=True).to_json_without_namespace(RiftNSD.model)
+ return descriptor
class RiftVNFD(object):
- model = RwYang.Model.create_libncx()
+ model = RwYang.Model.create_libyang()
model.load_module('vnfd')
- model.load_module('rw-vnfd')
-
+
def __init__(self, descriptor):
self._vnfd = descriptor
@classmethod
def from_xml_file_hdl(cls, hdl):
hdl.seek(0)
- descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+ descriptor = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
descriptor.from_xml_v2(RiftVNFD.model, hdl.read())
return cls(descriptor)
@classmethod
def from_yaml_file_hdl(cls, hdl):
hdl.seek(0)
- descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+ descriptor = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
descriptor.from_yaml(RiftVNFD.model, hdl.read())
return cls(descriptor)
- @classmethod
- def from_dict(cls, vnfd_dict):
- descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict(vnfd_dict)
- return cls(descriptor)
+ def from_dict(self):
+ descriptor = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict(self._vnfd.as_dict(), ignore_missing_keys=True).to_json_without_namespace(RiftVNFD.model)
+ return descriptor
def is_writable_directory(dir_path):
return vnfd_name + "__" + str(member_idx)
-def rift2openmano_nsd(rift_nsd, rift_vnfds, openmano_vnfd_ids, rift_vnfd_id=None):
- if rift_vnfd_id is None:
- for vnfd_id in rift_nsd.vnfd_ids:
- if vnfd_id not in rift_vnfds:
- raise VNFNotFoundError("VNF id %s not provided" % vnfd_id)
-
- openmano = {}
- openmano["name"] = rift_nsd.name
- if rift_vnfd_id is not None:
- for scaling_groups in rift_nsd.scaling_group_descriptor:
- openmano["name"] += scaling_groups.name
- openmano["description"] = rift_nsd.description
- topology = {}
- openmano["topology"] = topology
-
- topology["nodes"] = {}
- for vnfd in rift_nsd.constituent_vnfds:
- vnfd_id = vnfd.vnfd_id_ref
- if rift_vnfd_id is not None and rift_vnfd_id != vnfd_id:
- continue
- rift_vnfd = rift_vnfds[vnfd_id]
- member_idx = vnfd.member_vnf_index
- openmano_vnfd_id = openmano_vnfd_ids.get(vnfd_id,None)
- if openmano_vnfd_id:
- topology["nodes"][rift_vnfd.name + "__" + str(member_idx)] = {
- "type": "VNF",
- "vnf_id": openmano_vnfd_id
- }
- else:
- topology["nodes"][rift_vnfd.name + "__" + str(member_idx)] = {
- "type": "VNF",
- "VNF model": rift_vnfd.name
- }
-
- for vld in rift_nsd.vlds:
- # Openmano has both bridge_net and dataplane_net models for network types
- # For now, since we are using openmano in developer mode lets just hardcode
- # to bridge_net since it won't matter anyways.
- # topology["nodes"][vld.name] = {"type": "network", "model": "bridge_net"}
- pass
-
- topology["connections"] = {}
- for vld in rift_nsd.vlds:
-
- # Create a connections entry for each external VLD
- topology["connections"][vld.name] = {}
- topology["connections"][vld.name]["nodes"] = []
-
- #if vld.vim_network_name:
- if True:
- if vld.name not in topology["nodes"]:
- topology["nodes"][vld.name] = {
- "type": "external_network",
- "model": vld.name,
- }
-
- # Add the external network to the list of connection points
- topology["connections"][vld.name]["nodes"].append(
- {vld.name: "0"}
- )
- elif vld.provider_network.has_field("physical_network"):
- # Add the external datacenter network to the topology
- # node list if it isn't already added
- ext_net_name = vld.provider_network.physical_network
- ext_net_name_with_seg = ext_net_name
- if vld.provider_network.has_field("segmentation_id"):
- ext_net_name_with_seg += ":{}".format(vld.provider_network.segmentation_id)
-
- if ext_net_name not in topology["nodes"]:
- topology["nodes"][ext_net_name] = {
- "type": "external_network",
- "model": ext_net_name_with_seg,
- }
-
- # Add the external network to the list of connection points
- topology["connections"][vld.name]["nodes"].append(
- {ext_net_name: "0"}
- )
-
-
- for vnfd_cp in vld.vnfd_connection_point_ref:
-
- # Get the RIFT VNF for this external VLD connection point
- vnfd = rift_vnfds[vnfd_cp.vnfd_id_ref]
-
- # For each VNF in this connection, use the same interface name
- topology["connections"][vld.name]["type"] = "link"
- # Vnf ref is the vnf name with the member_vnf_idx appended
- member_idx = vnfd_cp.member_vnf_index_ref
- vnf_ref = vnfd.name + "__" + str(member_idx)
- topology["connections"][vld.name]["nodes"].append(
- {
- vnf_ref: vnfd_cp.vnfd_connection_point_ref
- }
- )
- return openmano
-
-def rift2openmano_vnfd_nsd(rift_nsd, rift_vnfds, openmano_vnfd_ids,rift_vnfd_id=None):
-
- if rift_vnfd_id not in rift_vnfds:
- raise VNFNotFoundError("VNF id %s not provided" % rift_vnfd_id)
-
- openmano_vnfd_nsd = {}
- for groups in rift_nsd.scaling_group_descriptor:
- openmano_vnfd_nsd["name"] = rift_vnfd_id+'__'+'scaling_group'+'__'+groups.name
- openmano_vnfd_nsd["description"] = "Scaling Group"
- topology = {}
- openmano_vnfd_nsd["topology"] = topology
- topology["connections"] = {}
- topology["nodes"] = {}
- tst_index = []
- openmano_vnfd_id = openmano_vnfd_ids.get(rift_vnfd_id,None)
- for rvnfd_id in rift_nsd.constituent_vnfds:
- if rvnfd_id.vnfd_id_ref == rift_vnfd_id:
- rift_vnfd = rift_vnfds[rift_vnfd_id]
- topology["nodes"][rift_vnfd.name +'__'+str(rvnfd_id.member_vnf_index)] = {
- "type": "VNF",
- "vnf_id": openmano_vnfd_id
- }
-
- for vld in rift_nsd.vlds:
-
- # Create a connections entry for each external VLD
- topology["connections"][vld.name] = {}
- topology["connections"][vld.name]["nodes"] = []
- if True:
- if vld.name not in topology["nodes"]:
- topology["nodes"][vld.name] = {
- "type": "external_network",
- "model": vld.name,
- }
- topology["connections"][vld.name]["nodes"].append(
- {vld.name: "0"}
- )
-
-
-
- for vnfd_cp in vld.vnfd_connection_point_ref:
- if not rift_vnfd_id in vnfd_cp.vnfd_id_ref:
- continue
- if rift_vnfd_id in vnfd_cp.vnfd_id_ref:
-
- # Get the RIFT VNF for this external VLD connection point
- vnfd = rift_vnfds[vnfd_cp.vnfd_id_ref]
-
-
- # For each VNF in this connection, use the same interface name
- topology["connections"][vld.name]["type"] = "link"
- # Vnf ref is the vnf name with the member_vnf_idx appended
- member_idx = vnfd_cp.member_vnf_index_ref
- vnf_ref = vnfd.name + "__" + str(member_idx)
- topology["connections"][vld.name]["nodes"].append(
- {
- vnf_ref: vnfd_cp.vnfd_connection_point_ref
- }
- )
- return openmano_vnfd_nsd
-
-
-def cloud_init(rift_vnfd_id, vdu):
+def rift2openmano_nsd(rift_nsd, rift_vnfds, openmano_vnfd_ids, http_api, rift_vnfd_id=None):
+ try:
+ if rift_vnfd_id is None:
+ for vnfd_id in rift_nsd.vnfd_ids:
+ if vnfd_id not in rift_vnfds:
+ raise VNFNotFoundError("VNF id %s not provided" % vnfd_id)
+
+ openmano_nsd_im_body = json.loads(rift_nsd.from_dict())
+ openmano_nsd_api_format = {
+ "nsd:nsd-catalog": {
+ "nsd": [openmano_nsd_im_body['nsd-catalog']['nsd'][0]]
+ }
+ }
+
+ openmano_nsd = http_api.post_nsd_v3(openmano_nsd_api_format)
+
+ return openmano_nsd
+
+ except Exception as e:
+ logger.error(e)
+ raise e
+
+def rift2openmano_vnfd_nsd(rift_nsd, rift_vnfds, openmano_vnfd_ids, http_api, rift_vnfd_id=None):
+ try:
+ if rift_vnfd_id not in rift_vnfds:
+ raise VNFNotFoundError("VNF id %s not provided" % rift_vnfd_id)
+
+ # This is the scaling NSD Descriptor. Can use the NSD IM Model.
+ openmano_nsd_im_body = json.loads(rift_nsd.from_dict())
+
+ openmano_nsd_api_format = {
+ "nsd:nsd-catalog": {
+ "nsd": [openmano_nsd_im_body['nsd-catalog']['nsd'][0]]
+ }
+ }
+
+ openmano_nsd = http_api.post_nsd_v3(openmano_nsd_im_body)
+
+ return openmano_nsd
+
+ except Exception as e:
+ logger.error(e)
+ raise e
+
+
+def cloud_init(rift_vnfd_id, vdu, project_name='default'):
""" Populate cloud_init with script from
either the inline contents or from the file provided
"""
- vnfd_package_store = rift.package.store.VnfdPackageFilesystemStore(logger)
+ vnfd_package_store = rift.package.store.VnfdPackageFilesystemStore(logger, project=project_name)
cloud_init_msg = None
- if vdu.cloud_init is not None:
- logger.debug("cloud_init script provided inline %s", vdu.cloud_init)
- cloud_init_msg = vdu.cloud_init
- elif vdu.cloud_init_file is not None:
- # Get cloud-init script contents from the file provided in the cloud_init_file param
- logger.debug("cloud_init script provided in file %s", vdu.cloud_init_file)
- filename = vdu.cloud_init_file
+ if 'cloud_init' in vdu:
+ logger.debug("cloud_init script provided inline %s", vdu['cloud_init'])
+ cloud_init_msg = vdu['cloud_init']
+ elif 'cloud_init_file' in vdu:
+ # Get cloud-init script contents from the file provided in the cloud_init_file param
+ logger.debug("cloud_init script provided in file %s", vdu['cloud_init_file'])
+ filename = vdu['cloud_init_file']
vnfd_package_store.refresh()
stored_package = vnfd_package_store.get_package(rift_vnfd_id)
cloud_init_extractor = rift.package.cloud_init.PackageCloudInitExtractor(logger)
logger.debug("Current cloud init msg is {}".format(cloud_init_msg))
return cloud_init_msg
-def config_file_init(rift_vnfd_id, vdu, cfg_file):
+def config_file_init(rift_vnfd_id, vdu, cfg_file, project_name='default'):
""" Populate config file init with file provided
"""
- vnfd_package_store = rift.package.store.VnfdPackageFilesystemStore(logger)
+ vnfd_package_store = rift.package.store.VnfdPackageFilesystemStore(logger, project=project_name)
# Get script contents from the file provided in the cloud_init directory
logger.debug("config file script provided in file {}".format(cfg_file))
logger.debug("Current config file msg is {}".format(cfg_file_msg))
return cfg_file_msg
-def rift2openmano_vnfd(rift_vnfd, rift_nsd):
- openmano_vnf = {"vnf":{}}
- vnf = openmano_vnf["vnf"]
-
- vnf["name"] = rift_vnfd.name
- vnf["description"] = rift_vnfd.description
-
- vnf["external-connections"] = []
-
- def find_vdu_and_ext_if_by_cp_ref(cp_ref_name):
- for vdu in rift_vnfd.vdus:
- for ext_if in vdu.external_interface:
- if ext_if.vnfd_connection_point_ref == cp_ref_name:
- return vdu, ext_if
-
- raise ValueError("External connection point reference %s not found" % cp_ref_name)
-
- def find_vdu_and_int_if_by_cp_ref(cp_ref_id):
- for vdu in rift_vnfd.vdus:
- for int_if in vdu.internal_interface:
- if int_if.vdu_internal_connection_point_ref == cp_ref_id:
- return vdu, int_if
-
- raise ValueError("Internal connection point reference %s not found" % cp_ref_id)
-
- def rift2openmano_if_type(ext_if):
-
- cp_ref_name = ext_if.vnfd_connection_point_ref
- for vld in rift_nsd.vlds:
-
- # if it is an explicit mgmt_network then check if the given
- # cp_ref is a part of it
- if not vld.mgmt_network:
- continue
-
- for vld_cp in vld.vnfd_connection_point_ref:
- if vld_cp.vnfd_connection_point_ref == cp_ref_name:
- return "mgmt"
-
-
- rift_type = ext_if.virtual_interface.type_yang
- # Retaining it for backward compatibility!
- if rift_type == "OM_MGMT":
- return "mgmt"
- elif rift_type == "VIRTIO" or rift_type == "E1000":
- return "bridge"
- else:
- return "data"
+def rift2openmano_vnfd(rift_vnfd, rift_nsd, http_api):
+ try:
+ openmano_vnfd_im_body = json.loads(rift_vnfd.from_dict())
+
+ # All type_yang leafs renamed to type
+
+
+ vnfd_dict = openmano_vnfd_im_body['vnfd-catalog']['vnfd'][0]
+
+ if 'vdu' in vnfd_dict:
+ for vdu in vnfd_dict['vdu']:
+ if 'cloud_init_file' in vdu:
+ # Replacing the leaf with the actual contents of the file.
+ # The RO does not have the ability to read files yet.
+ vdu['cloud_init_file'] = cloud_init(openmano_vnfd_im_body.id, vdu)
+ elif 'cloud_init' in vdu:
+ vdu['cloud_init'] = cloud_init(openmano_vnfd_im_body.id, vdu)
+
+ if 'supplemental_boot_data' in vdu:
+ if 'config_file' in vdu['supplemental_boot_data']:
+ for config_file in vdu['supplemental_boot_data']['config_file']:
+ # Replacing the leaf with the actual contents of the file.
+ # The RO does not have the ability to read files yet.
+ config_file['source'] = config_file_init(openmano_vnfd_im_body.id, vdu, config_file['source'])
+
+ openmano_vnfd_api_format = {
+ "vnfd:vnfd-catalog": {
+ "vnfd": [vnfd_dict]
+ }
+ }
+
+ openmano_vnfd = http_api.post_vnfd_v3(openmano_vnfd_api_format)
+
+ return openmano_vnfd
+
+ except Exception as e:
+ logger.error(e)
+ raise e
- def rift2openmano_vif(rift_type):
- if rift_type == "VIRTIO":
- return "virtio"
- elif rift_type == "E1000":
- return "e1000"
- else:
- raise ValueError("VDU Virtual Interface type {} not supported".format(rift_type))
-
- # Add all external connections
- cp_to_port_security_map = {}
-
- for cp in rift_vnfd.cps:
- # Find the VDU and and external interface for this connection point
- vdu, ext_if = find_vdu_and_ext_if_by_cp_ref(cp.name)
- connection = {
- "name": cp.name,
- "type": rift2openmano_if_type(ext_if),
- "VNFC": vdu.name,
- "local_iface_name": ext_if.name,
- "description": "%s iface on VDU %s" % (ext_if.name, vdu.name),
- }
-
- if cp.has_field('port_security_enabled'):
- cp_to_port_security_map[cp.name] = cp.port_security_enabled
- vnf["external-connections"].append(connection)
-
- # Add all internal networks
- for vld in rift_vnfd.internal_vlds:
- connection = {
- "name": vld.name,
- "description": vld.description,
- "type": "bridge",
- "elements": [],
- }
-
- # Add the specific VDU connection points
- for int_cp in vld.internal_connection_point:
- vdu, int_if = find_vdu_and_int_if_by_cp_ref(int_cp.id_ref)
- connection["elements"].append({
- "VNFC": vdu.name,
- "local_iface_name": int_if.name,
- })
- if "internal-connections" not in vnf:
- vnf["internal-connections"] = []
-
- vnf["internal-connections"].append(connection)
-
- # Add VDU's
- vnf["VNFC"] = []
- for vdu in rift_vnfd.vdus:
- vnfc = {
- "name": vdu.name,
- "description": vdu.name,
- "bridge-ifaces": [],
- }
-
- if vdu.vm_flavor.has_field("storage_gb") and vdu.vm_flavor.storage_gb:
- vnfc["disk"] = vdu.vm_flavor.storage_gb
-
- if vdu.has_field("image"):
- if os.path.isabs(vdu.image):
- vnfc["VNFC image"] = vdu.image
- else:
- vnfc["image name"] = vdu.image
- if vdu.has_field("image_checksum"):
- vnfc["image checksum"] = vdu.image_checksum
-
- dedicated_int = False
- for intf in list(vdu.internal_interface) + list(vdu.external_interface):
- if intf.virtual_interface.type_yang in ["SR_IOV", "PCI_PASSTHROUGH"]:
- dedicated_int = True
- if vdu.guest_epa.has_field("numa_node_policy") or dedicated_int:
- vnfc["numas"] = [{
- "memory": max(int(vdu.vm_flavor.memory_mb/1024), 1),
- "interfaces":[],
- }]
- numa_node_policy = vdu.guest_epa.numa_node_policy
- if numa_node_policy.has_field("node"):
- numa_node = numa_node_policy.node[0]
-
- if numa_node.has_field("num_cores"):
- vnfc["numas"][0]["cores"] = numa_node.num_cores
-
- if numa_node.has_field("paired_threads"):
- if numa_node.paired_threads.has_field("num_paired_threads"):
- vnfc["numas"][0]["paired-threads"] = numa_node.paired_threads.num_paired_threads
- if len(numa_node.paired_threads.paired_thread_ids) > 0:
- vnfc["numas"][0]["paired-threads-id"] = []
- for pair in numa_node.paired_threads.paired_thread_ids:
- vnfc["numas"][0]["paired-threads-id"].append(
- [pair.thread_a, pair.thread_b]
- )
-
- if numa_node.has_field("num_threads"):
- vnfc["numas"][0]["threads"] = numa_node.num_threads
- else:
- if vdu.vm_flavor.has_field("vcpu_count"):
- vnfc["numas"][0]["cores"] = max(vdu.vm_flavor.vcpu_count, 1)
-
- if vdu.vm_flavor.has_field("vcpu_count") and vdu.vm_flavor.vcpu_count:
- vnfc["vcpus"] = vdu.vm_flavor.vcpu_count
-
- if vdu.vm_flavor.has_field("memory_mb") and vdu.vm_flavor.memory_mb:
- vnfc["ram"] = vdu.vm_flavor.memory_mb
-
-
- if vdu.has_field("hypervisor_epa"):
- vnfc["hypervisor"] = {}
- if vdu.hypervisor_epa.has_field("type"):
- if vdu.hypervisor_epa.type_yang == "REQUIRE_KVM":
- vnfc["hypervisor"]["type"] = "QEMU-kvm"
-
- if vdu.hypervisor_epa.has_field("version"):
- vnfc["hypervisor"]["version"] = vdu.hypervisor_epa.version
-
- if vdu.has_field("host_epa"):
- vnfc["processor"] = {}
- if vdu.host_epa.has_field("om_cpu_model_string"):
- vnfc["processor"]["model"] = vdu.host_epa.om_cpu_model_string
- if vdu.host_epa.has_field("om_cpu_feature"):
- vnfc["processor"]["features"] = []
- for feature in vdu.host_epa.om_cpu_feature:
- vnfc["processor"]["features"].append(feature.feature)
-
- if vdu.has_field("volumes"):
- vnfc["devices"] = []
- # Sort volumes as device-list is implictly ordered by Openmano
- newvollist = sorted(vdu.volumes, key=lambda k: k.name)
- for iter_num, volume in enumerate(newvollist):
- if iter_num == 0:
- # Convert the first volume to vnfc.image
- if os.path.isabs(volume.image):
- vnfc["VNFC image"] = volume.image
- else:
- vnfc["image name"] = volume.image
- if volume.has_field("image_checksum"):
- vnfc["image checksum"] = volume.image_checksum
- else:
- # Add Openmano devices
- device = {}
- device["type"] = volume.device_type
- if volume.has_field("size"):
- device["size"] = volume.size
- if volume.has_field("image"):
- device["image name"] = volume.image
- if volume.has_field("image_checksum"):
- device["image checksum"] = volume.image_checksum
- vnfc["devices"].append(device)
-
- vnfc_boot_data_init = False
- if vdu.has_field("cloud_init") or vdu.has_field("cloud_init_file"):
- vnfc['boot-data'] = dict()
- vnfc_boot_data_init = True
- vnfc['boot-data']['user-data'] = cloud_init(rift_vnfd.id, vdu)
-
- if vdu.has_field("supplemental_boot_data"):
- if vdu.supplemental_boot_data.has_field('boot_data_drive'):
- if vdu.supplemental_boot_data.boot_data_drive is True:
- if vnfc_boot_data_init is False:
- vnfc['boot-data'] = dict()
- vnfc_boot_data_init = True
- vnfc['boot-data']['boot-data-drive'] = vdu.supplemental_boot_data.boot_data_drive
-
- if vdu.supplemental_boot_data.has_field('config_file'):
- om_cfgfile_list = list()
- for custom_config_file in vdu.supplemental_boot_data.config_file:
- cfg_source = config_file_init(rift_vnfd.id, vdu, custom_config_file.source)
- om_cfgfile_list.append({"dest":custom_config_file.dest, "content": cfg_source})
- vnfc['boot-data']['config-files'] = om_cfgfile_list
-
-
- vnf["VNFC"].append(vnfc)
-
- for int_if in list(vdu.internal_interface) + list(vdu.external_interface):
- intf = {
- "name": int_if.name,
- }
- if int_if.virtual_interface.has_field("vpci"):
- intf["vpci"] = int_if.virtual_interface.vpci
-
- if int_if.virtual_interface.type_yang in ["VIRTIO", "E1000"]:
- intf["model"] = rift2openmano_vif(int_if.virtual_interface.type_yang)
- vnfc["bridge-ifaces"].append(intf)
-
- elif int_if.virtual_interface.type_yang in ["OM_MGMT"]:
- vnfc["bridge-ifaces"].append(intf)
-
- elif int_if.virtual_interface.type_yang == "SR_IOV":
- intf["bandwidth"] = "10 Gbps"
- intf["dedicated"] = "no"
- vnfc["numas"][0]["interfaces"].append(intf)
-
- elif int_if.virtual_interface.type_yang == "PCI_PASSTHROUGH":
- intf["bandwidth"] = "10 Gbps"
- intf["dedicated"] = "yes"
- if "interfaces" not in vnfc["numas"][0]:
- vnfc["numas"][0]["interfaces"] = []
- vnfc["numas"][0]["interfaces"].append(intf)
- else:
- raise ValueError("Interface type %s not supported" % int_if.virtual_interface)
-
- if int_if.virtual_interface.has_field("bandwidth"):
- if int_if.virtual_interface.bandwidth != 0:
- bps = int_if.virtual_interface.bandwidth
-
- # Calculate the bits per second conversion
- for x in [('M', 1000000), ('G', 1000000000)]:
- if bps/x[1] >= 1:
- intf["bandwidth"] = "{} {}bps".format(math.ceil(bps/x[1]), x[0])
-
- for bridge_iface in vnfc["bridge-ifaces"]:
- if bridge_iface['name'] in cp_to_port_security_map:
- bridge_iface['port-security'] = cp_to_port_security_map[bridge_iface['name']]
- # Sort bridge-ifaces-list TODO sort others
- newlist = sorted(vnfc["bridge-ifaces"], key=lambda k: k['name'])
- vnfc["bridge-ifaces"] = newlist
-
- return openmano_vnf
def parse_args(argv=sys.argv[1:]):
vnfd_nsd = rift2openmano_vnfd_nsd(nsd, vnf_dict, openmano_vnfr_ids)
write_yaml_to_file(openmano_nsd["name"], args.outdir, openmano_nsd)
write_yaml_to_file(vnfd_nsd["name"], args.outdir, vnfd_nsd)
+
for vnf in vnf_dict.values():
openmano_vnf = rift2openmano_vnfd(vnf, nsd)
write_yaml_to_file(openmano_vnf["vnf"]["name"], args.outdir, openmano_vnf)
DESTINATION
usr/rift/mano/examples/tidgen_ns
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
)
#! /bin/bash
set -e
+set -x
SOURCE_DIR=@CMAKE_CURRENT_SOURCE_DIR@
BINARY_DIR=@CMAKE_CURRENT_BINARY_DIR@
PROJECT_TOP_DIR=@PROJECT_TOP_DIR@
# These paths are needed for finding the overrides and so files
-PYTHONPATH=${PYTHONPATH}:@RIFT_SUBMODULE_SOURCE_ROOT@/rwvcs/ra:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang
-PYTHON3PATH=${PYTHON3PATH}:@RIFT_SUBMODULE_SOURCE_ROOT@/rwvcs/ra:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang
-LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang:@RIFT_SUBMODULE_BINARY_ROOT@/common/plugins/yang
+export PYTHONPATH=${PYTHONPATH}:@RIFT_SUBMODULE_SOURCE_ROOT@/rwvcs/ra:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang
+export PYTHON3PATH=${PYTHON3PATH}:@RIFT_SUBMODULE_SOURCE_ROOT@/rwvcs/ra:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang
+export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:@RIFT_SUBMODULE_BINARY_ROOT@/models/plugins/yang:@RIFT_SUBMODULE_BINARY_ROOT@/common/plugins/yang
+export GI_TYPELIB_PATH=${GI_TYPELIB_PATH}:@RIFT_SUBMODULE_SOURCE_ROOT@/models/plugins/yang:@RIFT_SUBMODULE_BINARY_ROOT@/common/plugins/yang
# Remove any old directories
rm -rf ${BINARY_DIR}/2tidgenMWC_4sriov
#!/usr/bin/env python3
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
import gi
gi.require_version('RwYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
-gi.require_version('RwNsdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
+gi.require_version('RwProjectYang', '1.0')
from gi.repository import (
RwYang,
- RwVnfdYang,
- RwNsdYang,
+ RwProjectVnfdYang as RwVnfdYang,
+ RwProjectNsdYang as RwNsdYang,
+ RwProjectYang,
)
logging.basicConfig(level=logging.WARNING)
class DescriptorFileWriter(object):
def __init__(self, module_list, output_dir, output_format):
- self._model = RwYang.Model.create_libncx()
+ self._model = RwYang.Model.create_libyang()
for module in module_list:
self._model.load_module(module)
return vnf_name
def openmano2rift(self, vnf_list):
- self.descriptor = RwNsdYang.YangData_Nsd_NsdCatalog()
+ self.descriptor = RwNsdYang.YangData_RwProject_Project_NsdCatalog()
openmano_nsd = self.openmano.dictionary
self.name = openmano_nsd['name']
nsd = self.descriptor.nsd.add()
return None
def openmano2rift(self):
- self.descriptor = RwVnfdYang.YangData_Vnfd_VnfdCatalog()
+ self.descriptor = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog()
vnfd = self.descriptor.vnfd.add()
self.vnfd = vnfd
vnfd.id = str(uuid.uuid1())
if not ext_conn:
continue
- ext_iface = vdu.external_interface.add()
+ ext_iface = vdu.interface.add()
ext_iface.name = numa_if['name']
- ext_iface.vnfd_connection_point_ref = ext_conn['name']
+ ext_iface.type_yang = 'EXTERNAL'
+ ext_iface.external_connection_point_ref = ext_conn['name']
ext_iface.virtual_interface.vpci = numa_if['vpci']
if numa_if['dedicated'] == 'no':
ext_iface.virtual_interface.type_yang = 'SR_IOV'
ext_conn = self.find_external_connection(vdu.name,
bridge_iface['name'])
if ext_conn:
- ext_iface = vdu.external_interface.add()
+ ext_iface = vdu.interface.add()
ext_iface.name = bridge_iface['name']
- ext_iface.vnfd_connection_point_ref = ext_conn['name']
+ ext_iface.type_yang = 'EXTERNAL'
+ ext_iface.external_connection_point_ref = ext_conn['name']
if 'vpci' in bridge_iface:
ext_iface.virtual_interface.vpci = bridge_iface['vpci']
ext_iface.virtual_interface.type_yang = 'VIRTIO'
vnf_list = create_vnfs_from_yaml_files(args.yaml_file_hdls)
ns_list = create_ns_from_yaml_files(args.yaml_file_hdls, vnf_list)
+ # TODO (Philip): Relook at the model generation
writer = DescriptorFileWriter(
- module_list=['nsd', 'rw-nsd', 'vnfd', 'rw-vnfd'],
+ module_list=['rw-project', 'project-nsd', 'rw-project-nsd', 'project-vnfd', 'rw-project-vnfd'],
output_dir=args.outdir,
output_format=args.format,
)
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
ietf-l2-topology.yang
ietf-network-topology.yang
ietf-network.yang
+ nsd-base.yang rw-nsd-base.yang
nsd.yang rw-nsd.yang
+ project-nsd.yang rw-project-nsd.yang
nsr.yang rw-nsr.yang
pnfd.yang
rw-topology.yang
vld.yang rw-vld.yang
vlr.yang rw-vlr.yang
+ vnfd-base.yang rw-vnfd-base.yang
vnfd.yang rw-vnfd.yang
+ project-vnfd.yang rw-project-vnfd.yang
vnfr.yang rw-vnfr.yang
+ mano-rift-groupings.yang
vnffgd.yang
)
TARGET mano-types_yang
YANG_FILES
mano-types.yang
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
+ LIBRARIES
+ rwprojectmano_yang_gen
)
rift_add_yang_target(
TARGET mano_yang
YANG_FILES ${source_yang_files}
GIR_PATHS ${CMAKE_CURRENT_BINARY_DIR}
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
LIBRARIES
rwmanifest_yang_gen
rwschema_yang_gen
rwcloud_yang_gen
+ rwro_account_yang_gen
+ rwsdn_yang_gen
rwconfig_agent_yang_gen
mano-types_yang_gen
+ rwprojectmano_yang_gen
DEPENDS
rwcloud_yang
+ rwro_account_yang
+ rwsdn_yang
rwconfig_agent_yang
+ rwprojectmano_yang
+ ASSOCIATED_FILES
+ project-vnfd.role.xml
+ project-nsd.role.xml
+ vnfr.role.xml
+ rw-vnfr.role.xml
+ vlr.role.xml
+ nsr.role.xml
)
#rift_gen_yang_tree(mano-pyang-trees
prefix inet;
}
- import rw-pb-ext {
- prefix "rwpb";
- }
-
organization "TBD";
contact
"WILL-BE-DEFINED-LATER";
--- /dev/null
+/*
+ *
+ * Copyright 2016-2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+ module mano-rift-groupings
+{
+ namespace "urn:ietf:params:xml:ns:yang:nfvo:mano-rift-groupings";
+ prefix "mano-rift";
+
+ import vnfd {
+ prefix "vnfd";
+ }
+
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ import nsd {
+ prefix "nsd";
+ }
+
+ import project-vnfd {
+ prefix "project-vnfd";
+ }
+
+ grouping custom-meta-data {
+ description "Grouping for instance-specific meta data";
+ list custom-meta-data {
+ description
+ "List of meta-data to be associated with the instance";
+ key "name";
+ leaf name {
+ description "Name of the meta-data parameter";
+ type string;
+ }
+
+ leaf data-type {
+ description "Data-type the meta-data parameter";
+ type manotypes:meta-data-type;
+ default "STRING";
+ }
+
+ leaf value {
+ description "Value of the meta-data parameter";
+ type string;
+ }
+
+ leaf destination {
+ description "Type of input parameter";
+ type enumeration {
+ enum "CLOUD_INIT";
+ enum "CLOUD_METADATA";
+ }
+ default "CLOUD_METADATA";
+ }
+ }
+ }
+
+ grouping volume-info-additions {
+ leaf boot-volume {
+ description "This flag indicates if this is boot volume or not";
+ type boolean;
+ }
+
+ leaf boot-priority {
+ description "Boot priority associated with volume";
+ type int32;
+ }
+ }
+
+ grouping http-end-point-additions {
+ leaf data {
+ description
+ "This is the data to be sent with POST ";
+ type string;
+ }
+ }
+
+ grouping ssh-key-generated {
+ container ssh-key-generated {
+ description "SSH key pair generated for this NS";
+ leaf public-key {
+ description "Public key generated";
+ type string;
+ }
+ leaf private-key-file {
+ description "Path to the private key file";
+ type string;
+ }
+ }
+ }
+}
/*
- *
- * Copyright 2016 RIFT.IO Inc
+ *
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix "inet";
}
- import rw-pb-ext {
- prefix "rwpb";
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
}
revision 2015-04-23 {
"Derived from earlier versions of base YANG files";
}
+ typedef meta-data-type {
+ type enumeration {
+ enum STRING;
+ }
+ }
+
typedef package-type {
description "Type of descriptor being on-boarded";
type enumeration {
"The value should be dimmed by the UI.
Only applies to parameters with default values.";
type boolean;
+ default false;
}
leaf hidden {
"The value should be hidden by the UI.
Only applies to parameters with default values.";
type boolean;
+ default false;
}
}
-
+
grouping ui-primitive-group {
list parameter-group {
description
"Grouping of parameters which are logically grouped in UI";
key "name";
-
+
leaf name {
description
"Name of the parameter group";
type string;
}
-
+
list parameter {
description
"List of parameters for the service primitive.";
key "name";
uses manotypes:primitive-parameter;
}
-
+
leaf mandatory {
description "Is this parameter group mandatory";
type boolean;
}
}
- grouping image-properties {
- leaf image {
- description
- "Image name for the software image.
- If the image name is found within the VNF package it will
- be uploaded to all VIM accounts during onboarding process.
- Otherwise, the image must be added to the VIM account with
- the same name as entered here.
- ";
- type string;
- }
-
- leaf image-checksum {
- description
- "Image md5sum for the software image.
- The md5sum, if provided, along with the image name uniquely
- identifies an image uploaded to the CAL.
- ";
- type string;
- }
- }
- grouping initial-config {
+ grouping event-config {
leaf seq {
description
"Sequence number for the configuration primitive.";
}
}
+ grouping image-properties {
+ leaf image {
+ description
+ "Image name for the software image.
+ If the image name is found within the VNF package it will
+ be uploaded to all VIM accounts during onboarding process.
+ Otherwise, the image must be added to the VIM account with
+ the same name as entered here.
+ ";
+ type string;
+ }
+
+ leaf image-checksum {
+ description
+ "Image md5sum for the software image.
+ The md5sum, if provided, along with the image name uniquely
+ identifies an image uploaded to the CAL.
+ ";
+ type string;
+ }
+ }
+
grouping vnf-configuration {
container vnf-configuration {
- rwpb:msg-new VnfConfiguration;
description
- "Information about the VNF configuration. Note:
+ "Information about the VNF configuration. Note:
If the NS contains multiple instances of the
same VNF, each instance could have a different
configuration.";
choice config-method {
description
"Defines the configuration method for the VNF.";
- case netconf {
- description
- "Use NETCONF for configuring the VNF.";
- container netconf {
- leaf target {
- description
- "Netconf configuration target";
- type enumeration {
- enum running;
- enum candidate;
- }
- }
-
- leaf protocol {
- description
- "Protocol to use for NETCONF, such as ssh";
- type enumeration {
- enum None;
- enum ssh;
- }
- }
-
- leaf port {
- description
- "Port for the NETCONF server.";
- type inet:port-number;
- }
- }
- }
-
- case rest {
- description
- "Use REST for configuring the VNF.";
- container rest {
- leaf port {
- description
- "Port for the REST server.";
- type inet:port-number;
- }
- }
- }
-
case script {
description
"Use custom script for configuring the VNF.
- This script is executed in the context of
+ This script is executed in the context of
Orchestrator (The same system and environment
as the Launchpad).";
container script {
leaf script-type {
description
- "Script type - currently supported : bash, expect";
+ "Script type - currently supported - Scripts confirming to Rift CA plugin";
type enumeration {
- enum bash;
- enum expect;
+ enum rift;
}
}
}
}
}
- container config-access {
- leaf mgmt-ip-address {
- description
- "IP address to be used to configure this VNF,
- optional if it is possible to resolve dynamically.";
- type inet:ip-address;
- }
-
- leaf username {
- description
- "User name for configuration.";
- type string;
- }
-
- leaf password {
- description
- "Password for configuration access authentication.";
- type string;
- }
- }
-
- container config-attributes {
- description
- "Miscellaneous input parameters to be considered
- while processing the NSD to apply configuration";
-
- leaf config-priority {
- description
- "Configuration priority - order of configuration
- to be applied to each VNF in this NS. A low
- number takes precedence over a high number";
- type uint64;
- }
-
- leaf config-delay {
- description
- "Wait (seconds) before applying the configuration to VNF";
- type uint64;
- }
- }
-
- list service-primitive {
- rwpb:msg-new ServicePrimitive;
+ list config-primitive {
description
- "List of service primitives supported by the
+ "List of config primitives supported by the
configuration agent for this VNF.";
key "name";
leaf name {
description
- "Name of the service primitive.";
+ "Name of the config primitive.";
type string;
}
list parameter {
description
- "List of parameters to the service primitive.";
+ "List of parameters to the config primitive.";
key "name";
uses primitive-parameter;
}
+
+ leaf user-defined-script {
+ description
+ "A user defined script. If user defined script is defined,
+ the script will be executed using bash";
+ type string;
+ }
}
list initial-config-primitive {
- rwpb:msg-new InitialConfigPrimitive;
description
"Initial set of configuration primitives.";
key "seq";
- uses initial-config;
- }
+ leaf seq {
+ description
+ "Sequence number for the configuration primitive.";
+ type uint64;
+ }
- leaf config-template {
- description
- "Configuration template for each VNF";
- type string;
+ choice primitive-type {
+ case primitive-definition {
+ leaf name {
+ description
+ "Name of the configuration primitive.";
+ type string;
+ }
+
+ uses primitive-parameter-value;
+
+ leaf user-defined-script {
+ description
+ "A user defined script.";
+ type string;
+ }
+ }
+ }
}
}
} // END - grouping vnf-configuration
description
"Type of the widget, typically used by the UI.";
type enumeration {
- enum HISTOGRAM;
- enum BAR;
- enum GAUGE;
- enum SLIDER;
enum COUNTER;
+ enum GAUGE;
enum TEXTBOX;
+ enum SLIDER;
+ enum HISTOGRAM;
+ enum BAR;
}
}
}
} //grouping vm-flavor
+ grouping vm-flavor-name {
+ leaf vm-flavor-name {
+ description "flavor name to be used while creating vm using cloud account";
+ type string;
+ }
+ }
+
grouping vswitch-epa {
container vswitch-epa {
leaf ovs-acceleration {
description "Number of threads per cores on the host.";
type uint64;
}
-
+
list cpu-feature {
key "feature";
description "List of CPU features.";
}
}
-
+
leaf om-cpu-model-string {
description "OpenMANO CPU model string";
type string;
description
"Type of the overlay network.
LOCAL - Provider network implemented in a single compute node
- FLAT - Provider network shared by all tenants
+ FLAT - Provider network shared by all tenants
VLAN - Provider network implemented using 802.1Q tagging
VXLAN - Provider networks implemented using RFC 7348
- GRE - Provider networks implemented using GRE tunnels";
+ GRE - Provider networks implemented using GRE tunnels
+ PORTGROUP - Provider networks implemented for VIO support";
type enumeration {
enum LOCAL;
enum FLAT;
enum VLAN;
enum VXLAN;
enum GRE;
+ enum PORTGROUP;
}
}
leaf segmentation_id {
}
}
+ grouping ns-service-primitive {
+ list service-primitive {
+ description
+ "Network service level service primitives.";
+
+ key "name";
+
+ leaf name {
+ description
+ "Name of the service primitive.";
+ type string;
+ }
+
+ list parameter {
+ description
+ "List of parameters for the service primitive.";
+
+ key "name";
+ uses manotypes:primitive-parameter;
+ }
+
+ list parameter-group {
+ description
+ "Grouping of parameters which are logically grouped in UI";
+ key "name";
+
+ leaf name {
+ description
+ "Name of the parameter group";
+ type string;
+ }
+
+ list parameter {
+ description
+ "List of parameters for the service primitive.";
+ key "name";
+ uses manotypes:primitive-parameter;
+ }
+
+ leaf mandatory {
+ description "Is this parameter group mandatory";
+ type boolean;
+ default true;
+ }
+ }
+
+ list vnf-primitive-group {
+ description
+ "List of service primitives grouped by VNF.";
+
+ key "member-vnf-index-ref";
+ leaf member-vnf-index-ref {
+ description
+ "Reference to member-vnf within constituent-vnfds";
+ type uint64;
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../../../nsd:constituent-vnfd
+ + [nsd:id = current()/../nsd:id-ref]
+ + /nsd:vnfd-id-ref
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resolved this will switched to use
+ leafref";
+
+ type string;
+ }
+
+ leaf vnfd-name {
+ description
+ "Name of the VNFD";
+ type string;
+ }
+
+ list primitive {
+ key "index";
+
+ leaf index {
+ description "Index of this primitive";
+ type uint32;
+ }
+
+ leaf name {
+ description "Name of the primitive in the VNF primitive ";
+ type string;
+ }
+ }
+ }
+
+ leaf user-defined-script {
+ description
+ "A user defined script.";
+ type string;
+ }
+ }
+ }
+
grouping monitoring-param {
list http-endpoint {
description
leaf widget-type {
description "Defines the UI Display variant of measured counters.";
type manotypes:widget-type;
+ default "COUNTER";
}
leaf units {
}
leaf default-value {
- description "/nsd:nsd-catalog/nsd:nsd/nsd:vendor";
+ description "Default Value for the Input Parameter";
type string;
}
}
leaf operation {
description
"The relational operator used to define whether an alarm should be
- triggered in certain scenarios, such as if the metric statistic
+ triggered in certain scenarios, such as if the metric statistic
goes above or below a specified value.";
type alarm-operation-type;
}
enum openvim;
}
}
-
+
grouping host-aggregate {
list host-aggregate {
description "Name of the Host Aggregate";
key "metadata-key";
-
+
leaf metadata-key {
description
"Name of the additional information attached to the host-aggregate";
}
}
}
-
+
grouping placement-group-input {
leaf cloud-type {
type manotypes:cloud-account-type;
}
choice cloud-provider {
- case openstack {
+ case openstack {
container availability-zone {
description "Name of the Availability Zone";
leaf name {
case openmano {
leaf openmano-construct {
type empty;
- }
+ }
}
case vsphere {
leaf vsphere-construct {
}
}
}
-
+
+ grouping cloud-config {
+ list key-pair {
+ key "name";
+ description "Used to configure the list of public keys to be injected as part
+ of ns instantiation";
+
+ leaf name {
+ description "Name of this key pair";
+ type string;
+ }
+
+ leaf key {
+ description "Key associated with this key pair";
+ type string;
+ }
+ }
+
+ list user {
+ key "name";
+ description "List of users to be added through cloud-config";
+
+ leaf name {
+ description "Name of the user ";
+ type string;
+ }
+
+ leaf user-info {
+ description "The user name's real name";
+ type string;
+ }
+
+ list key-pair {
+ key "name";
+ description "Used to configure the list of public keys to be injected as part
+ of ns instantiation";
+
+ leaf name {
+ description "Name of this key pair";
+ type string;
+ }
+
+ leaf key {
+ description "Key associated with this key pair";
+ type string;
+ }
+ }
+ }
+ }
+
grouping placement-group-info {
description "";
behind this placement group. This is for human consumption only";
type string;
}
-
+
leaf strategy {
description
"Strategy associated with this placement group
grouping ip-profile-info {
description "Grouping for IP-Profile";
container ip-profile-params {
-
+
leaf ip-version {
type inet:ip-version;
default ipv4;
list dns-server {
key "address";
leaf address {
- description "List of DNS Servers associated with IP Profile";
- type inet:ip-address;
+ description "List of DNS Servers associated with IP Profile";
+ type inet:ip-address;
}
}
- container dhcp-params {
+ container dhcp-params {
leaf enabled {
description "This flag indicates if DHCP is enabled or not";
type boolean;
description
"List of IP Profiles.
IP Profile describes the IP characteristics for the Virtual-Link";
-
+
key "name";
leaf name {
description "Name of the IP-Profile";
type string;
}
-
+
leaf description {
description "Description for IP profile";
type string;
}
-
+
uses ip-profile-info;
}
}
description "Some VIMs implement additional drives to host config-files or meta-data";
type boolean;
default false;
- }
+ }
}
}
case image {
uses image-properties;
}
-
}
leaf device-bus {
enum lun;
}
}
+ }
+ grouping rpc-project-name {
+ leaf project-name {
+ default "default";
+ description
+ "Project to which this belongs";
+ type leafref {
+ path "/rw-project:project/rw-project:name";
+ }
+ }
}
}
--- /dev/null
+
+/*
+ *
+ * Copyright 2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+module nsd-base
+{
+ namespace "http://riftio.com/ns/riftware-1.0/nsd-base";
+ prefix "nsd-base";
+
+ import vld {
+ prefix "vld";
+ }
+
+ import vnfd-base {
+ prefix "vnfd-base";
+ }
+
+ import ietf-inet-types {
+ prefix "inet";
+ }
+
+ import ietf-yang-types {
+ prefix "yang";
+ }
+
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ revision 2017-02-28 {
+ description
+ "Initial revision. This YANG file defines
+ the Network Service Descriptor (NSD)
+ common groupings";
+ reference
+ "Derived from earlier versions of base YANG files";
+ }
+
+ typedef scaling-trigger {
+ type enumeration {
+ enum pre-scale-in {
+ value 1;
+ }
+ enum post-scale-in {
+ value 2;
+ }
+ enum pre-scale-out {
+ value 3;
+ }
+ enum post-scale-out {
+ value 4;
+ }
+ }
+ }
+
+ typedef scaling-policy-type {
+ type enumeration {
+ enum manual {
+ value 1;
+ }
+ enum automatic {
+ value 2;
+ }
+ }
+ }
+
+ typedef scaling-criteria-operation {
+ type enumeration {
+ enum AND {
+ value 1;
+ }
+ enum OR {
+ value 2;
+ }
+ }
+ }
+
+ grouping primitive-parameter {
+ leaf name {
+ description
+ "Name of the parameter.";
+ type string;
+ }
+
+ leaf data-type {
+ description
+ "Data type associated with the name.";
+ type manotypes:parameter-data-type;
+ }
+
+ leaf mandatory {
+ description "Is this field mandatory";
+ type boolean;
+ default false;
+ }
+
+ leaf default-value {
+ description "The default value for this field";
+ type string;
+ }
+
+ leaf parameter-pool {
+ description "NSD parameter pool name to use for this parameter";
+ type string;
+ }
+ }
+
+ grouping nsd-descriptor-common {
+ leaf id {
+ description "Identifier for the NSD.";
+ type string {
+ length 1..63;
+ }
+ }
+
+ leaf name {
+ description "NSD name.";
+ mandatory true;
+ type string;
+ }
+
+ leaf short-name {
+ description "Short name to appear as label in the UI";
+ type string;
+ }
+
+ leaf vendor {
+ description "Vendor of the NSD.";
+ type string;
+ }
+
+ leaf logo {
+ description
+ "File path for the vendor specific logo. For example icons/mylogo.png.
+ The logo should be part of the network service";
+ type string;
+ }
+
+ leaf description {
+ description "Description of the NSD.";
+ type string;
+ }
+
+ leaf version {
+ description "Version of the NSD";
+ type string;
+ }
+
+ list connection-point {
+ description
+ "List for external connection points.
+ Each NS has one or more external connection
+ points. As the name implies that external
+ connection points are used for connecting
+ the NS to other NS or to external networks.
+ Each NS exposes these connection points to
+ the orchestrator. The orchestrator can
+ construct network service chains by
+ connecting the connection points between
+ different NS.";
+
+ key "name";
+ leaf name {
+ description
+ "Name of the NS connection point.";
+ type string;
+ }
+
+ leaf type {
+ description
+ "Type of the connection point.";
+ type manotypes:connection-point-type;
+ }
+ }
+
+ list scaling-group-descriptor {
+ description
+ "scaling group descriptor within this network service.
+ The scaling group defines a group of VNFs,
+ and the ratio of VNFs in the network service
+ that is used as target for scaling action";
+
+ key "name";
+
+ leaf name {
+ description "Name of this scaling group.";
+ type string;
+ }
+
+ list scaling-policy {
+
+ key "name";
+
+ leaf name {
+ description
+ "Name of the scaling policy";
+ type string;
+ }
+
+ leaf scaling-type {
+ description
+ "Type of scaling";
+ type scaling-policy-type;
+ }
+
+ leaf enabled {
+ description
+ "Specifies if the scaling policy can be applied";
+ type boolean;
+ default true;
+ }
+
+ leaf scale-in-operation-type {
+ description
+ "Operation to be applied to check between scaling criterias to
+ check if the scale in threshold condition has been met.
+ Defaults to AND";
+ type scaling-criteria-operation;
+ default AND;
+ }
+
+ leaf scale-out-operation-type {
+ description
+ "Operation to be applied to check between scaling criterias to
+ check if the scale out threshold condition has been met.
+ Defauls to OR";
+ type scaling-criteria-operation;
+ default OR;
+ }
+
+ leaf threshold-time {
+ description
+ "The duration for which the criteria must hold true";
+ type uint32;
+ mandatory true;
+ }
+
+ leaf cooldown-time {
+ description
+ "The duration after a scaling-in/scaling-out action has been
+ triggered, for which there will be no further optional";
+ type uint32;
+ mandatory true;
+ }
+
+ list scaling-criteria {
+ description
+ "list of conditions to be met for generating scaling
+ requests";
+ key "name";
+
+ leaf name {
+ type string;
+ }
+
+ leaf scale-in-threshold {
+ description
+ "Value below which scale-in requests are generated";
+ type uint64;
+ }
+
+ leaf scale-out-threshold {
+ description
+ "Value above which scale-out requests are generated";
+ type uint64;
+ }
+
+ leaf ns-monitoring-param-ref {
+ description
+ "Reference to the NS level monitoring parameter
+ that is aggregated";
+ type leafref {
+ path "../../../../monitoring-param/id";
+ }
+ }
+ }
+ }
+
+ list vnfd-member {
+ description "List of VNFs in this scaling group";
+ key "member-vnf-index-ref";
+
+ leaf member-vnf-index-ref {
+ description "member VNF index of this member VNF";
+ type leafref {
+ path "../../../constituent-vnfd/member-vnf-index";
+ }
+ }
+
+ leaf count {
+ description
+ "count of this member VNF within this scaling group.
+ The count allows to define the number of instances
+ when a scaling action targets this scaling group";
+ type uint32;
+ default 1;
+ }
+ }
+
+ leaf min-instance-count {
+ description
+ "Minimum instances of the scaling group which are allowed.
+ These instances are created by default when the network service
+ is instantiated.";
+ type uint32;
+ default 0;
+ }
+
+ leaf max-instance-count {
+ description
+ "Maximum instances of this scaling group that are allowed
+ in a single network service. The network service scaling
+ will fail, when the number of service group instances
+ exceed the max-instance-count specified.";
+ type uint32;
+ default 10;
+ }
+
+ list scaling-config-action {
+ description "List of scaling config actions";
+ key "trigger";
+
+ leaf trigger {
+ description "scaling trigger";
+ type scaling-trigger;
+ }
+
+ leaf ns-service-primitive-name-ref {
+ description "Reference to the NS service primitive";
+ type leafref {
+ path "../../../service-primitive/name";
+ }
+ }
+ }
+ }
+
+
+ list vnffgd {
+ description
+ "List of VNF Forwarding Graph Descriptors (VNFFGD).";
+
+ key "id";
+
+ leaf id {
+ description
+ "Identifier for the VNFFGD.";
+ type string;
+ }
+
+ leaf name {
+ description
+ "VNFFGD name.";
+ type string;
+ }
+
+ leaf short-name {
+ description
+ "Short name to appear as label in the UI";
+ type string;
+ }
+
+ leaf vendor {
+ description "Provider of the VNFFGD.";
+ type string;
+ }
+
+ leaf description {
+ description "Description of the VNFFGD.";
+ type string;
+ }
+
+ leaf version {
+ description "Version of the VNFFGD";
+ type string;
+ }
+
+ list rsp {
+ description
+ "List of Rendered Service Paths (RSP).";
+
+ key "id";
+
+ leaf id {
+ description
+ "Identifier for the RSP.";
+ type string;
+ }
+
+ leaf name {
+ description
+ "RSP name.";
+ type string;
+ }
+
+ list vnfd-connection-point-ref {
+ description
+ "A list of references to connection points.";
+ key "member-vnf-index-ref";
+
+ leaf member-vnf-index-ref {
+ description "Reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../../../constituent-vnfd/member-vnf-index";
+ }
+ }
+
+ leaf order {
+ type uint8;
+ description
+ "A number that denotes the order of a VNF in a chain";
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../../../nsd:constituent-vnfd
+ + [nsd:id = current()/../nsd:id-ref]
+ + /nsd:vnfd-id-ref";
+
+ type leafref {
+ path "../../../../constituent-vnfd" +
+ "[member-vnf-index = current()/../member-vnf-index-ref]" +
+ "/vnfd-id-ref";
+ }
+ }
+
+ leaf vnfd-connection-point-ref {
+ description
+ "A reference to a connection point name
+ in a vnfd. This is a leafref to path:
+ /vnfd:vnfd-catalog/vnfd:vnfd
+ + [vnfd:id = current()/../nsd:vnfd-id-ref]
+ + /vnfd:connection-point/vnfd:name
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resolved this will switched to use
+ leafref";
+ // TODO: Keeping as string as this needs to be
+ // diffenent lvel based of if it is nsd-catalog or
+ // in nsr.
+ // type leafref {
+ // path "../../../../../../vnfd:vnfd-catalog/vnfd:vnfd" +
+ // "[vnfd:id = current()/../vnfd-id-ref]/" +
+ // "vnfd:connection-point/vnfd:name";
+ // }
+ type string;
+ }
+ }
+ } //rsp
+
+ list classifier {
+ description
+ "List of classifier rules.";
+
+ key "id";
+
+ leaf id {
+ description
+ "Identifier for the classifier rule.";
+ type string;
+ }
+
+ leaf name {
+ description
+ "Name of the classifier.";
+ type string;
+ }
+
+ leaf rsp-id-ref {
+ description
+ "A reference to the RSP.";
+ type leafref {
+ path "../../rsp/id";
+ }
+ }
+
+ leaf member-vnf-index-ref {
+ description "Reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../../constituent-vnfd/member-vnf-index";
+ }
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../../nsd:constituent-vnfd
+ + [nsd:id = current()/../nsd:id-ref]
+ + /nsd:vnfd-id-ref";
+
+ type leafref {
+ path "../../../constituent-vnfd" +
+ "[member-vnf-index = current()/../member-vnf-index-ref]" +
+ "/vnfd-id-ref";
+ }
+ }
+
+ leaf vnfd-connection-point-ref {
+ description
+ "A reference to a connection point name
+ in a vnfd. This is a leafref to path:
+ /vnfd:vnfd-catalog/vnfd:vnfd
+ + [vnfd:id = current()/../nsd:vnfd-id-ref]
+ + /vnfd:connection-point/vnfd:name
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resolved this will switched to use
+ leafref";
+ // TODO: Keeping as string as this needs to be
+ // diffenent lvel based of if it is nsd-catalog or
+ // in nsr.
+ // type leafref {
+ // path "../../../../../vnfd:vnfd-catalog/vnfd:vnfd" +
+ // "[vnfd:id = current()/../vnfd-id-ref]/" +
+ // "vnfd:connection-point/vnfd:name";
+ // }
+ type string;
+ }
+
+ list match-attributes {
+ description
+ "List of match attributes.";
+
+ key "id";
+
+ leaf id {
+ description
+ "Identifier for the classifier match attribute rule.";
+ type string;
+ }
+
+ leaf ip-proto {
+ description
+ "IP Protocol.";
+ type uint8;
+ }
+
+ leaf source-ip-address {
+ description
+ "Source IP address.";
+ type inet:ip-address;
+ }
+
+ leaf destination-ip-address {
+ description
+ "Destination IP address.";
+ type inet:ip-address;
+ }
+
+ leaf source-port {
+ description
+ "Source port number.";
+ type inet:port-number;
+ }
+
+ leaf destination-port {
+ description
+ "Destination port number.";
+ type inet:port-number;
+ }
+ //TODO: Add more match criteria
+ } //match-attributes
+ } // classifier
+ } // vnffgd
+
+ uses manotypes:ip-profile-list;
+
+ list initial-service-primitive {
+ description
+ "Initial set of service primitives for NSD.";
+ key "seq";
+
+ uses manotypes:event-config;
+ }
+
+ list terminate-service-primitive {
+ description
+ "Set of service primitives during
+ termination for NSD.";
+ key "seq";
+
+ uses manotypes:event-config;
+ }
+
+ uses manotypes:input-parameter-xpath;
+
+ list parameter-pool {
+ description
+ "Pool of parameter values which must be
+ pulled from during configuration";
+ key "name";
+
+ leaf name {
+ description
+ "Name of the configuration value pool";
+ type string;
+ }
+
+ container range {
+ description
+ "Create a range of values to populate the pool with";
+
+ leaf start-value {
+ description
+ "Generated pool values start at this value";
+ type uint32;
+ mandatory true;
+ }
+
+ leaf end-value {
+ description
+ "Generated pool values stop at this value";
+ type uint32;
+ mandatory true;
+ }
+ }
+ }
+
+ list key-pair {
+ key "name";
+ description "Used to configure the list of public keys to be injected as part
+ of ns instantiation";
+
+ leaf name {
+ description "Name of this key pair";
+ type string;
+ }
+
+ leaf key {
+ description "Key associated with this key pair";
+ type string;
+ }
+ }
+
+ list user {
+ key "name";
+ description "List of users to be added through cloud-config";
+
+ leaf name {
+ description "Name of the user ";
+ type string;
+ }
+
+ leaf user-info {
+ description "The user name's real name";
+ type string;
+ }
+
+ list key-pair {
+ key "name";
+ description "Used to configure the list of public keys to be injected as part
+ of ns instantiation";
+
+ leaf name {
+ description "Name of this key pair";
+ type string;
+ }
+
+ leaf key {
+ description "Key associated with this key pair";
+ type string;
+ }
+ }
+ }
+ }
+
+ grouping nsd-vld-common {
+ /* Still having issues modelling this,
+ see the comments under vnfd-connection-point-ref
+ */
+ description
+ "List of Virtual Link Descriptors.";
+
+ leaf id {
+ description
+ "Identifier for the VLD.";
+ type string;
+ }
+
+ leaf name {
+ description
+ "Virtual Link Descriptor (VLD) name.";
+ type string;
+ }
+
+ leaf short-name {
+ description
+ "Short name to appear as label in the UI";
+ type string;
+ }
+
+ leaf vendor {
+ description "Provider of the VLD.";
+ type string;
+ }
+
+ leaf description {
+ description "Description of the VLD.";
+ type string;
+ }
+
+ leaf version {
+ description "Version of the VLD";
+ type string;
+ }
+
+ leaf type {
+ type manotypes:virtual-link-type;
+ }
+
+ leaf root-bandwidth {
+ description
+ "For ELAN this is the aggregate bandwidth.";
+ type uint64;
+ }
+
+ leaf leaf-bandwidth {
+ description
+ "For ELAN this is the bandwidth of branches.";
+ type uint64;
+ }
+
+ // replicate for pnfd container here
+ uses manotypes:provider-network;
+
+ leaf mgmt-network {
+ description "Flag indicating whether this network is a VIM management network";
+ type boolean;
+ default false;
+ }
+
+ choice init-params {
+ description "Extra parameters for VLD instantiation";
+
+ case vim-network-ref {
+ leaf vim-network-name {
+ description
+ "Name of network in VIM account. This is used to indicate
+ pre-provisioned network name in cloud account.";
+ type string;
+ }
+ }
+
+ case vim-network-profile {
+ leaf ip-profile-ref {
+ description "Named reference to IP-profile object";
+ type leafref {
+ path "../../ip-profiles/name";
+ }
+ }
+ }
+
+ }
+ }
+
+ grouping monitoring-param-common {
+ description
+ "List of monitoring parameters from VNF's that should be
+ propogated up into NSR";
+
+ leaf id {
+ type string;
+ }
+
+ leaf name {
+ type string;
+ }
+
+ uses manotypes:monitoring-param-value;
+ uses manotypes:monitoring-param-ui-data;
+ uses manotypes:monitoring-param-aggregation;
+ }
+}
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
namespace "urn:ietf:params:xml:ns:yang:nfvo:nsd";
prefix "nsd";
- import rw-pb-ext {
- prefix "rwpb";
- }
-
- import vld {
- prefix "vld";
- }
-
import vnfd {
prefix "vnfd";
}
- import ietf-inet-types {
- prefix "inet";
- }
-
- import ietf-yang-types {
- prefix "yang";
+ import nsd-base {
+ prefix "nsd-base";
}
import mano-types {
prefix "manotypes";
}
+ revision 2017-02-28 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2014-10-27 {
description
"Initial revision. This YANG file defines
"Derived from earlier versions of base YANG files";
}
- typedef scaling-trigger {
- type enumeration {
- enum pre-scale-in {
- value 1;
- }
- enum post-scale-in {
- value 2;
- }
- enum pre-scale-out {
- value 3;
- }
- enum post-scale-out {
- value 4;
- }
- }
- }
-
- typedef scaling-policy-type {
- type enumeration {
- enum manual {
- value 1;
- }
- enum automatic {
- value 2;
- }
- }
- }
-
- typedef scaling-criteria-operation {
- type enumeration {
- enum AND {
- value 1;
- }
- enum OR {
- value 2;
- }
- }
- }
-
- grouping primitive-parameter {
- leaf name {
- description
- "Name of the parameter.";
- type string;
- }
-
- leaf data-type {
- description
- "Data type associated with the name.";
- type manotypes:parameter-data-type;
- }
-
- leaf mandatory {
- description "Is this field mandatory";
- type boolean;
- default false;
- }
-
- leaf default-value {
- description "The default value for this field";
- type string;
- }
-
- leaf parameter-pool {
- description "NSD parameter pool name to use for this parameter";
- type string;
- }
- }
-
- grouping nsd-descriptor {
- leaf id {
- description "Identifier for the NSD.";
- type string;
- }
-
- leaf name {
- description "NSD name.";
- mandatory true;
- type string;
- }
-
- leaf short-name {
- description "Short name to appear as label in the UI";
- type string;
- }
-
- leaf vendor {
- description "Vendor of the NSD.";
- type string;
- }
-
- leaf logo {
+ grouping nsd-constituent-vnfd {
+ list constituent-vnfd {
description
- "File path for the vendor-specific logo. For example, icons/mylogo.png.
- The logo should be part of the network service";
- type string;
- }
+ "List of VNFDs that are part of this
+ network service.";
- leaf description {
- description "Description of the NSD.";
- type string;
- }
+ key "member-vnf-index";
- leaf version {
- description "Version of the NSD";
- type string;
- }
+ leaf member-vnf-index {
+ description
+ "Identifier/index for the VNFD. This separate id
+ is required to ensure that multiple VNFs can be
+ part of single NS";
+ type uint64;
+ }
- list connection-point {
- description
- "List for external connection points.
- Each network service (NS) has one or more external connection
- points that connect the NS to other NSs or to external networks.
- Each NS exposes connection points to the orchestrator, which can
- construct network service chains by connecting the connection
- points between different NSs.";
- key "name";
- leaf name {
+ leaf vnfd-id-ref {
description
- "Name of the NS connection point.";
- type string;
+ "Identifier for the VNFD.";
+ type leafref {
+ path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
+ }
}
- leaf type {
+ leaf start-by-default {
description
- "Type of the connection point.";
- type manotypes:connection-point-type;
+ "VNFD is started as part of the NS instantiation";
+ type boolean;
+ default true;
}
}
+ }
- /* Model Limitations,
- see the comments under vnfd-connection-point-ref
- */
+ grouping nsd-vld {
list vld {
- description
- "List of Virtual Link Descriptors (VLDs).";
key "id";
- leaf id {
- description
- "Identifier for the VLD.";
- type string;
- }
-
- leaf name {
- description
- "Virtual Link Descriptor (VLD) name.";
- type string;
- }
-
- leaf short-name {
- description
- "Short name to appear as label in the UI";
- type string;
- }
-
- leaf vendor {
- description "Provider of the VLD.";
- type string;
- }
-
- leaf description {
- description "Description of the VLD.";
- type string;
- }
-
- leaf version {
- description "Version of the VLD";
- type string;
- }
-
- leaf type {
- type manotypes:virtual-link-type;
- }
-
- leaf root-bandwidth {
- description
- "For ELAN this is the aggregate bandwidth.";
- type uint64;
- }
-
- leaf leaf-bandwidth {
- description
- "For ELAN this is the bandwidth of branches.";
- type uint64;
- }
+ uses nsd-base:nsd-vld-common;
list vnfd-connection-point-ref {
description
}
}
}
-
- // replicate for pnfd container here
- uses manotypes:provider-network;
-
- leaf mgmt-network {
- description "Flag indicating whether this network is a VIM management network";
- type boolean;
- default false;
- }
-
- choice init-params {
- description "Extra parameters for VLD instantiation";
-
- case vim-network-ref {
- leaf vim-network-name {
- description
- "Name of network in VIM account. This is used to indicate
- pre-provisioned network name in cloud account.";
- type string;
- }
- }
-
- case vim-network-profile {
- leaf ip-profile-ref {
- description "Named reference to IP-profile object";
- type leafref {
- path "../../ip-profiles/name";
- }
- }
- }
- }
}
+ }
- list constituent-vnfd {
+ grouping nsd-vnf-dependency {
+ list vnf-dependency {
description
- "List of VNFDs that are part of this
- network service.";
-
- key "member-vnf-index";
-
- leaf member-vnf-index {
- description
- "Identifier/index for the VNFD. This separate id
- is required so that multiple VNFs can be part of
- single NS";
- type uint64;
- }
-
- leaf vnfd-id-ref {
- description
- "Identifier for the VNFD.";
+ "List of VNF dependencies.";
+ key vnf-source-ref;
+ leaf vnf-source-ref {
type leafref {
path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
}
}
-
- leaf start-by-default {
- description
- "VNFD is started as part of the NS instantiation";
- type boolean;
- default true;
- }
- }
-
- list scaling-group-descriptor {
- description
- "Scaling group descriptor within this network service.
- The scaling group defines a group of VNFs,
- and the ratio of VNFs in the network service
- that is used as target for scaling action";
-
- key "name";
-
- leaf name {
- description "Name of this scaling group.";
- type string;
- }
-
- list scaling-policy {
-
- key "name";
-
- leaf name {
- description
- "Name of the scaling policy";
- type string;
- }
-
- leaf scaling-type {
- description
- "Type of scaling";
- type scaling-policy-type;
- }
-
- leaf enabled {
- description
- "Specifies if the scaling policy can be applied";
- type boolean;
- default true;
- }
-
- leaf scale-in-operation-type {
- description
- "Operation to be applied to check between scaling criterias to
- check if the scale in threshold condition has been met.
- Defaults to AND";
- type scaling-criteria-operation;
- default AND;
- }
-
- leaf scale-out-operation-type {
- description
- "Operation to be applied to check between scaling criterias to
- check if the scale out threshold condition has been met.
- Defaults to OR";
- type scaling-criteria-operation;
- default OR;
- }
-
- leaf threshold-time {
- description
- "The duration for which the criteria must hold true";
- type uint32;
- mandatory true;
- }
-
- leaf cooldown-time {
- description
- "The duration after a scaling-in/scaling-out action has been
- triggered, for which there will be no further scaling activity";
- type uint32;
- mandatory true;
- }
-
- list scaling-criteria {
- description
- "list of conditions to be met for generating scaling
- requests";
- key "name";
-
- leaf name {
- description "Name of the scaling criteria";
- type string;
- }
-
- leaf scale-in-threshold {
- description
- "Value below which scale-in requests are generated
- (depends on monitoring parameters)";
- type uint64;
- }
-
- leaf scale-out-threshold {
- description
- "Value above which scale-out requests are generated
- (depends on monitoring parameters)";
- type uint64;
- }
-
- leaf ns-monitoring-param-ref {
- description
- "Reference to the NS level monitoring parameter
- that is aggregated";
- type leafref {
- path "../../../../monitoring-param/id";
- }
- }
- }
- }
-
- list vnfd-member {
- description "List of VNFs in this scaling group";
- key "member-vnf-index-ref";
-
- leaf member-vnf-index-ref {
- description "Member VNF index of this member VNF";
- type leafref {
- path "../../../constituent-vnfd/member-vnf-index";
- }
- }
-
- leaf count {
- description
- "Count of this member VNF within this scaling group.
- The count defines the number of instances when a
- scaling action targets this scaling group.";
- type uint32;
- default 1;
- }
- }
-
- leaf min-instance-count {
- description
- "Minimum number of instances of the scaling group that
- are allowed in a single network service. These instances
- are created by default when the network service is
- instantiated.";
- type uint32;
- default 0;
- }
-
- leaf max-instance-count {
+ leaf vnf-depends-on-ref {
description
- "Maximum number of instances of this scaling group that
- are allowed in a single network service. The network
- service scaling fails when the number of service group
- instances exceeds the max-instance-count specified.";
- type uint32;
- default 10;
- }
-
- list scaling-config-action {
- description "List of scaling config actions";
- key "trigger";
-
- leaf trigger {
- description "Scaling trigger";
- type scaling-trigger;
- }
-
- leaf ns-config-primitive-name-ref {
- description "Reference to the NS config name primitive";
- type leafref {
- path "../../../service-primitive/name";
- }
+ "Reference to VNF that sorce VNF depends.";
+ type leafref {
+ path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
}
}
}
+ }
+ grouping nsd-placement-groups {
list placement-groups {
description "List of placement groups at NS level";
description
"Identifier for the VNFD.";
type leafref {
- path "../../../constituent-vnfd" +
+ path "../../../constituent-vnfd" +
"[member-vnf-index = current()/../member-vnf-index-ref]" +
"/vnfd-id-ref";
}
}
}
}
+ }
- uses manotypes:ip-profile-list;
-
- list vnf-dependency {
- description
- "List of VNF dependencies.";
- key vnf-source-ref;
- leaf vnf-source-ref {
- type leafref {
- path "../../constituent-vnfd/vnfd-id-ref";
- }
- }
- leaf vnf-depends-on-ref {
- description
- "Reference to VNF on which the source VNF depends.";
- type leafref {
- path "../../constituent-vnfd/vnfd-id-ref";
- }
- }
- }
-
- list vnffgd {
- description
- "List of VNF Forwarding Graph Descriptors (VNFFGD).";
-
- key "id";
-
- leaf id {
- description
- "Identifier for the VNFFGD.";
- type string;
- }
-
- leaf name {
- description
- "VNFFGD name.";
- type string;
- }
-
- leaf short-name {
- description
- "Short name to appear as label in the UI";
- type string;
- }
-
- leaf vendor {
- description "Provider of the VNFFGD.";
- type string;
- }
-
- leaf description {
- description "Description of the VNFFGD.";
- type string;
- }
-
- leaf version {
- description "Version of the VNFFGD";
- type string;
- }
-
- list rsp {
- description
- "List of Rendered Service Paths (RSP).";
-
- key "id";
-
- leaf id {
- description
- "Identifier for the RSP.";
- type string;
- }
-
- leaf name {
- description
- "RSP name.";
- type string;
- }
-
- list vnfd-connection-point-ref {
- description
- "A list of references to connection points.";
- key "member-vnf-index-ref";
-
- leaf member-vnf-index-ref {
- description "Reference to member-vnf within constituent-vnfds";
- type leafref {
- path "../../../../constituent-vnfd/member-vnf-index";
- }
- }
-
- leaf order {
- type uint8;
- description
- "A number that denotes the order of a VNF in a chain";
- }
-
- leaf vnfd-id-ref {
- description
- "A reference to a vnfd";
- type leafref {
- path "../../../../constituent-vnfd" +
- "[member-vnf-index = current()/../member-vnf-index-ref]" +
- "/vnfd-id-ref";
- }
- }
-
- leaf vnfd-connection-point-ref {
- description
- "A reference to a connection point name";
- type leafref {
- path "/vnfd:vnfd-catalog/vnfd:vnfd" +
- "[vnfd:id = current()/../vnfd-id-ref]/" +
- "vnfd:connection-point/vnfd:name";
- }
- }
- }
- } //rsp
-
- list classifier {
- description
- "List of classifier rules.";
-
- key "id";
-
- leaf id {
- description
- "Identifier for the classifier rule.";
- type string;
- }
-
- leaf name {
- description
- "Name of the classifier.";
- type string;
- }
-
- leaf rsp-id-ref {
- description
- "A reference to the RSP.";
- type leafref {
- path "../../rsp/id";
- }
- }
-
- leaf member-vnf-index-ref {
- description "Reference to member-vnf within constituent-vnfds";
- type leafref {
- path "../../../constituent-vnfd/member-vnf-index";
- }
- }
-
- leaf vnfd-id-ref {
- description
- "A reference to a VNFD";
- type leafref {
- path "../../../constituent-vnfd" +
- "[member-vnf-index = current()/../member-vnf-index-ref]" +
- "/vnfd-id-ref";
- }
- }
-
- leaf vnfd-connection-point-ref {
- description
- "A reference to a connection point name";
- type leafref {
- path "/vnfd:vnfd-catalog/vnfd:vnfd" +
- "[vnfd:id = current()/../vnfd-id-ref]/" +
- "vnfd:connection-point/vnfd:name";
- }
- }
-
- list match-attributes {
- description
- "List of match attributes.";
-
- key "id";
-
- leaf id {
- description
- "Identifier for the classifier match attribute rule.";
- type string;
- }
-
- leaf ip-proto {
- description
- "Internet Protocol.";
- type uint8;
- }
-
- leaf source-ip-address {
- description
- "Source IP address.";
- type inet:ip-address;
- }
-
- leaf destination-ip-address {
- description
- "Destination IP address.";
- type inet:ip-address;
- }
-
- leaf source-port {
- description
- "Source port number.";
- type inet:port-number;
- }
-
- leaf destination-port {
- description
- "Destination port number.";
- type inet:port-number;
- }
- } //match-attributes
- } // classifier
- } // vnffgd
+ grouping nsd-monitoring-param {
list monitoring-param {
- description
- "List of monitoring parameters from VNFs that should be
- propogated up into NSR";
- key "id";
+ key id;
- leaf id {
- description "Identifier for a monitoring parameter";
- type string;
- }
-
- leaf name {
- description "Name of the monitoring parameter";
- type string;
- }
-
- uses manotypes:monitoring-param-value;
- uses manotypes:monitoring-param-ui-data;
- uses manotypes:monitoring-param-aggregation;
+ uses nsd-base:monitoring-param-common;
list vnfd-monitoring-param {
description "A list of VNFD monitoring params";
}
}
}
+ }
- uses manotypes:input-parameter-xpath;
-
- list parameter-pool {
- description
- "Pool of parameter values from which to choose during
- configuration.";
- key "name";
-
- leaf name {
- description
- "Name of the configuration value pool";
- type string;
- }
-
- container range {
- description
- "Create a range of values from which to populate the pool with";
-
- leaf start-value {
- description
- "Generated pool values start at this value";
- type uint32;
- mandatory true;
- }
-
- leaf end-value {
- description
- "Generated pool values stop at this value";
- type uint32;
- mandatory true;
- }
- }
- }
-
- list service-primitive {
+ grouping nsd-service-primitive {
+ list service-primitive {
description
"Network service level service primitives.";
type string;
}
}
+ }
- list initial-config-primitive {
- rwpb:msg-new NsdInitialConfigPrimitive;
- description
- "Initial set of configuration primitives for NSD.";
- key "seq";
-
- uses manotypes:initial-config;
- }
-
- list key-pair {
- key "name";
- description "Used to configure the list of public keys to be injected as part
- of NS instantiation";
-
- leaf name {
- description "Name of this key pair";
- type string;
- }
-
- leaf key {
- description "Key associated with this key pair";
- type string;
- }
- }
-
- list user {
- key "name";
- description "List of users to be added through cloud-config";
-
- leaf name {
- description "Name of the user ";
- type string;
- }
+ container nsd-catalog {
- leaf user-info {
- description "The user name's real name";
- type string;
- }
+ list nsd {
+ key id;
- list key-pair {
- key "name";
- description "Used to configure the list of public keys to be injected as part
- of NS instantiation";
+ uses nsd-base:nsd-descriptor-common;
- leaf name {
- description "Name of this key pair";
- type string;
- }
+ uses nsd-vld;
- leaf key {
- description "Key associated with this key pair";
- type string;
- }
- }
- }
- }
+ uses nsd-constituent-vnfd;
+ uses nsd-placement-groups;
- container nsd-catalog {
+ uses nsd-vnf-dependency;
- list nsd {
- key "id";
+ uses nsd-monitoring-param;
- uses nsd-descriptor;
+ uses nsd-service-primitive;
}
}
-
}
--- /dev/null
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:nsr-role</role>
+ <key-set>
+ <name>project-name</name>
+ <path>/rw-project:project/rw-project:name</path>
+ <path>/nsr:exec-scale-out/nsr:project-name</path>
+ <path>/nsr:exec-scale-in/nsr:project-name</path>
+ <path>/nsr:exec-ns-service-primitive/nsr:project-name</path>
+ <path>/nsr:get-ns-service-primitive-values/nsr:project-name</path>
+ <path>/nsr:start-network-service/nsr:project-name</path>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-oper</role>
+ <keys-role>rw-project-mano:nsr-role</keys-role>
+ <priority>
+ <lower-than>
+ <role>rw-project:project-admin</role>
+ </lower-than>
+ </priority>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/nsr:ns-instance-config</path>
+ <path>/rw-project:project/nsr:ns-instance-opdata</path>
+ <path>/rw-project:project/nsr:key-pair</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:nsr-role</keys-role>
+ <priority>
+ <higher-than>
+ <role>rw-project-mano:lcm-oper</role>
+ </higher-than>
+ <higher-than>
+ <role>rw-project-mano:account-oper</role>
+ </higher-than>
+ <higher-than>
+ <role>rw-project-mano:catalog-oper</role>
+ </higher-than>
+ <higher-than>
+ <role>rw-project:project-oper</role>
+ </higher-than>
+
+ </priority>
+
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/nsr:ns-instance-config</path>
+ <path>/rw-project:project/nsr:ns-instance-opdata</path>
+ <path>/rw-project:project/nsr:key-pair</path>
+ <path>/nsr:exec-scale-out</path>
+ <path>/nsr:exec-scale-in</path>
+ <path>/nsr:exec-ns-service-primitive</path>
+ <path>/nsr:get-ns-service-primitive-values</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project:project-admin</role>
+ <keys-role>rw-project-mano:nsr-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/nsr:exec-scale-out</path>
+ <path>/nsr:exec-scale-in</path>
+ <path>/nsr:exec-ns-service-primitive</path>
+ <path>/nsr:get-ns-service-primitive-values</path>
+ </authorize>
+ </role-definition>
+</config>
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix nsr;
}
- tailf:annotate "/nsr:ns-instance-opdata" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf:annotate "/rw-project:project/nsr:ns-instance-opdata" {
tailf:callpoint rw_callpoint;
}
tailf:annotate "/nsr:exec-ns-service-primitive" {
tailf:actionpoint rw_actionpoint;
}
- tailf:annotate "/nsr:exec-scale-out" {
+ tailf:annotate "/nsr:get-ns-service-primitive-values" {
tailf:actionpoint rw_actionpoint;
}
tailf:annotate "/nsr:exec-scale-in" {
tailf:actionpoint rw_actionpoint;
}
- tailf:annotate "/nsr:get-ns-service-primitive-values" {
- tailf:actionpoint rw_actionpoint;
- }
- tailf:annotate "/nsr:start-network-service" {
+ tailf:annotate "/nsr:exec-scale-out" {
tailf:actionpoint rw_actionpoint;
}
}
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
namespace "urn:ietf:params:xml:ns:yang:nfvo:nsr";
prefix "nsr";
- import rw-pb-ext {
- prefix "rwpb";
- }
-
import vlr {
prefix "vlr";
}
prefix "vld";
}
- import nsd {
- prefix "nsd";
+ import nsd-base {
+ prefix "nsd-base";
+ }
+
+ import project-nsd {
+ prefix "project-nsd";
}
- import vnfd {
- prefix "vnfd";
+ import project-vnfd {
+ prefix "project-vnfd";
}
import vnfr {
}
import rw-sdn {
- prefix "rwsdn";
+ prefix "rw-sdn";
+ }
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
}
revision 2015-09-10 {
"Derived from earlier versions of base YANG files";
}
+ typedef vnffgr-operational-status {
+ type enumeration {
+ enum init;
+ enum running;
+ enum terminate;
+ enum terminated;
+ enum failed;
+ }
+ }
+
+ typedef ns-operational-status {
+ type enumeration {
+ enum init;
+ enum vl-init-phase;
+ enum vnf-init-phase;
+ enum running;
+ enum terminate;
+ enum vnf-terminate-phase;
+ enum vl-terminate-phase;
+ enum terminated;
+ enum failed;
+ enum scaling-out;
+ enum scaling-in;
+ enum vl-instantiate;
+ enum vl-terminate;
+ }
+ }
+
typedef config-states {
type enumeration {
enum init;
enum config_not_needed;
enum configured;
enum failed;
+ enum terminate;
}
}
leaf key-pair-ref {
description "A reference to the key pair entry in the global key pair table";
type leafref {
- path "/nsr:key-pair/nsr:name";
+ path "../../../../key-pair/name";
}
}
}
list user {
key "name";
-
- description "List of users to be added through cloud-config";
+ description "Used to configure the list of public keys to be injected as part
+ of ns instantiation";
leaf name {
- description "Name of the user ";
+ description "Name of this key pair";
type string;
}
leaf user-info {
list ssh-authorized-key {
key "key-pair-ref";
- description "Used to configure the list of public keys to be injected as part
+ description "Used to configure the list of public keys to be injected as part
of ns instantiation";
leaf key-pair-ref {
description "A reference to the key pair entry in the global key pair table";
type leafref {
- path "/nsr:key-pair/nsr:name";
+ path "../../../../../key-pair/name";
}
}
}
}
}
- list key-pair {
- key "name";
- description "Used to configure the list of public keys to be injected as part
+ augment "/rw-project:project" {
+ list key-pair {
+ key "name";
+ description "Used to configure the list of public keys to be injected as part
of ns instantiation";
+ leaf name {
+ description "Name of this key pair";
+ type string;
+ }
+
+ leaf key {
+ description "Key associated with this key pair";
+ type string;
+ }
+ }
+ }
+
+ grouping event-service-primitive {
+ leaf seq {
+ description
+ "Sequence number for the service primitive.";
+ type uint64;
+ }
+
leaf name {
- description "Name of this key pair";
+ description
+ "Name of the service primitive.";
type string;
+ mandatory "true";
}
- leaf key {
- description "Key associated with this key pair";
+ leaf user-defined-script {
+ description
+ "A user defined script.";
type string;
}
- }
- rpc start-network-service {
- description "Start the network service";
- input {
+ list parameter {
+ key "name";
leaf name {
- mandatory true;
- description "Name of the Network Service";
type string;
}
- leaf nsd-ref {
- description "Reference to NSR ID ref";
- mandatory true;
- type leafref {
- path "/nsd:nsd-catalog/nsd:nsd/nsd:id";
- }
- }
- uses ns-instance-config-params;
- }
- output {
- leaf nsr-id {
- description "Automatically generated parameter";
- type yang:uuid;
+ leaf value {
+ type string;
}
}
}
+ augment "/rw-project:project" {
+ container ns-instance-config {
+ list nsr {
+ key "id";
+ unique "name";
- container ns-instance-config {
+ leaf id {
+ description "Identifier for the NSR.";
+ type yang:uuid;
+ }
- list nsr {
- key "id";
- unique "name";
+ leaf name {
+ description "NSR name.";
+ type string;
+ }
- leaf id {
- description "Identifier for the NSR.";
- type yang:uuid;
- }
+ leaf short-name {
+ description "NSR short name.";
+ type string;
+ }
- leaf name {
- description "NSR name.";
- type string;
- }
+ leaf description {
+ description "NSR description.";
+ type string;
+ }
- leaf short-name {
- description "NSR short name.";
- type string;
- }
+ leaf admin-status {
+ description
+ "This is the administrative status of the NS instance";
- leaf description {
- description "NSR description.";
- type string;
- }
+ type enumeration {
+ enum ENABLED;
+ enum DISABLED;
+ }
+ }
- leaf admin-status {
- description
- "This is the administrative status of the NS instance";
+ container nsd {
+ description "NS descriptor used to instantiate this NS";
- type enumeration {
- enum ENABLED;
- enum DISABLED;
- }
- }
+ uses nsd-base:nsd-descriptor-common;
- container nsd {
- description "NS descriptor used to instantiate this NS";
- uses nsd:nsd-descriptor;
- }
+ uses project-nsd:nsr-nsd-vld;
+
+ uses project-nsd:nsr-nsd-constituent-vnfd;
- uses ns-instance-config-params;
+ uses project-nsd:nsr-nsd-placement-groups;
+
+ uses project-nsd:nsr-nsd-vnf-dependency;
+
+ uses project-nsd:nsr-nsd-monitoring-param;
+
+ uses project-nsd:nsr-nsd-service-primitive;
+ }
+ uses ns-instance-config-params;
+ }
}
}
- grouping ns-instance-config-params {
+ grouping ns-instance-config-params-common {
uses manotypes:input-parameter;
list scaling-group {
leaf scaling-group-name-ref {
description "name of the scaling group
- leafref path ../../nsd/scaling-group-descriptor/name";
+ leafref path ../nsd/scaling-group-descriptor/name";
type string;
}
list nsd-placement-group-maps {
description
"Mapping from mano-placement groups construct from NSD to cloud
- platform placement group construct";
+ platform placement group construct";
key "placement-group-ref";
leaf placement-group-ref {
- description "Reference for NSD placement group
- leafref path ../../nsd/placement-groups/name";
+ description
+ "Reference for NSD placement group";
+ // type leafref {
+ // path "../../nsd/placement-groups/name";
+ // }
type string;
}
uses manotypes:placement-group-input;
}
+ }
+
+ grouping ns-instance-config-params {
+ uses ns-instance-config-params-common;
- list vnfd-placement-group-maps {
+ list vnfd-placement-group-maps {
description
- "Mapping from mano-placement groups construct from VNFD to cloud
+ "Mapping from mano-placement groups construct from VNFD to cloud
platform placement group construct";
- key "placement-group-ref vnfd-id-ref";
+ key "placement-group-ref vnfd-id-ref";
- leaf vnfd-id-ref {
- description
+ leaf vnfd-id-ref {
+ description
"A reference to a vnfd. This is a
leafref to path:
- ../../../../nsd:constituent-vnfd
- + [nsr:id = current()/../nsd:id-ref]
- + /nsd:vnfd-id-ref
- NOTE: confd limitations prevent the use of xpath";
- type yang:uuid;
- }
+ ../../../../project-nsd:constituent-vnfd
+ + [id = current()/../id-ref]
+ + /project-nsd:vnfd-id-ref
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resolved this will switched to use
+ leafref";
+ type yang:uuid;
+ }
- leaf placement-group-ref {
- description
+ leaf placement-group-ref {
+ description
"A reference to VNFD placement group";
- type leafref {
- path "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id = current()/" +
- "../nsr:vnfd-id-ref]/vnfd:placement-groups/vnfd:name";
+ type leafref {
+ path "../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd[project-vnfd:id = " +
+ "current()/../vnfd-id-ref]/project-vnfd:placement-groups/project-vnfd:name";
+ }
}
+
+ uses manotypes:placement-group-input;
}
- uses manotypes:placement-group-input;
- }
- uses cloud-config;
+ uses cloud-config;
}
grouping vnffgr {
leaf vnffgd-id-ref {
description "VNFFG descriptor id reference";
type leafref {
- path "/nsr:ns-instance-config/nsr:nsr"
- + "[nsr:id=current()/../../ns-instance-config-ref]"
- + "/nsr:nsd/nsr:vnffgd/nsr:id";
+ path "../../../../ns-instance-config/nsr"
+ + "[id=current()/../../ns-instance-config-ref]"
+ + "/nsd/vnffgd/id";
}
}
leaf vnffgd-name-ref {
description "VNFFG descriptor name reference";
type leafref {
- path "/ns-instance-config/nsr"
+ path "../../../../ns-instance-config/nsr"
+ "[id=current()/../../ns-instance-config-ref]"
- + "/nsd/vnffgd[nsr:id = current()/../vnffgd-id-ref]"
+ + "/nsd/vnffgd"
+ + "[id=current()/../vnffgd-id-ref]"
+ "/name";
}
}
"The SDN account to use when requesting resources for
this vnffgr";
type leafref {
- path "/rwsdn:sdn/rwsdn:account/rwsdn:name";
+ path "../../../../rw-sdn:sdn/rw-sdn:account/rw-sdn:name";
}
}
+ leaf cloud-account {
+ description "Cloud Account in which NSR is instantiated";
+ type string;
+ }
+
leaf operational-status {
description
"The operational status of the VNFFGR instance
terminated : The VNFFGR is in the terminated state.
failed : The VNFFGR instantiation failed
";
-
- type enumeration {
- rwpb:enum-type "VnffgrOperationalStatus";
- enum init;
- enum running;
- enum terminate;
- enum terminated;
- enum failed;
- }
+ type vnffgr-operational-status;
}
list rsp {
type string;
}
+ leaf rsp-id {
+ description
+ "Returned Identifier for the RSP.";
+ type yang:uuid;
+ }
+
leaf vnffgd-rsp-id-ref {
description
"Identifier for the VNFFG Descriptor RSP reference";
type leafref {
- path "/ns-instance-config/nsr"
+ path "../../../../../ns-instance-config/nsr"
+ "[id=current()/../../../ns-instance-config-ref]"
+ "/nsd/vnffgd"
+ "[id=current()/../../vnffgd-id-ref]"
description
"Name for the VNFFG Descriptor RSP reference";
type leafref {
- path "/ns-instance-config/nsr:nsr"
+ path "../../../../../ns-instance-config/nsr"
+ "[id=current()/../../../ns-instance-config-ref]"
+ "/nsd/vnffgd"
+ "[id=current()/../../vnffgd-id-ref]"
- + "/rsp[id=current()/../vnffgd-rsp-id-ref]"
+ + "/rsp"
+ + "[id=current()/../vnffgd-rsp-id-ref]"
+ "/name";
}
}
description
"A reference to a vnfr id";
type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id";
+ path "../../../../../../vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id";
}
}
leaf vnfr-name-ref {
description
"A reference to a vnfr name";
type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:name";
+ path "../../../../../../vnfr:vnfr-catalog/vnfr:vnfr/vnfr:name";
}
}
leaf vnfr-connection-point-ref {
description
"A reference to a vnfr connection point.";
type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr"
- + "[vnfr:id = current()/../nsr:vnfr-id-ref]"
+ path "../../../../../../vnfr:vnfr-catalog/vnfr:vnfr"
+ + "[vnfr:id = current()/../vnfr-id-ref]"
+ "/vnfr:connection-point/vnfr:name";
}
}
type string;
}
leaf port-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf vm-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf address {
"Name of the classifier.";
type string;
}
+ leaf-list classifier-id {
+ description
+ "Returned Identifier for the classifier rule.";
+ type yang:uuid;
+ }
leaf rsp-id-ref {
description
"A reference to the RSP.";
type leafref {
- path "../../nsr:rsp/nsr:id";
+ path "../../rsp/id";
}
}
leaf rsp-name {
description
"A reference to a vnfr id";
type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id";
+ path "../../../../../vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id";
}
}
leaf vnfr-name-ref {
description
"A reference to a vnfr name";
type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:name";
+ path "../../../../../vnfr:vnfr-catalog/vnfr:vnfr/vnfr:name";
}
}
leaf vnfr-connection-point-ref {
description
"A reference to a vnfr connection point.";
type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr"
- + "[vnfr:id = current()/../nsr:vnfr-id-ref]"
+ path "../../../../../vnfr:vnfr-catalog/vnfr:vnfr"
+ + "[vnfr:id = current()/../vnfr-id-ref]"
+ "/vnfr:connection-point/vnfr:name";
}
}
leaf port-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf vm-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf ip-address {
}
}
- container ns-instance-opdata {
- config false;
+ augment "/rw-project:project" {
+ container ns-instance-opdata {
+ config false;
- list nsr {
- key "ns-instance-config-ref";
+ list nsr {
+ key "ns-instance-config-ref";
- leaf ns-instance-config-ref {
- type leafref {
- path "/nsr:ns-instance-config/nsr:nsr/nsr:id";
+ leaf ns-instance-config-ref {
+ type leafref {
+ path "../../../ns-instance-config/nsr/id";
+ }
+ // type yang:uuid;
}
- }
- leaf name-ref {
- description "Network service name reference";
- type leafref {
- path "/nsr:ns-instance-config/nsr:nsr/nsr:name";
+ leaf name-ref {
+ description "Network service name reference";
+ type leafref {
+ path "../../../ns-instance-config/nsr" +
+ "[id=current()/../ns-instance-config-ref]" +
+ "/name";
+ }
}
- }
- leaf nsd-ref {
- description "Network service descriptor id reference";
- type leafref {
- path "/ns-instance-config/nsr"
- + "[id=current()/../ns-instance-config-ref]"
- + "/nsd/id";
+ leaf nsd-ref {
+ description "Network service descriptor id reference";
+ type leafref {
+ path "../../../ns-instance-config/nsr"
+ + "[id=current()/../ns-instance-config-ref]"
+ + "/nsd/id";
+ }
}
- }
- leaf nsd-name-ref {
- description "Network service descriptor name reference";
- type leafref {
- path "/ns-instance-config/nsr"
- + "[id=current()/../ns-instance-config-ref]"
- + "/nsd/name";
+ leaf nsd-name-ref {
+ description "Network service descriptor name reference";
+ type leafref {
+ path "../../../ns-instance-config/nsr"
+ + "[id=current()/../ns-instance-config-ref]"
+ + "/nsd/name";
+ }
}
- }
- leaf create-time {
- description
- "Creation timestamp of this Network Service.
+ leaf create-time {
+ description
+ "Creation timestamp of this Network Service.
The timestamp is expressed as seconds
since unix epoch - 1970-01-01T00:00:00Z";
- type uint32;
- }
+ type uint32;
+ }
- leaf uptime {
- description
- "Active period of this Network Service.
+ leaf uptime {
+ description
+ "Active period of this Network Service.
Uptime is expressed in seconds";
- type uint32;
- }
+ type uint32;
+ }
- list connection-point {
- description
+ list connection-point {
+ description
"List for external connection points.
Each NS has one or more external connection points.
As the name implies that external connection points
construct network service chains by connecting the
connection points between different NS.";
- key "name";
- leaf name {
- description
+ key "name";
+ leaf name {
+ description
"Name of the NS connection point.";
- type string;
- }
+ type string;
+ }
- leaf type {
- description
+ leaf type {
+ description
"Type of the connection point.";
- type manotypes:connection-point-type;
+ type manotypes:connection-point-type;
+ }
}
- }
- list vlr {
- key "vlr-ref";
- leaf vlr-ref {
- description
+ list vlr {
+ key "vlr-ref";
+ leaf vlr-ref {
+ description
"Reference to a VLR record in the VLR catalog";
- type leafref {
- path "/vlr:vlr-catalog/vlr:vlr/vlr:id";
+ type leafref {
+ path "../../../../vlr:vlr-catalog/vlr:vlr/vlr:id";
+ }
}
- }
- list vnfr-connection-point-ref {
- description
- "A list of references to connection points.";
- key "vnfr-id";
+ list vnfr-connection-point-ref {
+ description
+ "A list of references to connection points.";
+ key "vnfr-id";
- leaf vnfr-id {
- description "A reference to a vnfr";
- type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id";
+ leaf vnfr-id {
+ description "A reference to a vnfr";
+ type leafref {
+ path "../../../../../vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id";
+ }
}
- }
- leaf connection-point {
- description
+ leaf connection-point {
+ description
"A reference to a connection point name in a vnfr";
- type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr"
- + "[vnfr:id = current()/../nsr:vnfr-id]"
- + "/vnfr:connection-point/vnfr:name";
+ type leafref {
+ path "../../../../../vnfr:vnfr-catalog/vnfr:vnfr"
+ + "[vnfr:id = current()/../vnfr-id]"
+ + "/vnfr:connection-point/vnfr:name";
+ }
}
}
}
- }
- list constituent-vnfr-ref {
- description
+ list constituent-vnfr-ref {
+ description
"List of VNFRs that are part of this
network service.";
- key "vnfr-id";
+ key "vnfr-id";
- leaf vnfr-id {
- description
- "Reference to the VNFR id
- This should be a leafref to /vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id
- But due to confd bug (RIFT-9451), changing to string.
- ";
- type string;
+ leaf vnfr-id {
+ description
+ "Reference to the VNFR id
+ This should be a leafref to /vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id
+ But due to confd bug (RIFT-9451), changing to string.";
+ type string;
+ }
}
- }
- list scaling-group-record {
- description "List of scaling group records";
- key "scaling-group-name-ref";
+ list scaling-group-record {
+ description "List of scaling group records";
+ key "scaling-group-name-ref";
- leaf scaling-group-name-ref {
- description "name of the scaling group";
- type leafref {
- path "/ns-instance-config/nsr"
- + "[id=current()/../../ns-instance-config-ref]"
- + "/nsd/scaling-group-descriptor/name";
+ leaf scaling-group-name-ref {
+ description "name of the scaling group";
+ type leafref {
+ path "../../../../ns-instance-config/nsr"
+ + "[id=current()/../../ns-instance-config-ref]"
+ + "/nsd/scaling-group-descriptor/name";
+ }
}
- }
- list instance {
- description "Reference to scaling group instance record";
- key "instance-id";
- leaf instance-id {
- description "Scaling group instance id";
- type uint16;
- }
+ list instance {
+ description "Reference to scaling group instance record";
+ key "instance-id";
+ leaf instance-id {
+ description "Scaling group instance id";
+ type uint16;
+ }
- leaf is-default {
- description "Flag indicating whether this instance was part of
+ leaf is-default {
+ description "Flag indicating whether this instance was part of
default scaling group (and thus undeletable)";
- type boolean;
- }
+ type boolean;
+ }
- leaf op-status {
- description
- "The operational status of the NS instance
+ leaf op-status {
+ description
+ "The operational status of the NS instance
init : The scaling group has just started.
vnf-init-phase : The VNFs in the scaling group are being instantiated.
running : The scaling group is in running state.
failed : The scaling group instantiation failed.
";
- type enumeration {
- enum init;
- enum vnf-init-phase;
- enum running;
- enum terminate;
- enum vnf-terminate-phase;
- enum terminated;
- enum failed;
+ type enumeration {
+ enum init;
+ enum vnf-init-phase;
+ enum running;
+ enum terminate;
+ enum vnf-terminate-phase;
+ enum terminated;
+ enum failed;
+ }
}
- }
- leaf config-status {
- description
- "The configuration status of the scaling group instance
+ leaf config-status {
+ description
+ "The configuration status of the scaling group instance
configuring : At least one of the VNFs in this scaling group instance
is in configuring state
configured : All the VNFs in this scaling group instance are
configured or config-not-needed state
failed : Configuring this scaling group instance failed
";
- type config-states;
- }
+ type config-states;
+ }
- leaf error-msg {
- description
- "Reason for failure in configuration of this scaling instance";
- type string;
- }
+ leaf error-msg {
+ description
+ "Reason for failure in configuration of this scaling instance";
+ type string;
+ }
- leaf create-time {
- description
- "Creation timestamp of this scaling group record.
+ leaf create-time {
+ description
+ "Creation timestamp of this scaling group record.
The timestamp is expressed as seconds
since unix epoch - 1970-01-01T00:00:00Z";
type uint32;
- }
+ }
- leaf-list vnfrs {
- description "Reference to VNFR within the scale instance";
- type leafref {
- path "../../../constituent-vnfr-ref/vnfr-id";
+ leaf-list vnfrs {
+ description "Reference to VNFR within the scale instance";
+ type leafref {
+ path "../../../constituent-vnfr-ref/vnfr-id";
+ }
}
}
}
- }
- uses vnffgr;
+ uses vnffgr;
- leaf operational-status {
- description
- "The operational status of the NS instance
+ leaf operational-status {
+ description
+ "The operational status of the NS instance
init : The network service has just started.
vl-init-phase : The VLs in the NS are being instantiated.
vnf-init-phase : The VNFs in the NS are being instantiated.
vl-terminate : The NS is terminating a VL
";
- type enumeration {
- enum init;
- enum vl-init-phase;
- enum vnf-init-phase;
- enum running;
- enum terminate;
- enum vnf-terminate-phase;
- enum vl-terminate-phase;
- enum terminated;
- enum failed;
- enum scaling-out;
- enum scaling-in;
- enum vl-instantiate;
- enum vl-terminate;
+ type ns-operational-status;
}
- }
- leaf config-status {
- description
- "The configuration status of the NS instance
+ leaf config-status {
+ description
+ "The configuration status of the NS instance
configuring: At least one of the VNFs in this instance is in configuring state
configured: All the VNFs in this NS instance are configured or config-not-needed state
";
- type config-states;
- }
-
- list service-primitive {
- description
- "Network service level service primitives.";
+ type config-states;
+ }
- key "name";
+ list service-primitive {
+ description
+ "Network service level service primitives.";
- leaf name {
- description
- "Name of the service primitive.";
- type string;
- }
+ key "name";
- list parameter {
- description
- "List of parameters for the service primitive.";
+ leaf name {
+ description
+ "Name of the service primitive.";
+ type string;
+ }
- key "name";
- uses manotypes:primitive-parameter;
- }
+ list parameter {
+ description
+ "List of parameters for the service primitive.";
- uses manotypes:ui-primitive-group;
+ key "name";
+ uses manotypes:primitive-parameter;
+ }
- list vnf-primitive-group {
- description
- "List of service primitives grouped by VNF.";
+ uses manotypes:ui-primitive-group;
- key "member-vnf-index-ref";
- leaf member-vnf-index-ref {
+ list vnf-primitive-group {
description
- "Reference to member-vnf within constituent-vnfds";
- type uint64;
- }
+ "Reference to member-vnf within constituent-vnfds";
- leaf vnfd-id-ref {
- description
- "A reference to a vnfd. This is a
- leafref to path:
- ../../../../nsd:constituent-vnfd
- + [nsd:id = current()/../nsd:id-ref]
- + /nsd:vnfd-id-ref
- NOTE: confd limitations prevent the use of xpath";
+ key "member-vnf-index-ref";
+ leaf member-vnf-index-ref {
+ description
+ "Reference to member-vnf within constituent-vnfds";
+ type uint64;
+ }
- type string;
- }
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../../../nsd:constituent-vnfd
+ + [nsd:id = current()/../nsd:id-ref]
+ + /nsd:vnfd-id-ref
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resovled this will switched to use
+ leafref";
- leaf vnfd-name {
- description
- "Name of the VNFD";
- type string;
- }
+ type string;
+ }
- list primitive {
- key "index";
+ leaf vnfd-name {
+ description
+ "Name of the VNFD";
+ type string;
+ }
- leaf index {
- description "Index of this primitive";
- type uint32;
- }
+ list primitive {
+ key "index";
- leaf name {
- description "Name of the primitive in the VNF primitive ";
- type string;
- }
- }
- }
+ leaf index {
+ description "Index of this primitive";
+ type uint32;
+ }
- leaf user-defined-script {
- description
- "A user defined script.";
- type string;
- }
- }
+ leaf name {
+ description "Name of the primitive in the VNF primitive ";
+ type string;
+ }
+ }
+ }
- list initial-config-primitive {
- rwpb:msg-new NsrInitialConfigPrimitive;
- description
- "Initial set of configuration primitives for NSD.";
- key "seq";
- leaf seq {
- description
- "Sequence number for the configuration primitive.";
- type uint64;
+ leaf user-defined-script {
+ description
+ "A user defined script.";
+ type string;
+ }
}
- leaf name {
+ list initial-service-primitive {
description
- "Name of the configuration primitive.";
- type string;
- mandatory "true";
+ "Initial set of service primitives for NSD.";
+ key "seq";
+
+ uses event-service-primitive;
}
- leaf user-defined-script {
+ list terminate-service-primitive {
description
- "A user defined script.";
- type string;
+ "Set of service primitives to
+ execute during termination of NSD.";
+ key "seq";
+
+ uses event-service-primitive;
}
- list parameter {
+ list monitoring-param {
description
- "List of parameters for the initial config primitive";
- key "name";
- leaf name {
- description "Name of the intitial config parameter";
+ "List of NS level params.";
+ key "id";
+
+ uses manotypes:monitoring-param-value;
+ uses manotypes:monitoring-param-ui-data;
+ uses manotypes:monitoring-param-aggregation;
+
+ leaf id {
type string;
}
- leaf value {
- description "Value associated with the initial config
- parameter";
+ leaf name {
type string;
}
- }
- }
-
-
- list monitoring-param {
- description
- "List of NS level params.";
- key "id";
- uses manotypes:monitoring-param-value;
- uses manotypes:monitoring-param-ui-data;
- uses manotypes:monitoring-param-aggregation;
-
- leaf id {
- type string;
- }
-
- leaf name {
- type string;
- }
-
- leaf nsd-mon-param-ref {
- description "Reference to the NSD monitoring param descriptor
+ leaf nsd-mon-param-ref {
+ description "Reference to the NSD monitoring param descriptor
that produced this result";
- type leafref {
- path "/nsd:nsd-catalog/nsd:nsd[nsd:id = current()/" +
- "../../nsr:nsd-ref]/nsd:monitoring-param/nsd:id";
+ // TODO: Fix leafref
+ type leafref {
+ path "../../../../project-nsd:nsd-catalog/project-nsd:nsd" +
+ "[project-nsd:id = current()/../../nsd-ref]" +
+ "/project-nsd:monitoring-param/project-nsd:id";
+ }
}
- }
- list vnfr-mon-param-ref {
- description "A list of VNFR monitoring params associated with this monp";
- key "vnfr-id-ref vnfr-mon-param-ref";
+ list vnfr-mon-param-ref {
+ description "A list of VNFR monitoring params associated with this monp";
+ key "vnfr-id-ref vnfr-mon-param-ref";
- leaf vnfr-id-ref {
- description
- "A reference to a vnfr. This is a
+ leaf vnfr-id-ref {
+ description
+ "A reference to a vnfr. This is a
leafref to path:
/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:id";
- type yang:uuid;
- }
+ type yang:uuid;
+ }
- leaf vnfr-mon-param-ref {
- description "A reference to the VNFR monitoring param";
- type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr"
- + "[vnfr:id = current()/../nsr:vnfr-id-ref]"
- + "/vnfr:monitoring-param/vnfr:id";
+ leaf vnfr-mon-param-ref {
+ description "A reference to the VNFR monitoring param";
+ type leafref {
+ path "../../../../../vnfr:vnfr-catalog/vnfr:vnfr"
+ + "[vnfr:id = current()/../vnfr-id-ref]"
+ + "/vnfr:monitoring-param/vnfr:id";
+ }
}
}
}
- }
- list config-agent-job {
- key "job-id";
+ list config-agent-job {
+ key "job-id";
- leaf job-id {
- description "config agent job Identifier for the NS.";
- type uint64;
- }
+ leaf job-id {
+ description "config agent job Identifier for the NS.";
+ type uint64;
+ }
- leaf job-name {
- description "Config agent job name";
- type string;
- }
+ leaf job-name {
+ description "Config agent job name";
+ type string;
+ }
- leaf job-status {
- description
+ leaf job-status {
+ description
"Job status to be set based on each VNF primitive execution,
pending - if at least one VNF is in pending state
and remaining VNFs are in success state.
Success - if all VNF executions are in success state
failure - if one of the VNF executions is failure";
- type enumeration {
- enum pending;
- enum success;
- enum failure;
+ type enumeration {
+ enum pending;
+ enum success;
+ enum failure;
+ }
}
- }
- leaf triggered-by {
- description "The primitive is triggered from NS or VNF level";
- type trigger-type;
- }
+ leaf triggered-by {
+ description "The primitive is triggered from NS or VNF level";
+ type trigger-type;
+ }
- leaf create-time {
- description
- "Creation timestamp of this Config Agent Job.
+ leaf create-time {
+ description
+ "Creation timestamp of this Config Agent Job.
The timestamp is expressed as seconds
since unix epoch - 1970-01-01T00:00:00Z";
- type uint32;
- }
-
- leaf job-status-details {
- description "Config agent job status details, in case of errors";
- type string;
- }
-
- uses manotypes:primitive-parameter-value;
+ type uint32;
+ }
- list parameter-group {
- description
- "List of NS Primitive parameter groups";
- key "name";
- leaf name {
- description
- "Name of the parameter.";
+ leaf job-status-details {
+ description "Config agent job status details, in case of errors";
type string;
}
uses manotypes:primitive-parameter-value;
- }
- list vnfr {
- key "id";
- leaf id {
- description "Identifier for the VNFR.";
- type yang:uuid;
- }
- leaf vnf-job-status {
+ list parameter-group {
description
- "Job status to be set based on each VNF primitive execution,
- pending - if at least one primitive is in pending state
- and remaining primitives are in success state.
- Success - if all primitive executions are in success state
- failure - if one of the primitive executions is failure";
- type enumeration {
- enum pending;
- enum success;
- enum failure;
- }
- }
-
- list primitive {
+ "List of NS Primitive parameter groups";
key "name";
leaf name {
- description "the name of the primitive";
+ description
+ "Name of the parameter.";
type string;
}
uses manotypes:primitive-parameter-value;
+ }
- leaf execution-id {
- description "Execution id of the primitive";
- type string;
+ list vnfr {
+ key "id";
+ leaf id {
+ description "Identifier for the VNFR.";
+ type yang:uuid;
}
- leaf execution-status {
- description "status of the Execution";
+ leaf vnf-job-status {
+ description
+ "Job status to be set based on each VNF primitive execution,
+ pending - if at least one primitive is in pending state
+ and remaining primitives are in success state.
+ Success - if all primitive executions are in success state
+ failure - if one of the primitive executions is failure";
type enumeration {
enum pending;
enum success;
enum failure;
}
}
- leaf execution-error-details {
- description "Error details if execution-status is failure";
- type string;
+
+ list primitive {
+ key "name";
+ leaf name {
+ description "the name of the primitive";
+ type string;
+ }
+
+ uses manotypes:primitive-parameter-value;
+
+ leaf execution-id {
+ description "Execution id of the primitive";
+ type string;
+ }
+ leaf execution-status {
+ description "status of the Execution";
+ type enumeration {
+ enum pending;
+ enum success;
+ enum failure;
+ }
+ }
+ leaf execution-error-details {
+ description "Error details if execution-status is failure";
+ type string;
+ }
}
}
}
}
}
+ grouping rpc-common {
+ uses manotypes:rpc-project-name;
+
+ leaf nsr_id_ref {
+ description "Reference to NSR ID ref";
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/.." +
+ "/nsr:project-name]/nsr:ns-instance-config/nsr:nsr/nsr:id";
+ }
+ mandatory true;
+ }
+ }
+
rpc get-ns-service-primitive-values {
description "Get the service primitive parameter values";
- input {
- leaf nsr_id_ref {
- description "Reference to NSR ID ref";
- mandatory true;
- type leafref {
- path "/nsr:ns-instance-config/nsr:nsr/nsr:id";
- }
- }
+ input {
leaf name {
description "Name of the NS service primitive group";
mandatory true;
type string;
}
+
+ uses rpc-common;
}
output {
description
"A reference to a vnfd. This is a
leafref to path:
- ../../../../nsd:constituent-vnfd
- + [nsd:id = current()/../nsd:id-ref]
- + /nsd:vnfd-id-ref
- NOTE: confd limitations prevent the use of xpath";
+ ../../../../project-nsd:constituent-vnfd
+ + [project-nsd:id = current()/../project-nsd:id-ref]
+ + /project-nsd:vnfd-id-ref
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resolved this will switched to use
+ leafref";
type string;
}
type string;
}
- leaf nsr_id_ref {
- description "Reference to NSR ID ref";
- type leafref {
- path "/nsr:ns-instance-config/nsr:nsr/nsr:id";
- }
- }
+ uses rpc-common;
leaf triggered-by {
description "The primitive is triggered from NS or VNF level";
type string;
}
- leaf nsr_id_ref {
- description "Reference to NSR ID ref";
- type leafref {
- path "/nsr:ns-instance-config/nsr:nsr/nsr:id";
- }
- }
+ uses rpc-common;
leaf triggered-by {
description "The primitive is triggered from NS or VNF level";
description "Executes scale out request";
input {
-
- leaf nsr-id-ref {
- description "Reference to NSR ID ref";
- type leafref {
- path "/nsr:ns-instance-config/nsr:nsr/nsr:id";
- }
- }
+ uses rpc-common;
leaf scaling-group-name-ref {
description "name of the scaling group";
- type string;
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/.." +
+ "/nsr:project-name]/nsr:ns-instance-config/nsr:nsr" +
+ "[nsr:id=current()/../nsr:nsr_id_ref]/nsr:nsd" +
+ "/nsr:scaling-group-descriptor/nsr:name";
+ }
+ mandatory true;
}
leaf instance-id {
description "id of the scaling group";
- type uint64;
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/.." +
+ "/nsr:project-name]/nsr:ns-instance-config/nsr:nsr" +
+ "[nsr:id=current()/../nsr:nsr_id_ref]" +
+ "/nsr:scaling-group[nsr:scaling-group-name-ref=current()/.." +
+ "/nsr:scaling-group-name-ref]/nsr:instance/nsr:id";
+ }
+ mandatory true;
}
description "Executes scale out request";
input {
-
- leaf nsr-id-ref {
- description "Reference to NSR ID ref";
- type leafref {
- path "/nsr:ns-instance-config/nsr:nsr/nsr:id";
- }
- }
+ uses rpc-common;
leaf scaling-group-name-ref {
description "name of the scaling group";
- type string;
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/.." +
+ "/nsr:project-name]/nsr:ns-instance-config/nsr:nsr" +
+ "[nsr:id=current()/../nsr:nsr_id_ref]/nsr:nsd" +
+ "/nsr:scaling-group-descriptor/nsr:name";
+ }
+ mandatory true;
}
leaf instance-id {
description "id of the scaling group";
type uint64;
}
-
}
+
output {
leaf instance-id {
description "id of the scaling group";
}
}
+ rpc start-network-service {
+ description "Start the network service";
+ input {
+ leaf name {
+ mandatory true;
+ description "Name of the Network Service";
+ type string;
+ }
+
+ uses manotypes:rpc-project-name;
+
+ leaf nsd_id_ref {
+ description "Reference to NSD ID ref";
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/.." +
+ "/project-name]/project-nsd:nsd-catalog/project-nsd:nsd/project-nsd:id";
+ }
+ }
+ uses ns-instance-config-params-common;
+
+ list vnfd-placement-group-maps {
+ description
+ "Mapping from mano-placement groups construct from VNFD to cloud
+ platform placement group construct";
+
+ key "placement-group-ref vnfd-id-ref";
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../../../project-nsd:constituent-vnfd
+ + [id = current()/../project-nsd:id-ref]
+ + /project-nsd:vnfd-id-ref
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resovled this will switched to use
+ leafref";
+ type yang:uuid;
+ }
+
+ leaf placement-group-ref {
+ description
+ "A reference to VNFD placement group";
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/" +
+ "../../project-name]/project-vnfd:vnfd-catalog/project-vnfd:vnfd[project-vnfd:id = " +
+ "current()/../vnfd-id-ref]/project-vnfd:placement-groups/project-vnfd:name";
+ }
+ }
+
+ uses manotypes:placement-group-input;
+
+ list ssh-authorized-key {
+ key "key-pair-ref";
+
+ description "List of authorized ssh keys as part of cloud-config";
+
+ leaf key-pair-ref {
+ description "A reference to the key pair entry in the global key pair table";
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/../../../" +
+ "project-name]/key-pair/name";
+ }
+ }
+ }
+
+ list user {
+ key "name";
+
+ description "List of users to be added through cloud-config";
+ leaf name {
+ description "Name of the user ";
+ type string;
+ }
+ leaf user-info {
+ description "The user name's real name";
+ type string;
+ }
+ list ssh-authorized-key {
+ key "key-pair-ref";
+
+ description "Used to configure the list of public keys to be injected as part
+ of ns instantiation";
+
+ leaf key-pair-ref {
+ description "A reference to the key pair entry in the global key pair table";
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/" +
+ "../../../../project-name]/key-pair/name";
+ }
+ }
+ }
+ }
+ }
+ }
+
+ output {
+ leaf nsr-id {
+ description "Automatically generated parameter";
+ type yang:uuid;
+ }
+ }
+ }
}
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
namespace "urn:ietf:params:xml:ns:yang:nfvo:pnfd";
prefix "pnfd";
- import rw-pb-ext {
- prefix "rwpb";
- }
-
import ietf-inet-types {
prefix "inet";
}
prefix "manotypes";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-09-10 {
description
"Initial revision. This YANG file defines
"Derived from earlier versions of base YANG files";
}
- container pnfd-catalog {
+ augment "/rw-project:project" {
+ container pnfd-catalog {
- list pnfd {
- key "id";
+ list pnfd {
+ key "id";
- leaf id {
- description "Identifier for the PNFD.";
- type yang:uuid;
- }
+ leaf id {
+ description "Identifier for the PNFD.";
+ type yang:uuid;
+ }
- leaf name {
- description "PNFD name.";
- type string;
- }
+ leaf name {
+ description "PNFD name.";
+ type string;
+ }
- leaf short-name {
- description "Short name to appear as label in the UI";
- type string;
- }
+ leaf short-name {
+ description "Short name to appear as label in the UI";
+ type string;
+ }
- leaf vendor {
- description "Vendor of the PNFD.";
- type string;
- }
+ leaf vendor {
+ description "Vendor of the PNFD.";
+ type string;
+ }
- leaf description {
- description "Description of the PNFD.";
- type string;
- }
+ leaf description {
+ description "Description of the PNFD.";
+ type string;
+ }
- leaf version {
- description "Version of the PNFD";
- type string;
- }
+ leaf version {
+ description "Version of the PNFD";
+ type string;
+ }
- list connection-point {
- description
+ list connection-point {
+ description
"List for external connection points. Each PNF has one or more external
connection points.";
- key "id";
- leaf id {
- description
+ key "id";
+ leaf id {
+ description
"Identifier for the external connection points";
- type uint64;
- }
+ type uint64;
+ }
- leaf cp-type {
- description
+ leaf cp-type {
+ description
"Type of the connection point.";
- type manotypes:connection-point-type;
+ type manotypes:connection-point-type;
+ }
}
}
}
--- /dev/null
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:project-nsd-role</role>
+ <key-set>
+ <name>project-name</name>
+ <path>/rw-project:project/rw-project:name</path>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:catalog-oper</role>
+ <keys-role>rw-project-mano:project-nsd-role</keys-role>
+ <priority>
+ <lower-than>
+ <role>rw-project:project-admin</role>
+ </lower-than>
+ </priority>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/project-nsd:nsd-catalog</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:catalog-admin</role>
+ <keys-role>rw-project-mano:project-nsd-role</keys-role>
+ <priority>
+ <higher-than>
+ <role>rw-project-mano:catalog-oper</role>
+ </higher-than>
+ <higher-than>
+ <role>rw-project-mano:account-oper</role>
+ </higher-than>
+ <higher-than>
+ <role>rw-project-mano:lcm-oper</role>
+ </higher-than>
+ <higher-than>
+ <role>rw-project:project-oper</role>
+ </higher-than>
+ <higher-than>
+ <role>rw-project-mano:lcm-admin</role>
+ </higher-than>
+ </priority>
+
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/project-nsd:nsd-catalog</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:project-nsd-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/project-nsd:nsd-catalog</path>
+ </authorize>
+ </role-definition>
+</config>
--- /dev/null
+
+/*
+ *
+ * Copyright 2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+module project-nsd
+{
+ namespace "http://riftio.com/ns/riftware-1.0/project-nsd";
+ prefix "project-nsd";
+
+ import ietf-yang-types {
+ prefix "yang";
+ }
+
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ import project-vnfd {
+ prefix "project-vnfd";
+ }
+
+ import nsd-base {
+ prefix "nsd-base";
+ }
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+
+ revision 2017-02-28 {
+ description
+ "Initial revision. This YANG file defines
+ the Network Service Descriptor (NSD)
+ under projects";
+ reference
+ "Derived from earlier versions of base YANG files";
+ }
+
+
+ grouping nsd-constituent-vnfd {
+ list constituent-vnfd {
+ description
+ "List of VNFDs that are part of this
+ network service.";
+
+ key "member-vnf-index";
+
+ leaf member-vnf-index {
+ description
+ "Identifier/index for the VNFD. This separate id
+ is required to ensure that multiple VNFs can be
+ part of single NS";
+ type uint64;
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "Identifier for the VNFD.";
+ type leafref {
+ path "/rw-project:project[rw-project:name = current()/../../../../rw-project:name]" +
+ "/project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
+ }
+ }
+
+ leaf start-by-default {
+ description
+ "VNFD is started as part of the NS instantiation";
+ type boolean;
+ default true;
+ }
+ }
+ }
+
+ grouping nsr-nsd-constituent-vnfd {
+ list constituent-vnfd {
+ description
+ "List of VNFDs that are part of this
+ network service.";
+
+ key "member-vnf-index";
+
+ leaf member-vnf-index {
+ description
+ "Identifier/index for the VNFD. This separate id
+ is required to ensure that multiple VNFs can be
+ part of single NS";
+ type uint64;
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "Identifier for the VNFD.";
+ type leafref {
+ path "/rw-project:project[rw-project:name = current()/../../../../../rw-project:name]" +
+ "/project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
+ }
+ }
+
+ leaf start-by-default {
+ description
+ "VNFD is started as part of the NS instantiation";
+ type boolean;
+ default true;
+ }
+ }
+ }
+
+ grouping nsd-vld {
+ list vld {
+
+ key "id";
+
+ uses nsd-base:nsd-vld-common;
+
+ list vnfd-connection-point-ref {
+ description
+ "A list of references to connection points.";
+ key "member-vnf-index-ref vnfd-connection-point-ref";
+
+ leaf member-vnf-index-ref {
+ description "Reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../../constituent-vnfd/member-vnf-index";
+ }
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../constituent-vnfd
+ + [id = current()/../id-ref]
+ + /vnfd-id-ref
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resolved this will switched to use
+ leafref";
+ //type string;
+ type leafref {
+ path "../../../constituent-vnfd[member-vnf-index = current()/../member-vnf-index-ref]/vnfd-id-ref";
+ }
+ }
+
+ leaf vnfd-connection-point-ref {
+ description "A reference to a connection point name";
+ type leafref {
+ path "../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd" +
+ "[project-vnfd:id = current()/../vnfd-id-ref]/" +
+ "project-vnfd:connection-point/project-vnfd:name";
+ }
+ }
+ }
+ }
+ }
+
+ grouping nsr-nsd-vld {
+ list vld {
+
+ key "id";
+
+ uses nsd-base:nsd-vld-common;
+
+ list vnfd-connection-point-ref {
+ description
+ "A list of references to connection points.";
+ key "member-vnf-index-ref vnfd-connection-point-ref";
+
+ leaf member-vnf-index-ref {
+ description "Reference to member-vnf within constituent-vnfds";
+
+ type leafref {
+ path "../../../constituent-vnfd/member-vnf-index";
+ }
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../nsd:constituent-vnfd
+ + [nsd:id = current()/../nsd:id-ref]
+ + /nsd:vnfd-id-ref
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resolved this will switched to use
+ leafref";
+ type string;
+ }
+
+ leaf vnfd-connection-point-ref {
+ description "A reference to a connection point name";
+ type leafref {
+ path "../../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd" +
+ "[project-vnfd:id = current()/../vnfd-id-ref]/" +
+ "project-vnfd:connection-point/project-vnfd:name";
+ }
+ }
+ }
+ }
+ }
+
+ grouping nsd-vnf-dependency {
+ list vnf-dependency {
+ description
+ "List of VNF dependencies.";
+ key vnf-source-ref;
+ leaf vnf-source-ref {
+ type leafref {
+ path "../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
+ }
+ }
+ leaf vnf-depends-on-ref {
+ description
+ "Reference to VNF that sorce VNF depends.";
+ type leafref {
+ path "../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
+ }
+ }
+ }
+ }
+
+ grouping nsr-nsd-vnf-dependency {
+ list vnf-dependency {
+ description
+ "List of VNF dependencies.";
+ key vnf-source-ref;
+ leaf vnf-source-ref {
+ type leafref {
+ path "../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
+ }
+ }
+ leaf vnf-depends-on-ref {
+ description
+ "Reference to VNF that sorce VNF depends.";
+ type leafref {
+ path "../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
+ }
+ }
+ }
+ }
+
+ grouping nsd-placement-groups {
+ list placement-groups {
+ description "List of placement groups at NS level";
+
+ key "name";
+ uses manotypes:placement-group-info;
+
+ list member-vnfd {
+ description
+ "List of VNFDs that are part of this placement group";
+
+ key "member-vnf-index-ref";
+
+ leaf member-vnf-index-ref {
+ description "member VNF index of this member VNF";
+ type leafref {
+ path "../../../constituent-vnfd/member-vnf-index";
+ }
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "Identifier for the VNFD.";
+ type leafref {
+ path "../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
+ }
+ }
+ }
+ }
+ }
+
+ grouping nsr-nsd-placement-groups {
+ list placement-groups {
+ description "List of placement groups at NS level";
+
+ key "name";
+ uses manotypes:placement-group-info;
+
+ list member-vnfd {
+ description
+ "List of VNFDs that are part of this placement group";
+
+ key "member-vnf-index-ref";
+
+ leaf member-vnf-index-ref {
+ description "member VNF index of this member VNF";
+ type leafref {
+ path "../../../constituent-vnfd/member-vnf-index";
+ }
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "Identifier for the VNFD.";
+ type leafref {
+ path "../../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
+ }
+ }
+ }
+ }
+ }
+
+ grouping nsd-monitoring-param {
+
+ list monitoring-param {
+ key "id";
+
+ uses nsd-base:monitoring-param-common;
+
+ list vnfd-monitoring-param {
+ description "A list of VNFD monitoring params";
+ key "member-vnf-index-ref vnfd-monitoring-param-ref";
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../../../nsd:constituent-vnfd
+ + [nsd:id = current()/../nsd:id-ref]
+ + /nsd:vnfd-id-ref
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resolved this will switched to use
+ leafref";
+
+ type leafref {
+ path "../../../constituent-vnfd" +
+ "[member-vnf-index = current()/../member-vnf-index-ref]" +
+ "/vnfd-id-ref";
+ }
+ }
+
+ leaf vnfd-monitoring-param-ref {
+ description "A reference to the VNFD monitoring param";
+ type leafref {
+ path "../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd"
+ + "[project-vnfd:id = current()/../vnfd-id-ref]"
+ + "/project-vnfd:monitoring-param/project-vnfd:id";
+ }
+ }
+
+ leaf member-vnf-index-ref {
+ description
+ "Optional reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../../constituent-vnfd/member-vnf-index";
+ }
+ }
+ }
+ }
+ }
+
+ grouping nsr-nsd-monitoring-param {
+ list monitoring-param {
+ key "id";
+
+ uses nsd-base:monitoring-param-common;
+
+ list vnfd-monitoring-param {
+ description "A list of VNFD monitoring params";
+ key "member-vnf-index-ref vnfd-monitoring-param-ref";
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../../../nsd:constituent-vnfd
+ + [nsd:id = current()/../nsd:id-ref]
+ + /nsd:vnfd-id-ref
+ NOTE: An issue with confd is preventing the
+ use of xpath. Seems to be an issue with leafref
+ to leafref, whose target is in a different module.
+ Once that is resolved this will switched to use
+ leafref";
+
+ type string;
+ }
+
+ leaf vnfd-monitoring-param-ref {
+ description "A reference to the VNFD monitoring param";
+ type leafref {
+ path "../../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd"
+ + "[project-vnfd:id = current()/../vnfd-id-ref]"
+ + "/project-vnfd:monitoring-param/project-vnfd:id";
+ }
+ }
+
+ leaf member-vnf-index-ref {
+ description
+ "Optional reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../../constituent-vnfd/member-vnf-index";
+ }
+ }
+ }
+ }
+ }
+
+ grouping nsd-service-primitive {
+ list service-primitive {
+ description
+ "Network service level service primitives.";
+
+ key "name";
+
+ leaf name {
+ description
+ "Name of the service primitive.";
+ type string;
+ }
+
+ list parameter {
+ description
+ "List of parameters for the service primitive.";
+
+ key "name";
+ uses manotypes:primitive-parameter;
+ }
+
+ uses manotypes:ui-primitive-group;
+
+ list vnf-primitive-group {
+ description
+ "List of service primitives grouped by VNF.";
+
+ key "member-vnf-index-ref";
+ leaf member-vnf-index-ref {
+ description
+ "Reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../../constituent-vnfd/member-vnf-index";
+ }
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a leafref";
+
+ type leafref {
+ path "../../../constituent-vnfd" +
+ "[member-vnf-index = current()/../member-vnf-index-ref]" + "/vnfd-id-ref";
+ }
+ }
+
+ leaf vnfd-name {
+ description
+ "Name of the VNFD";
+ type leafref {
+ path "../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd"
+ + "[project-vnfd:id = current()/../vnfd-id-ref]"
+ + "/project-vnfd:name";
+ }
+ }
+
+ list primitive {
+ key "index";
+
+ leaf index {
+ description "Index of this primitive";
+ type uint32;
+ }
+
+ leaf name {
+ description "Name of the primitive in the VNF primitive ";
+ type string;
+ }
+ }
+ }
+
+ leaf user-defined-script {
+ description
+ "A user defined script.";
+ type string;
+ }
+ }
+ }
+
+ grouping nsr-nsd-service-primitive {
+ list service-primitive {
+ description
+ "Network service level service primitives.";
+
+ key "name";
+
+ leaf name {
+ description
+ "Name of the service primitive.";
+ type string;
+ }
+
+ list parameter {
+ description
+ "List of parameters for the service primitive.";
+
+ key "name";
+ uses manotypes:primitive-parameter;
+ }
+
+ uses manotypes:ui-primitive-group;
+
+ list vnf-primitive-group {
+ description
+ "List of service primitives grouped by VNF.";
+
+ key "member-vnf-index-ref";
+ leaf member-vnf-index-ref {
+ description
+ "Reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../../constituent-vnfd/member-vnf-index";
+ }
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a leafref";
+
+ type leafref {
+ path "../../../constituent-vnfd" +
+ "[member-vnf-index = current()/../member-vnf-index-ref]" + "/vnfd-id-ref";
+ }
+ }
+
+ leaf vnfd-name {
+ description
+ "Name of the VNFD";
+ type leafref {
+ path "../../../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd"
+ + "[project-vnfd:id = current()/../vnfd-id-ref]"
+ + "/project-vnfd:name";
+ }
+ }
+
+ list primitive {
+ key "index";
+
+ leaf index {
+ description "Index of this primitive";
+ type uint32;
+ }
+
+ leaf name {
+ description "Name of the primitive in the VNF primitive ";
+ type string;
+ }
+ }
+ }
+
+ leaf user-defined-script {
+ description
+ "A user defined script.";
+ type string;
+ }
+ }
+ }
+
+ grouping nsd-descriptor {
+ uses nsd-base:nsd-descriptor-common;
+
+ uses nsd-vld;
+
+ uses nsd-constituent-vnfd;
+
+ uses nsd-placement-groups;
+
+ uses nsd-vnf-dependency;
+
+ uses nsd-monitoring-param;
+
+ uses nsd-service-primitive;
+ }
+
+ augment "/rw-project:project" {
+ container nsd-catalog {
+
+ list nsd {
+ key id;
+
+ uses nsd-descriptor;
+ }
+ }
+ }
+}
--- /dev/null
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:project-vnfd-role</role>
+ <key-set>
+ <name>project-name</name>
+ <path>/rw-project:project/rw-project:name</path>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:catalog-oper</role>
+ <keys-role>rw-project-mano:project-vnfd-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/project-vnfd:vnfd-catalog</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:catalog-admin</role>
+ <keys-role>rw-project-mano:project-vnfd-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/project-vnfd:vnfd-catalog</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:project-vnfd-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/project-vnfd:vnfd-catalog</path>
+ </authorize>
+ </role-definition>
+</config>
--- /dev/null
+
+/*
+ *
+ * Copyright 2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+module project-vnfd
+{
+ namespace "http://riftio.com/ns/riftware-1.0/project-vnfd";
+ prefix "project-vnfd";
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import vnfd-base {
+ prefix "vnfd-base";
+ }
+
+ revision 2017-02-28 {
+ description
+ "Initial revision. This YANG file defines
+ the Virtual Network Function (VNF) descriptor
+ under a project";
+ reference
+ "Derived from earlier versions of base YANG files";
+ }
+
+ augment /rw-project:project {
+ container vnfd-catalog {
+ description
+ "Virtual Network Function Descriptor (VNFD).";
+
+ list vnfd {
+ key "id";
+
+ uses vnfd-base:vnfd-descriptor;
+ }
+ }
+ }
+}
+
+// vim: sw=2
--- /dev/null
+
+/*
+ *
+ * Copyright 2016-2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+module rw-nsd-base
+{
+ namespace "http://riftio.com/ns/riftware-1.0/rw-nsd-base";
+ prefix "rw-nsd-base";
+
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ revision 2017-02-28 {
+ description
+ "Initial revision. This YANG file defines
+ grouping to extend the base MANO NSD";
+ reference
+ "Derived from earlier versions of base YANG files";
+ }
+
+ grouping rw-nsd-ext {
+ uses manotypes:control-param;
+ uses manotypes:action-param;
+ leaf meta {
+ description
+ "Any meta-data needed by the UI";
+ type string;
+ }
+ }
+}
+
+// vim: sw=2
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
namespace "http://riftio.com/ns/riftware-1.0/rw-nsd";
prefix "rw-nsd";
- import nsd {
- prefix "nsd";
+ import rw-nsd-base {
+ prefix "rw-nsd-base";
+ }
+
+ import vnfd {
+ prefix "vnfd";
+ }
+
+ import vnfd-base {
+ prefix "vnfd-base";
+ }
+
+ import rw-vnfd {
+ prefix "rwvnfd";
}
- import ietf-yang-types {
- prefix "yang";
+ import nsd {
+ prefix "nsd";
}
- import mano-types {
- prefix "manotypes";
+ revision 2017-02-28 {
+ description
+ "Update model to support projects.";
}
revision 2015-09-10 {
"Derived from earlier versions of base YANG files";
}
+ grouping nsd-config-parameter{
+ list config-parameter-map {
+ key "id";
+ description "A mapping of VNF config parameter
+ requests and sources within this network service";
+ leaf id {
+ description "Identfier for VNF map";
+ type string;
+ }
+ container config-parameter-request {
+ leaf member-vnf-index-ref {
+ description "Reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../../nsd:constituent-vnfd/nsd:member-vnf-index";
+ }
+ }
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd.";
+
+ type leafref {
+ path "../../../nsd:constituent-vnfd[nsd:member-vnf-index = current()/../member-vnf-index-ref]/nsd:vnfd-id-ref";
+ }
+ }
+ leaf config-parameter-request-ref {
+ description "Reference to the request in the VNF
+ with the specified member-vnf-index";
+ type leafref {
+ path
+ "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id = current()/../vnfd-id-ref]" +
+ "/rwvnfd:config-parameter/rwvnfd:config-parameter-request/rwvnfd:name";
+ }
+ }
+ }
+ container config-parameter-source {
+ leaf member-vnf-index-ref {
+ description "Reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../../nsd:constituent-vnfd/nsd:member-vnf-index";
+ }
+ }
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd.";
+ type leafref {
+ path "../../../nsd:constituent-vnfd[nsd:member-vnf-index = current()/../member-vnf-index-ref]/nsd:vnfd-id-ref";
+ }
+ }
+ leaf config-parameter-source-ref {
+ description "Reference to the source in the VNF
+ with the specified member-vnf-index";
+ type leafref {
+ path
+ "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id = current()/../vnfd-id-ref]" +
+ "/rwvnfd:config-parameter/rwvnfd:config-parameter-source/rwvnfd:name";
+ }
+ }
+ }
+ }
+ }
+
augment /nsd:nsd-catalog/nsd:nsd {
- uses manotypes:control-param;
- uses manotypes:action-param;
- leaf meta {
- description
- "Any meta-data needed by the UI";
+ uses rw-nsd-base:rw-nsd-ext;
+ uses nsd-config-parameter;
+ }
+
+ augment /nsd:nsd-catalog/nsd:nsd/nsd:service-primitive/nsd:parameter {
+ leaf out {
+ description "If this is an output of the primitive execution";
+ type boolean;
+ default false;
+ }
+ }
+
+ augment /nsd:nsd-catalog/nsd:nsd/nsd:service-primitive/nsd:parameter-group/nsd:parameter {
+ leaf out {
+ description "If this is an output of the primitive execution";
+ type boolean;
+ default false;
+ }
+ }
+
+ augment /nsd:nsd-catalog/nsd:nsd/nsd:vld {
+ leaf ipv4-nat-pool-name{
type string;
+ description "IPV4 nat pool name";
+ }
+
+ list virtual-connection-points {
+ description
+ "A list of virtual-connection points associated with Virtual Link.
+ These connection points are not directly associated with any VNFs";
+ key name;
+ uses vnfd-base:common-connection-point;
+
+ leaf-list associated-cps {
+ description
+ "A List of connection points associated with virtual connection point";
+ type leafref {
+ path "../../nsd:vnfd-connection-point-ref/nsd:vnfd-connection-point-ref";
+ }
+ }
}
}
}
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix nsr;
}
- tailf:annotate "/nsr:ns-instance-opdata/nsr:nsr/rw-nsr:operational-events" {
- tailf:callpoint rw_callpoint;
+ import rw-project {
+ prefix "rw-project";
}
- tailf:annotate "/nsr:ns-instance-opdata/rw-nsr:nsd-ref-count" {
+ tailf:annotate "/rw-project:project/nsr:ns-instance-opdata/nsr:nsr/rw-nsr:operational-events" {
tailf:callpoint rw_callpoint;
}
}
/*
- *
- * Copyright 2016 RIFT.IO Inc
+ *
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
{
namespace "http://riftio.com/ns/riftware-1.0/rw-nsr";
prefix "rw-nsr";
-
+
import mano-types {
prefix "manotypes";
}
prefix "nsd";
}
+ import project-vnfd {
+ prefix "project-vnfd";
+ }
+
+ import project-nsd {
+ prefix "project-nsd";
+ }
+
+ import rw-project-vnfd {
+ prefix "rw-project-vnfd";
+ }
+
+ import vnfd-base {
+ prefix "vnfd-base";
+ }
+
+ import mano-rift-groupings {
+ prefix "mano-rift";
+ }
+
import rw-cloud {
prefix "rw-cloud";
}
+ import rw-ro-account {
+ prefix "rw-ro-account";
+ }
+
import rw-config-agent {
prefix "rw-config-agent";
}
prefix "rw-sdn";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
import ietf-yang-types {
prefix "yang";
}
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
revision 2015-09-10 {
description
grouping rw-ns-instance-config {
- leaf cloud-account {
- description
- "The configured cloud account which the NSR is instantiated within.
- All VDU's, Virtual Links, and provider networks will be requested
- using the cloud-account's associated CAL instance";
- type leafref {
- path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
- }
- }
-
- leaf om-datacenter {
- description
- "Openmano datacenter name to use when instantiating
- the network service. This is only used when openmano
- is selected as the cloud account. This should be superceded
- by multiple cloud accounts when that becomes available.";
- type string;
- }
- list vnf-cloud-account-map {
+ list vnf-datacenter-map {
description
"Mapping VNF to Cloud Account where VNF will be instantiated";
type uint64;
}
- leaf cloud-account {
+ leaf datacenter {
description
- "The configured cloud account where VNF is instantiated within.
- All VDU's, Virtual Links, and provider networks will be requested
- using the cloud-account's associated CAL instance";
- type leafref {
- path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
- }
- }
-
- leaf om-datacenter {
- description
- "Openmano datacenter name to use when instantiating
- the network service. This is only used when openmano
- is selected as the cloud account. This should be superceded
- by multiple cloud accounts when that becomes available.";
+ "datacenter name to use when instantiating
+ the network service.";
type string;
}
The configuration for this VNF will be driven using the specified config
agent account";
type leafref {
- path "/rw-config-agent:config-agent/rw-config-agent:account/rw-config-agent:name";
+ path "../../../../rw-config-agent:config-agent/" +
+ "rw-config-agent:account/rw-config-agent:name";
}
}
}
- list vl-cloud-account-map {
+ list vl-datacenter-map {
description
"Mapping VL to Cloud Account where VL will be instantiated";
type string;
}
- leaf-list cloud-accounts {
+ leaf-list datacenters {
description
- "The configured list of cloud accounts where VL is instantiated.
- All VDU's, Virtual Links, and provider networks will be requested
- using the cloud-account's associated CAL instance";
- type leafref {
- path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
- }
- }
-
- leaf-list om-datacenters {
- description
- "Openmano datacenter names to use when instantiating
- the VLs. This is only used when openmano
- is selected as the cloud account. This should be superceded
- by multiple cloud accounts when that becomes available.";
+ "datacenter names to use when instantiating
+ the VLs.";
type string;
}
}
- }
-
+
+ leaf resource-orchestrator {
+ description
+ "Resource Orchestrator to use when instantiating the VNF.";
+ type leafref {
+ path "../../../rw-ro-account:ro-account/rw-ro-account:account/rw-ro-account:name";
+ }
+ }
+
+ leaf datacenter {
+ description
+ "datacenter name to use when instantiating
+ the network service.";
+ type string;
+ }
+
+ }
- augment /nsr:ns-instance-config/nsr:nsr {
- uses rw-ns-instance-config;
- }
- augment /nsr:start-network-service/nsr:input{
+ augment /rw-project:project/nsr:ns-instance-config/nsr:nsr {
uses rw-ns-instance-config;
}
- augment /nsr:ns-instance-opdata/nsr:nsr {
+ augment /rw-project:project/nsr:ns-instance-opdata/nsr:nsr {
uses manotypes:action-param;
uses manotypes:control-param;
+ container orchestration-progress {
+ container vms {
+ leaf active {
+ type uint32;
+ default 0;
+ }
+
+ leaf total {
+ type uint32;
+ default 0;
+ }
+ }
+ container networks {
+ leaf active {
+ type uint32;
+ default 0;
+ }
+
+ leaf total {
+ type uint32;
+ default 0;
+ }
+ }
+ }
+
leaf sdn-account {
description
"The SDN account associted with the cloud account using which an
NS was instantiated.";
type leafref {
- path "/rw-sdn:sdn/rw-sdn:account/rw-sdn:name";
+ path "../../../rw-sdn:sdn/rw-sdn:account/rw-sdn:name";
}
}
uses operational-events;
}
+ grouping project-nsr-nsd-config-parameter{
+ list config-parameter-map {
+ key "id";
+ description "A mapping of VNF config parameter
+ requests and sources within this network service";
+ leaf id {
+ description "Identfier for VNF map";
+ type string;
+ }
+ container config-parameter-request {
+
+ leaf member-vnf-index-ref {
+ description "Reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../../nsr:constituent-vnfd/nsr:member-vnf-index";
+ }
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../nsr:constituent-vnfd
+ + [nsr:id = current()/../id-ref]
+ + /vnfd-id-ref";
+
+ type leafref {
+ path "../../../nsr:constituent-vnfd[nsr:member-vnf-index = current()/../member-vnf-index-ref]/nsr:vnfd-id-ref";
+ }
+ }
+ leaf config-parameter-request-ref {
+ description "Reference to the request in the VNF
+ with the specified member-vnf-index";
+ type leafref {
+ path "../../../../../.." +
+ "/project-vnfd:vnfd-catalog/project-vnfd:vnfd[project-vnfd:id = current()/../vnfd-id-ref]" +
+ "/rw-project-vnfd:config-parameter/rw-project-vnfd:config-parameter-request/rw-project-vnfd:name";
+ }
+ }
+ }
+ container config-parameter-source {
- augment /nsr:ns-instance-opdata/nsr:nsr/nsr:vlr {
+ leaf member-vnf-index-ref {
+ description "Reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../../nsr:constituent-vnfd/nsr:member-vnf-index";
+ }
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../nsd:constituent-vnfd
+ + [nsd:id = current()/../nsd:id-ref]
+ + /nsd:vnfd-id-ref";
+
+ type leafref {
+ path "../../../nsr:constituent-vnfd[nsr:member-vnf-index = current()/../member-vnf-index-ref]/nsr:vnfd-id-ref";
+ }
+ }
+ leaf config-parameter-source-ref {
+ description "Reference to the source in the VNF
+ with the specified member-vnf-index";
+ type leafref {
+ path "../../../../../.." +
+ "/project-vnfd:vnfd-catalog/project-vnfd:vnfd[project-vnfd:id = current()/../vnfd-id-ref]" +
+ "/rw-project-vnfd:config-parameter/rw-project-vnfd:config-parameter-source/rw-project-vnfd:name";
+ }
+ }
+ }
+ }
+ }
+
+ augment /rw-project:project/nsr:ns-instance-opdata/nsr:nsr/nsr:vlr {
leaf assigned-subnet {
description "Subnet added for the VL";
type string;
}
- leaf cloud-account {
+ leaf datacenter {
description
- "The configured cloud account in which the VL is instantiated within.";
- type leafref {
- path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
- }
- }
- leaf om-datacenter {
- description
- "Openmano datacenter name to use when instantiating
- the network service. This is only used when openmano
- is selected as the cloud account. This should be superceded
- by multiple cloud accounts when that becomes available.";
+ "Datacenter name to use when instantiating
+ the network service. ";
type string;
}
}
- augment /nsr:ns-instance-opdata/nsr:nsr/nsr:constituent-vnfr-ref {
- leaf cloud-account {
- description
- "The configured cloud account in which the VNF is instantiated within.
- All VDU's, Virtual Links, and provider networks will be requested
- using the cloud-account's associated CAL instance";
- type leafref {
- path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
- }
- }
- leaf om-datacenter {
+ augment /rw-project:project/nsr:ns-instance-opdata/nsr:nsr/nsr:constituent-vnfr-ref {
+ leaf datacenter {
description
- "Openmano datacenter name to use when instantiating
- the network service. This is only used when openmano
- is selected as the cloud account. This should be superceded
- by multiple cloud accounts when that becomes available.";
+ "Datacenter name to use when instantiating
+ the network service.";
type string;
}
}
- augment /nsr:ns-instance-config {
+ augment /rw-project:project/nsr:ns-instance-config {
leaf nfvi-polling-period {
description
"Defines the period (secons) that the NFVI metrics are polled at";
}
}
+ augment /rw-project:project/nsr:ns-instance-config/nsr:nsr/nsr:nsd/nsr:vld {
+ leaf ipv4-nat-pool-name{
+ type string;
+ description "IPV4 nat pool name";
+ }
+
+ list virtual-connection-points {
+ description
+ "A list of virtual-connection points associated with Virtual Link.
+ These connection points are not directly associated with any VNFs";
+ key name;
+ uses vnfd-base:common-connection-point;
+
+ leaf-list associated-cps {
+ description
+ "A List of connection points associated with virtual connection point";
+ type leafref {
+ path "../../nsr:vnfd-connection-point-ref/nsr:vnfd-connection-point-ref";
+ }
+ }
+ }
+ }
+
+ augment /rw-project:project/nsr:ns-instance-config/nsr:nsr/nsr:nsd {
+ uses project-nsr-nsd-config-parameter;
+ }
+
+ augment /rw-project:project/nsr:ns-instance-config/nsr:nsr {
+ list vnf-input-parameter {
+ description
+ "List of input parameters for Constituent VNFs that can be specified when
+ instantiating a network service.";
+
+ key "member-vnf-index-ref vnfd-id-ref";
+
+ leaf member-vnf-index-ref {
+ description "Reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../nsr:nsd/nsr:constituent-vnfd/nsr:member-vnf-index";
+ }
+ }
+
+ leaf vnfd-id-ref {
+ description
+ "A reference to a VNFD";
+ type leafref {
+ path "../../nsr:nsd/nsr:constituent-vnfd/nsr:vnfd-id-ref";
+ }
+ }
+
+ uses manotypes:input-parameter;
+ }
+ }
+
+ augment /rw-project:project/nsr:ns-instance-opdata/nsr:nsr {
+ uses mano-rift:ssh-key-generated;
+ }
+
+
+ grouping leaf-out {
+ leaf out {
+ description "If this is an output of the primitive execution";
+ type boolean;
+ default false;
+ }
+ }
+
+
+ augment /rw-project:project/nsr:ns-instance-config/nsr:nsr/nsr:nsd/nsr:service-primitive/nsr:parameter {
+ uses leaf-out;
+ }
+
+ augment /rw-project:project/nsr:ns-instance-config/nsr:nsr/nsr:nsd/nsr:service-primitive/nsr:parameter-group/nsr:parameter {
+ uses leaf-out;
+ }
+
+ augment /rw-project:project/nsr:ns-instance-opdata/nsr:nsr/nsr:service-primitive/nsr:parameter {
+ uses leaf-out;
+ }
+
+ augment /rw-project:project/nsr:ns-instance-opdata/nsr:nsr/nsr:service-primitive/nsr:parameter-group/nsr:parameter {
+ uses leaf-out;
+ }
+
notification nsm-notification {
description "Notification for NSM Events.
The timestamp of this event is automatically expressed
--- /dev/null
+
+/*
+ *
+ * Copyright 2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+module rw-project-nsd
+{
+ namespace "http://riftio.com/ns/riftware-1.0/rw-project-nsd";
+ prefix "rw-project-nsd";
+
+ import rw-nsd-base {
+ prefix "rw-nsd-base";
+ }
+
+ import project-nsd {
+ prefix "project-nsd";
+ }
+
+ import project-vnfd {
+ prefix "project-vnfd";
+ }
+
+ import rw-project-vnfd {
+ prefix "rw-project-vnfd";
+ }
+
+ import vnfd-base {
+ prefix "vnfd-base";
+ }
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-28 {
+ description
+ "Initial revision. This YANG file augments
+ the base MANO NSD";
+ reference
+ "Derived from earlier versions of base YANG files";
+ }
+
+ grouping project-nsd-config-parameter{
+ list config-parameter-map {
+ key "id";
+ description "A mapping of VNF config parameter
+ requests and sources within this network service";
+ leaf id {
+ description "Identfier for VNF map";
+ type string;
+ }
+ container config-parameter-request {
+ leaf member-vnf-index-ref {
+ description "Reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../../project-nsd:constituent-vnfd/project-nsd:member-vnf-index";
+ }
+ }
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../project-nsd:constituent-vnfd
+ + [project-nsd:id = current()/../id-ref]
+ + /project-nsd:vnfd-id-ref";
+
+ type leafref {
+ path "../../../project-nsd:constituent-vnfd[project-nsd:member-vnf-index = current()/../member-vnf-index-ref]/project-nsd:vnfd-id-ref";
+ }
+ }
+ leaf config-parameter-request-ref {
+ description "Reference to the request in the VNF
+ with the specified member-vnf-index";
+ type leafref {
+ path "../../../../.." +
+ "/project-vnfd:vnfd-catalog/project-vnfd:vnfd[project-vnfd:id = current()/../vnfd-id-ref]" +
+ "/rw-project-vnfd:config-parameter/rw-project-vnfd:config-parameter-request/rw-project-vnfd:name";
+ }
+ }
+ }
+ container config-parameter-source {
+ leaf member-vnf-index-ref {
+ description "Reference to member-vnf within constituent-vnfds";
+ type leafref {
+ path "../../../project-nsd:constituent-vnfd/project-nsd:member-vnf-index";
+ }
+ }
+ leaf vnfd-id-ref {
+ description
+ "A reference to a vnfd. This is a
+ leafref to path:
+ ../../project-nsd:constituent-vnfd
+ + [project-nsd:id = current()/../id-ref]
+ + /project-nsd:vnfd-id-ref";
+
+ type leafref {
+ path "../../../project-nsd:constituent-vnfd[project-nsd:member-vnf-index = current()/../member-vnf-index-ref]/project-nsd:vnfd-id-ref";
+ }
+ }
+ leaf config-parameter-source-ref {
+ description "Reference to the source in the VNF
+ with the specified member-vnf-index";
+ type leafref {
+ path "../../../../.." +
+ "/project-vnfd:vnfd-catalog/project-vnfd:vnfd[project-vnfd:id = current()/../vnfd-id-ref]" +
+ "/rw-project-vnfd:config-parameter/rw-project-vnfd:config-parameter-source/rw-project-vnfd:name";
+ }
+ }
+ }
+ }
+ }
+
+ augment /rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd {
+ uses rw-nsd-base:rw-nsd-ext;
+ }
+
+ augment /rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd/project-nsd:service-primitive/project-nsd:parameter {
+ leaf out {
+ description "If this is an output of the primitive execution";
+ type boolean;
+ default false;
+ }
+ }
+
+ augment /rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd/project-nsd:service-primitive/project-nsd:parameter-group/project-nsd:parameter {
+ leaf out {
+ description "If this is an output of the primitive execution";
+ type boolean;
+ default false;
+ }
+ }
+
+ augment /rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd/project-nsd:vld {
+ leaf ipv4-nat-pool-name{
+ type string;
+ description "IPV4 nat pool name";
+ }
+
+ list virtual-connection-points {
+ description
+ "A list of virtual-connection points associated with Virtual Link.
+ These connection points are not directly associated with any VNFs";
+ key name;
+ uses vnfd-base:common-connection-point;
+
+ leaf-list associated-cps {
+ description
+ "A List of connection points associated with virtual connection point";
+ type leafref {
+ path "../../project-nsd:vnfd-connection-point-ref/project-nsd:vnfd-connection-point-ref";
+ }
+ }
+ }
+ }
+
+ augment /rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd {
+ uses project-nsd-config-parameter;
+ }
+}
+
+// vim: sw=2
--- /dev/null
+
+/*
+ *
+ * Copyright 2016-2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+module rw-project-vnfd
+{
+ namespace "http://riftio.com/ns/riftware-1.0/rw-project-vnfd";
+ prefix "rw-project-vnfd";
+
+ import project-vnfd {
+ prefix "project-vnfd";
+ }
+
+ import ietf-inet-types {
+ prefix "inet";
+ }
+
+ import rw-vnfd-base {
+ prefix "rw-vnfd-base";
+ }
+
+ import vnfd {
+ prefix "vnfd";
+ }
+
+ import vnfd-base {
+ prefix "vnfd-base";
+ }
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import mano-rift-groupings {
+ prefix "mano-rift";
+ }
+
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ revision 2017-02-28 {
+ description
+ "Initial revision. This YANG file augments
+ the base MANO VNFD";
+ reference
+ "Derived from earlier versions of base YANG files";
+ }
+
+ grouping vnfd-config-parameter {
+ container config-parameter {
+ description
+ "List of VNF config parameter requests and sources";
+ list config-parameter-source {
+ description "The list of parameters exposed by this VNF";
+ key "name";
+
+ leaf name {
+ description "Name of the source";
+ type string {
+ length "1..128";
+ }
+ }
+
+ leaf description {
+ description " Description of the source";
+ type string;
+ }
+
+ choice source {
+ case descriptor {
+ leaf descriptor {
+ description
+ "Location of this source as an xpath.
+ For example:
+ ../../../mgmt-interface/port";
+ type string;
+ }
+ }
+
+ case attribute {
+ leaf attribute {
+ description
+ "Location of this source as runtime attribute.
+ The value is <xpath>, <attribute_name>
+ For example:
+ ../../../mgmt-interface, ip-address
+ which retruns the ip-address assigned to the
+ mgmt-interface after VNF instantiation.";
+ type string;
+ }
+ }
+
+ case primitive-ref {
+ leaf config-primitive-name-ref {
+ description
+ "A leafref to configuration primitive.
+ This refers to a config parameter whose
+ output parameter is referred in out-parameter.";
+ type leafref {
+ path "../../../project-vnfd:vnf-configuration/project-vnfd:config-primitive/project-vnfd:name";
+ }
+ }
+
+ leaf parameter-ref {
+ description
+ "Name of the output parameter in the config primitiive";
+ type leafref {
+ path
+ "../../../project-vnfd:vnf-configuration/project-vnfd:config-primitive[project-vnfd:name=current()/../config-primitive-name-ref]/project-vnfd:parameter/project-vnfd:name";
+ }
+ }
+ }
+
+ case value {
+ leaf value {
+ description
+ "Pre-defined value to be used for this source";
+ type string;
+ }
+ }
+ }
+
+ list parameter {
+ key "config-primitive-name-ref";
+
+ leaf config-primitive-name-ref {
+ description
+ "Name of the configuration primitive where this
+ request will used";
+ type leafref {
+ path "../../../../project-vnfd:vnf-configuration/project-vnfd:config-primitive/project-vnfd:name";
+ }
+ }
+
+ leaf config-primitive-parameter-ref {
+ description
+ "Parameter name of the config primitive";
+ type leafref {
+ path "../../../../project-vnfd:vnf-configuration/project-vnfd:config-primitive[project-vnfd:name=current()/../config-primitive-name-ref]/project-vnfd:parameter/project-vnfd:name";
+ }
+ }
+ }
+ }
+
+ list config-parameter-request {
+ description "The list of requests for this VNF";
+ key "name";
+
+ leaf name {
+ description "Name of this parameter request";
+ type string {
+ length "1..128";
+ }
+ }
+
+ leaf description {
+ description "Description of this request";
+ type string;
+ }
+
+ list parameter {
+ key "config-primitive-name-ref";
+
+ leaf config-primitive-name-ref {
+ description
+ "Name of the configuration primitive where this
+ request will used";
+ type leafref {
+ path "../../../../project-vnfd:vnf-configuration/project-vnfd:config-primitive/project-vnfd:name";
+ }
+ }
+
+ leaf config-primitive-parameter-ref {
+ description
+ "Parameter name of the config primitive";
+ type leafref {
+ path "../../../../project-vnfd:vnf-configuration/project-vnfd:config-primitive[project-vnfd:name=current()/../config-primitive-name-ref]/project-vnfd:parameter/project-vnfd:name";
+ }
+ }
+ }
+ }
+ }
+ }
+
+ augment /rw-project:project/project-vnfd:vnfd-catalog/project-vnfd:vnfd {
+ uses rw-vnfd-base:rw-vnfd-ext;
+ uses vnfd-config-parameter;
+ }
+
+ augment /rw-project:project/project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:mgmt-interface {
+ uses rw-vnfd-base:ssh-key;
+ }
+
+ augment /rw-project:project/project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:http-endpoint {
+ uses mano-rift:http-end-point-additions;
+ }
+
+ augment /rw-project:project/project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:vdu/project-vnfd:supplemental-boot-data {
+ uses mano-rift:custom-meta-data;
+ }
+
+ augment /rw-project:project/project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:vdu/project-vnfd:volumes {
+ uses mano-rift:volume-info-additions;
+ uses mano-rift:custom-meta-data;
+ }
+
+ augment /rw-project:project/project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:vdu/project-vnfd:interface {
+ leaf static-ip-address {
+ description "Static IP address for the connection point";
+ type inet:ip-address;
+ }
+
+ leaf floating-ip-needed{
+ type boolean;
+ default "false";
+ description
+ "Sole purpose of this field is to facilitate translation of VNFD
+ to other VNFMs";
+ }
+ }
+
+ augment /rw-project:project/project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:vdu/project-vnfd:volumes/project-vnfd:volume-source {
+ case volume {
+ leaf volume-ref {
+ description "Reference for pre-existing volume in VIM";
+ type string;
+ }
+ }
+ }
+
+ augment /rw-project:project/project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:vnf-configuration/project-vnfd:initial-config-primitive/project-vnfd:primitive-type {
+ case primitive-ref {
+ leaf config-primitive-ref {
+ description
+ "Reference to a config primitive name.
+ NOTE: The config primitive referred should have
+ all the input parameters predefined either
+ with default values or dependency references.";
+ type leafref {
+ path "../../project-vnfd:config-primitive/project-vnfd:name";
+ }
+ }
+ }
+ }
+
+ augment /rw-project:project/project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:internal-vld {
+ list virtual-connection-points {
+ description
+ "A list of virtual-connection points associated with Virtual Link.
+ These connection points are not directly associated with any VDUs";
+ key name;
+ uses vnfd-base:common-connection-point;
+
+ leaf-list associated-cps {
+ description
+ "A List of connection points associated with virtual connection point";
+ type leafref {
+ path "../../project-vnfd:internal-connection-point/project-vnfd:id-ref";
+ }
+ }
+ }
+ }
+
+ augment /rw-project:project/project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:vdu/project-vnfd:vm-flavor {
+ uses manotypes:vm-flavor-name;
+ }
+
+ augment /rw-project:project/project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:vnf-configuration/project-vnfd:config-primitive/project-vnfd:parameter {
+ leaf out {
+ description "If this is an output of the primitive execution";
+ type boolean;
+ default false;
+ }
+ }
+
+}
+// vim: sw=2
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix "manotypes";
}
+ import ietf-inet-types {
+ prefix "inet";
+ }
+
import vlr {
prefix "vlr";
}
+ import vnfd-base {
+ prefix "vnfd-base";
+ }
+
import rw-cloud {
prefix "rw-cloud";
}
prefix "yang";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-09-30 {
description
"Initial revision. This YANG file augments
"Derived from earlier versions of base YANG files";
}
- augment /vlr:vlr-catalog/vlr:vlr {
- leaf cloud-account {
- description
- "The cloud account to use when requesting resources for
- this vlr";
- type leafref {
- path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
- }
- }
- leaf om-datacenter {
+ augment /rw-project:project/vlr:vlr-catalog/vlr:vlr {
+
+ leaf datacenter {
description
- "Openmano datacenter name to use when instantiating
- the network service. This is only used when openmano
- is selected as the cloud account. This should be superceded
- by multiple cloud accounts when that becomes available.";
+ "Datacenter name to use when instantiating
+ the network service.";
type string;
}
"The error message in case of a failed VLR operational status";
type string;
}
+
+ list virtual-connection-points {
+ key name;
+ uses vnfd-base:common-connection-point;
+
+ leaf-list associated-cps {
+ type string;
+ }
+
+ leaf connection-point-id {
+ description "VIM identifier for connection point";
+ type string;
+ }
+
+ leaf ip-address {
+ description "IP Address of virtual connection point";
+ type inet:ip-address;
+ }
+ leaf mac-address {
+ description "MAC Address of the virtual connection point";
+ type string;
+ }
+ }
}
}
--- /dev/null
+
+/*
+ *
+ * Copyright 2016-2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+module rw-vnfd-base
+{
+ namespace "http://riftio.com/ns/riftware-1.0/rw-vnfd-base";
+ prefix "rw-vnfd-base";
+
+ import vnfd {
+ prefix "vnfd";
+ }
+
+ import rwvcs-types {
+ prefix "rwvcstypes";
+ }
+
+ import ietf-yang-types {
+ prefix "yang";
+ }
+
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ revision 2017-02-28 {
+ description
+ "Initial revision. This YANG file defines
+ common structs for extending MANO VNFD";
+ reference
+ "Derived from earlier versions of base YANG files";
+ }
+
+ grouping rw-vnfd-ext {
+ leaf meta {
+ description
+ "Any meta-data needed by the UI";
+ type string;
+ }
+
+ }
+ grouping ssh-key {
+ leaf ssh-key {
+ description
+ "Whether SSH keys need to be generated and passed
+ to the RO and VCA during instantiation.";
+ type boolean;
+ }
+ }
+}
+// vim: sw=2
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
namespace "http://riftio.com/ns/riftware-1.0/rw-vnfd";
prefix "rw-vnfd";
+ import ietf-inet-types {
+ prefix "inet";
+ }
+
import vnfd {
prefix "vnfd";
}
- import rwvcs-types {
- prefix "rwvcstypes";
+ import rw-vnfd-base {
+ prefix "rw-vnfd-base";
}
- import rw-pb-ext { prefix "rwpb"; }
+ import vnfd-base {
+ prefix "vnfd-base";
+ }
- import ietf-yang-types {
- prefix "yang";
+ import mano-rift-groupings {
+ prefix "mano-rift";
}
import mano-types {
prefix "manotypes";
}
+ revision 2017-02-28 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-09-10 {
description
"Initial revision. This YANG file augments
"Derived from earlier versions of base YANG files";
}
- augment /vnfd:vnfd-catalog/vnfd:vnfd {
- uses manotypes:control-param;
- uses manotypes:action-param;
- leaf meta {
- description
- "Any meta-data needed by the UI";
- type string;
- }
- list component {
+ grouping vnfd-config-parameter {
+ container config-parameter {
description
- "This section defines the RIFT.ware
- virtual components";
- key "component-name";
- rwpb:msg-new VcsComponent;
- rwpb:application-request-point;
-
- leaf component-name {
- description "";
- type string;
- }
+ "List of VNF config parameter requests and sources";
+ list config-parameter-source {
+ description "The list of parameters exposed by this VNF";
+ key "name";
- leaf component-type {
- description "";
- type rwvcstypes:component_type;
- mandatory true;
- }
+ leaf name {
+ description "Name of the source";
+ type string {
+ length "1..128";
+ }
+ }
+
+ leaf description {
+ description " Description of the source";
+ type string;
+ }
+
+ choice source {
+ case descriptor {
+ leaf descriptor {
+ description
+ "Location of this source as an xpath.
+ For example:
+ ../../../mgmt-interface/port";
+ type string;
+ }
+ }
- choice component {
- case rwvcs-rwcollection {
- uses rwvcstypes:rwvcs-rwcollection;
+ case attribute {
+ leaf attribute {
+ description
+ "Location of this source as runtime attribute.
+ The value is <xpath>, <attribute_name>
+ For example:
+ ../../../mgmt-interface, ip-address
+ which retruns the ip-address assigned to the
+ mgmt-interface after VNF instantiation.";
+ type string;
+ }
+ }
+
+ case primitive-ref {
+ leaf config-primitive-name-ref {
+ description
+ "A leafref to configuration primitive.
+ This refers to a config parameter whose
+ output parameter is referred in out-parameter.";
+ type leafref {
+ path "../../../vnfd:vnf-configuration/vnfd:config-primitive/vnfd:name";
+ }
+ }
+
+ leaf parameter-ref {
+ description
+ "Name of the output parameter in the config primitiive";
+ type leafref {
+ path
+ "../../../vnfd:vnf-configuration/vnfd:config-primitive[vnfd:name=current()/../config-primitive-name-ref]/vnfd:parameter/vnfd:name";
+ }
+ }
+ }
+
+ case value {
+ leaf value {
+ description
+ "Pre-defined value to be used for this source";
+ type string;
+ }
+ }
}
- case rwvcs-rwvm {
- uses rwvcstypes:rwvcs-rwvm;
+
+ list parameter {
+ key "config-primitive-name-ref";
+
+ leaf config-primitive-name-ref {
+ description
+ "Name of the configuration primitive where this
+ request will used";
+ type leafref {
+ path "../../../../vnfd:vnf-configuration/vnfd:config-primitive/vnfd:name";
+ }
+ }
+
+ leaf config-primitive-parameter-ref {
+ description
+ "Parameter name of the config primitive";
+ type leafref {
+ path "../../../../vnfd:vnf-configuration/vnfd:config-primitive[vnfd:name=current()/../config-primitive-name-ref]/vnfd:parameter/vnfd:name";
+ }
+ }
}
- case rwvcs-rwproc {
- uses rwvcstypes:rwvcs-rwproc;
+ }
+
+ list config-parameter-request {
+ description "The list of requests for this VNF";
+ key "name";
+
+ leaf name {
+ description "Name of this parameter request";
+ type string {
+ length "1..128";
+ }
}
- case native-proc {
- uses rwvcstypes:native-proc;
+
+ leaf description {
+ description "Description of this request";
+ type string;
}
- case rwvcs-rwtasklet {
- uses rwvcstypes:rwvcs-rwtasklet;
+
+ list parameter {
+ key "config-primitive-name-ref";
+
+ leaf config-primitive-name-ref {
+ description
+ "Name of the configuration primitive where this
+ request will used";
+ type leafref {
+ path "../../../../vnfd:vnf-configuration/vnfd:config-primitive/vnfd:name";
+ }
+ }
+
+ leaf config-primitive-parameter-ref {
+ description
+ "Parameter name of the config primitive";
+ type leafref {
+ path "../../../../vnfd:vnf-configuration/vnfd:config-primitive[vnfd:name=current()/../config-primitive-name-ref]/vnfd:parameter/vnfd:name";
+ }
+ }
}
}
- } // list component
+ }
}
- augment /vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vdu {
- leaf vcs-component-ref {
+ augment /vnfd:vnfd-catalog/vnfd:vnfd {
+ uses rw-vnfd-base:rw-vnfd-ext;
+ uses vnfd-config-parameter;
+ }
+
+ augment /vnfd:vnfd-catalog/vnfd:vnfd/vnfd:mgmt-interface {
+ uses rw-vnfd-base:ssh-key;
+ }
+
+ augment /vnfd:vnfd-catalog/vnfd:vnfd/vnfd:http-endpoint {
+ uses mano-rift:http-end-point-additions;
+ }
+
+ augment /vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vdu/vnfd:supplemental-boot-data {
+ uses mano-rift:custom-meta-data;
+ }
+
+ augment /vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vdu/vnfd:volumes {
+ uses mano-rift:volume-info-additions;
+ uses mano-rift:custom-meta-data;
+ }
+
+ augment /vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vdu/vnfd:interface {
+ leaf static-ip-address {
+ description "Static IP address for the connection point";
+ type inet:ip-address;
+ }
+
+ leaf floating-ip-needed{
+ type boolean;
+ default "false";
+ description
+ "Sole purpose of this field is to facilitate translation of VNFD
+ to other VNFMs";
+ }
+ }
+
+ augment /vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vdu/vnfd:volumes/vnfd:volume-source {
+ case volume {
+ leaf volume-ref {
+ description "Reference for pre-existing volume in VIM";
+ type string;
+ }
+ }
+ }
+
+ augment /vnfd:vnfd-catalog/vnfd:vnfd/vnfd:internal-vld {
+ list virtual-connection-points {
description
- "This defines the software components using the
- RIFT.ware Virtual Component System (VCS). This
- also allows specifying a state machine during
- the VM startup.
- NOTE: This is an significant addition to MANO,
- since MANO doesn't clearly specify a method to
- identify various software components in a VM.
- Also using a state machine is not something that
- is well described in MANO.";
- type leafref {
- path "/vnfd:vnfd-catalog/vnfd:vnfd/rw-vnfd:component/rw-vnfd:component-name";
+ "A list of virtual-connection points associated with Virtual Link.
+ These connection points are not directly associated with any VDUs";
+ key name;
+ uses vnfd-base:common-connection-point;
+
+ leaf-list associated-cps {
+ description
+ "A List of connection points associated with virtual connection point";
+ type leafref {
+ path "../../vnfd:internal-connection-point/vnfd:id-ref";
+ }
+ }
+ }
+ }
+
+
+ augment /vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vdu/vnfd:vm-flavor {
+ uses manotypes:vm-flavor-name;
+ }
+
+ augment /vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vnf-configuration/vnfd:config-primitive/vnfd:parameter {
+ leaf out {
+ description "If this is an output of the primitive execution";
+ type boolean;
+ default false;
+ }
+ }
+
+ augment /vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vnf-configuration/vnfd:initial-config-primitive/vnfd:primitive-type {
+ case primitive-ref {
+ leaf config-primitive-ref {
+ description
+ "Reference to a config primitive name.
+ NOTE: The config primitive referred should have
+ all the input parameters predefined either
+ with default values or dependency references.";
+ type leafref {
+ path "../../vnfd:config-primitive/vnfd:name";
+ }
}
}
}
+
}
// vim: sw=2
--- /dev/null
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:rw-vnfr-role</role>
+ <key-set>
+ <name>project-name</name>
+ <path>/rw-project:project/rw-project:name</path>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-oper</role>
+ <keys-role>rw-project-mano:rw-vnfr-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-vnfr:vnfr-console</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:rw-vnfr-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/rw-vnfr:vnfr-console</path>
+ </authorize>
+ </role-definition>
+</config>
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix vnfr;
}
- tailf:annotate "/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf:annotate "/rw-project:project/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count" {
tailf:callpoint rw_callpoint;
}
- tailf:annotate "/vnfr:vnfr-catalog/vnfr:vnfr/rw-vnfr:operational-events" {
+ tailf:annotate "/rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/rw-vnfr:operational-events" {
tailf:callpoint rw_callpoint;
}
- tailf:annotate "/rw-vnfr:vnfr-console" {
+ tailf:annotate "/rw-project:project/rw-vnfr:vnfr-console" {
tailf:callpoint rw_callpoint;
}
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix "manotypes";
}
- import rw-pb-ext { prefix "rwpb"; }
-
import vnfr {
prefix "vnfr";
}
- import vnfd {
- prefix "vnfd";
+ import vnfd-base {
+ prefix "vnfd-base";
+ }
+
+ import project-vnfd {
+ prefix "project-vnfd";
}
import rw-cloud {
prefix "inet";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import mano-rift-groupings {
+ prefix "mano-rift";
+ }
+
+ revision 2017-02-28 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-09-10 {
description
"Initial revision. This YANG file augments
"Derived from earlier versions of base YANG files";
}
+ typedef vdur-operational-event-types {
+ type enumeration {
+ enum instantiate-rcvd;
+ enum vm-allocation-requested;
+ enum running;
+ enum terminate-rcvd;
+ enum vm-terminate-requested;
+ enum terminated;
+ }
+ }
+
+ typedef vnfr-operational-event-types {
+ type enumeration {
+ enum instantiate-rcvd;
+ enum vl-inited;
+ enum vnf-inited;
+ enum running;
+ enum terminate-rcvd;
+ enum vnf-terminated;
+ enum vl-terminated;
+ enum terminated;
+ }
+ }
+
grouping vnfr-operational-events {
list operational-events {
key "id";
description
"Recent operational events for VNFR
- Though the model does not impose any restrictions on the numbe of events,
- the max operational events will be limited to the most recent 10";
+ Though the model does not impose any restrictions on the numbe of events,
+ the max operational events will be limited to the most recent 10";
leaf id {
description "The id of the instance";
}
leaf event {
description "The event";
- type enumeration {
- rwpb:enum-type "VnfrOperationalEvent";
- enum instantiate-rcvd;
- enum vl-inited;
- enum vnf-inited;
- enum running;
- enum terminate-rcvd;
- enum vnf-terminated;
- enum vl-terminated;
- enum terminated;
- }
+ type vnfr-operational-event-types;
}
leaf description {
description
key "id";
description
"Recent operational events for VDUR
- Though the model does not impose any restrictions on the numbe of events,
- the max operational events will be limited to the most recent 10";
+ Though the model does not impose any restrictions on the numbe of events,
+ the max operational events will be limited to the most recent 10";
leaf id {
description "The id of the instance";
}
leaf event {
description "The event";
- type enumeration {
- rwpb:enum-type "VdurOperationalEvent";
- enum instantiate-rcvd;
- enum vm-allocation-requested;
- enum running;
- enum terminate-rcvd;
- enum vm-terminate-requested;
- enum terminated;
- }
+ type vdur-operational-event-types;
}
leaf description {
description
}
}
- augment /vnfr:vnfr-catalog/vnfr:vnfr {
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr {
uses manotypes:action-param;
uses manotypes:control-param;
- leaf cloud-account {
+ leaf datacenter {
description
- "The cloud account to use when requesting resources for
- this vnf";
- type leafref {
- path "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
- }
- }
-
- leaf om-datacenter {
- description
- "Openmano datacenter name to use when instantiating
- the network service. This is only used when openmano
- is selected as the cloud account. This should be superceded
- by multiple cloud accounts when that becomes available.";
+ "Datacenter name to use when instantiating
+ the network service.";
type string;
}
type uint64;
}
}
-
+
uses manotypes:nfvi-metrics;
}
-
- list component {
- description
- "This section defines the RIFT.ware
- virtual components";
- key "component-name";
- rwpb:msg-new VcsComponentOp;
- rwpb:application-request-point;
-
- leaf component-name {
- description "";
- type string;
- }
-
- leaf component-type {
- description "";
- type rwvcstypes:component_type;
- mandatory true;
- }
-
- choice component {
- case rwvcs-rwcollection {
- uses rwvcstypes:rwvcs-rwcollection;
- }
- case rwvcs-rwvm {
- uses rwvcstypes:rwvcs-rwvm;
- }
- case rwvcs-rwproc {
- uses rwvcstypes:rwvcs-rwproc;
- }
- case native-proc {
- uses rwvcstypes:native-proc;
- }
- case rwvcs-rwtasklet {
- uses rwvcstypes:rwvcs-rwtasklet;
- }
- }
- } // list component
-
uses vnfr-operational-events;
leaf operational-status-details {
}
}
- augment /vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vdur {
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vdur {
leaf vm-pool {
description
"The pool from which this vm was allocated from";
uses manotypes:nfvi-metrics;
}
- leaf vcs-component-ref {
- description
- "This defines the software components using the
- RIFT.ware Virtual Component System (VCS). This
- also allows specifying a state machine during
- the VM startup.
- NOTE: This is an significant addition to MANO,
- since MANO doesn't clearly specify a method to
- identify various software components in a VM.
- Also using a state machine is not something that
- is well described in MANO.";
- type leafref {
- path "/vnfr:vnfr-catalog/vnfr:vnfr/rw-vnfr:component/rw-vnfr:component-name";
- }
- }
uses vdur-operational-events;
type string;
}
}
+
grouping vnfd-ref-count {
list vnfd-ref-count {
key "vnfd-id-ref";
leaf vnfd-id-ref {
description "Reference to VNFD";
type leafref {
- path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
+ path "../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
}
}
leaf instance-ref-count {
}
}
}
- augment /vnfr:vnfr-catalog {
+
+ grouping vnfd-config-parameter {
+ container config-parameter {
+ description
+ "List of VNF config parameter requests and sources";
+ list config-parameter-source {
+ description "The list of parameters exposed by this VNF";
+ key "name";
+
+ leaf name {
+ description "Name of the source";
+ type string {
+ length "1..128";
+ }
+ }
+
+ leaf description {
+ description " Description of the source";
+ type string;
+ }
+
+ choice source {
+ case descriptor {
+ leaf descriptor {
+ description
+ "Location of this source as an xpath.
+ For example:
+ ../../../mgmt-interface/port";
+ type string;
+ }
+ }
+
+ case attribute {
+ leaf attribute {
+ description
+ "Location of this source as runtime attribute.
+ The value is <xpath>, <attribute_name>
+ For example:
+ ../../../mgmt-interface, ip-address
+ which retruns the ip-address assigned to the
+ mgmt-interface after VNF instantiation.";
+ type string;
+ }
+ }
+
+ case primitive-ref {
+ leaf config-primitive-name-ref {
+ description
+ "A leafref to configuration primitive.
+ This refers to a config parameter whose
+ output parameter is referred in out-parameter.";
+ type leafref {
+ path "../../../vnfr:vnf-configuration/vnfr:config-primitive/vnfr:name";
+ }
+ }
+
+ leaf parameter-ref {
+ description
+ "Name of the output parameter in the config primitiive";
+ type leafref {
+ path
+ "../../../vnfr:vnf-configuration/vnfr:config-primitive[vnfr:name=current()/../config-primitive-name-ref]/vnfr:parameter/vnfr:name";
+ }
+ }
+ }
+
+ case value {
+ leaf value {
+ description
+ "Pre-defined value to be used for this source";
+ type string;
+ }
+ }
+ }
+
+ list parameter {
+ key "config-primitive-name-ref";
+
+ leaf config-primitive-name-ref {
+ description
+ "Name of the configuration primitive where this
+ request will used";
+ type leafref {
+ path "../../../../vnfr:vnf-configuration/vnfr:config-primitive/vnfr:name";
+ }
+ }
+
+ leaf config-primitive-parameter-ref {
+ description
+ "Parameter name of the config primitive";
+ type leafref {
+ path "../../../../vnfr:vnf-configuration/vnfr:config-primitive[vnfr:name=current()/../config-primitive-name-ref]/vnfr:parameter/vnfr:name";
+ }
+ }
+ }
+ }
+
+ list config-parameter-request {
+ description "The list of requests for this VNF";
+ key "name";
+
+ leaf name {
+ description "Name of this parameter request";
+ type string {
+ length "1..128";
+ }
+ }
+
+ leaf description {
+ description "Description of this request";
+ type string;
+ }
+
+ list parameter {
+ key "config-primitive-name-ref";
+
+ leaf config-primitive-name-ref {
+ description
+ "Name of the configuration primitive where this
+ request will used";
+ type leafref {
+ path "../../../../vnfr:vnf-configuration/vnfr:config-primitive/vnfr:name";
+ }
+ }
+
+ leaf config-primitive-parameter-ref {
+ description
+ "Parameter name of the config primitive";
+ type leafref {
+ path "../../../../vnfr:vnf-configuration/vnfr:config-primitive[vnfr:name=current()/../config-primitive-name-ref]/vnfr:parameter/vnfr:name";
+ }
+ }
+ }
+ }
+ }
+ }
+
+ augment /rw-project:project/vnfr:vnfr-catalog {
uses vnfd-ref-count;
}
- container vnfr-console {
- config false;
- list vnfr {
- key "id";
- leaf id {
- description "Identifier for the VNFR.";
- type yang:uuid;
- }
- list vdur {
- description "List of Virtual Deployment Units";
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vnfd/vnfr:mgmt-interface {
+ leaf ssh-key {
+ description
+ "Whether SSH keys need to be generated and passed
+ to the RO and VCA during instantiation.";
+ type boolean;
+ }
+ }
+
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vnfd/vnfr:vdu/vnfr:vm-flavor {
+ uses manotypes:vm-flavor-name;
+ }
+
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vnfd/vnfr:vdu/vnfr:interface {
+ leaf static-ip-address {
+ description "Static IP address for the connection point";
+ type inet:ip-address;
+ }
+
+ leaf floating-ip-needed{
+ type boolean;
+ default "false";
+ description
+ "Sole purpose of this field is to facilitate translation of VNFD
+ to other VNFMs";
+ }
+ }
+
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vdur/vnfr:interface {
+ leaf static-ip-address {
+ description "Static IP address for the connection point";
+ type inet:ip-address;
+ }
+
+ leaf floating-ip-needed{
+ type boolean;
+ default "false";
+ description
+ "Sole purpose of this field is to facilitate translation of VNFD
+ to other VNFMs";
+ }
+ }
+
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vdur/vnfr:vm-flavor {
+ uses manotypes:vm-flavor-name;
+ }
+
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vnfd {
+ leaf meta {
+ description
+ "Any meta-data needed by the UI";
+ type string;
+ }
+
+ uses vnfd-config-parameter;
+ }
+
+ augment /rw-project:project {
+ container vnfr-console {
+ config false;
+ list vnfr {
key "id";
leaf id {
- description "Unique id for the VDU";
+ description "Identifier for the VNFR.";
type yang:uuid;
}
- leaf console-url {
- description "Console URL for this VDU, if available";
- type inet:uri;
+ list vdur {
+ description "List of Virtual Deployment Units";
+ key "id";
+ leaf id {
+ description "Unique id for the VDU";
+ type yang:uuid;
+ }
+ leaf console-url {
+ description "Console URL for this VDU, if available";
+ type inet:uri;
+ }
+ }
+ }
+ }
+ }
+
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vnfd/vnfr:http-endpoint {
+ uses mano-rift:http-end-point-additions;
+ }
+
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:http-endpoint {
+ uses mano-rift:http-end-point-additions;
+ }
+
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vnfd/vnfr:vdu/vnfr:supplemental-boot-data {
+ uses mano-rift:custom-meta-data;
+ }
+
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vdur/vnfr:supplemental-boot-data {
+ uses mano-rift:custom-meta-data;
+ }
+
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vnfd/vnfr:vdu/vnfr:volumes {
+ uses mano-rift:volume-info-additions;
+ uses mano-rift:custom-meta-data;
+ }
+
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vnfd/vnfr:vdu/vnfr:volumes/vnfr:volume-source {
+ case volume {
+ leaf volume-ref {
+ description "Reference for pre-existing volume in VIM";
+ type string;
+ }
+ }
+ }
+
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vnf-configuration/vnfr:config-primitive/vnfr:parameter {
+ leaf out {
+ description "If this is an output of the primitive execution";
+ type boolean;
+ default false;
+ }
+ }
+
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vnfd/vnfr:vnf-configuration/vnfr:config-primitive/vnfr:parameter {
+ leaf out {
+ description "If this is an output of the primitive execution";
+ type boolean;
+ default false;
+ }
+ }
+
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vnf-configuration/vnfr:initial-config-primitive/vnfr:primitive-type {
+ case primitive-ref {
+ leaf config-primitive-ref {
+ description
+ "Reference to a config primitive name.
+ NOTE: The config primitive referred should have
+ all the input parameters predefined either
+ with default values or dependency references.";
+ type leafref {
+ path "../../vnfr:config-primitive/vnfr:name";
+ }
+ }
+ }
+ }
+
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vnfd/vnfr:vnf-configuration/vnfr:initial-config-primitive/vnfr:primitive-type {
+ case primitive-ref {
+ leaf config-primitive-ref {
+ description
+ "Reference to a config primitive name.
+ NOTE: The config primitive referred should have
+ all the input parameters predefined either
+ with default values or dependency references.";
+ type leafref {
+ path "../../vnfr:config-primitive/vnfr:name";
+ }
+ }
+ }
+ }
+
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vdur/vnfr:volumes {
+ uses mano-rift:volume-info-additions;
+ uses mano-rift:custom-meta-data;
+ }
+
+ augment /rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:vnfd/vnfr:internal-vld {
+ list virtual-connection-points {
+ description
+ "A list of virtual-connection points associated with Virtual Link.
+ These connection points are not directly associated with any VDUs";
+ key name;
+ uses vnfd-base:common-connection-point;
+
+ leaf-list associated-cps {
+ description
+ "A List of connection points associated with virtual connection point";
+ type leafref {
+ path "../../vnfr:internal-connection-point/vnfr:id-ref";
}
}
}
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
namespace "urn:ietf:params:xml:ns:yang:nfvo:vld";
prefix "vld";
- import rw-pb-ext {
- prefix "rwpb";
- }
-
- import vnfd {
- prefix "vnfd";
+ import project-vnfd {
+ prefix "project-vnfd";
}
import ietf-inet-types {
prefix "manotypes";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-09-10 {
description
"Initial revision. This YANG file defines
"Derived from earlier versions of base YANG files";
}
- container vld-catalog {
+ augment "/rw-project:project" {
+ container vld-catalog {
- list vld {
- key "id";
+ list vld {
+ key "id";
- leaf id {
- description "Identifier for the VLD.";
- type yang:uuid;
- }
+ leaf id {
+ description "Identifier for the VLD.";
+ type yang:uuid;
+ }
- leaf name {
- description "Virtual Link Descriptor (VLD) name.";
- type string;
- }
+ leaf name {
+ description "Virtual Link Descriptor (VLD) name.";
+ type string;
+ }
- leaf short-name {
- description "Short name for VLD for UI";
- type string;
- }
+ leaf short-name {
+ description "Short name for VLD for UI";
+ type string;
+ }
- leaf vendor {
- description "Provider of the VLD.";
- type string;
- }
+ leaf vendor {
+ description "Provider of the VLD.";
+ type string;
+ }
- leaf description {
- description "Description of the VLD.";
- type string;
- }
+ leaf description {
+ description "Description of the VLD.";
+ type string;
+ }
- leaf version {
- description "Version of the VLD";
- type string;
- }
+ leaf version {
+ description "Version of the VLD";
+ type string;
+ }
- leaf type {
- type manotypes:virtual-link-type;
- }
+ leaf type {
+ type manotypes:virtual-link-type;
+ }
- leaf root-bandwidth {
- description
+ leaf root-bandwidth {
+ description
"For ELAN this is the aggregate bandwidth.";
- type uint64;
- }
+ type uint64;
+ }
- leaf leaf-bandwidth {
- description
+ leaf leaf-bandwidth {
+ description
"For ELAN this is the bandwidth of branches.";
- type uint64;
- }
+ type uint64;
+ }
- list vnfd-connection-point-ref {
- description
+ list vnfd-connection-point-ref {
+ description
"A list of references to connection points.";
- key "vnfd-ref member-vnf-index-ref";
+ key "vnfd-ref member-vnf-index-ref";
- leaf vnfd-ref {
- description "A reference to a vnfd";
- type leafref {
- path "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:id";
+ leaf vnfd-ref {
+ description "A reference to a vnfd";
+ type leafref {
+ path "../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
+ }
}
- }
- leaf member-vnf-index-ref {
- description
+ leaf member-vnf-index-ref {
+ description
"A reference to the consituent-vnfd id in nsd.
Should have been a leafref to:
- '/nsd:nsd-catalog:/nsd:nsd/constituent-vnfd/member-vnf-index-ref'.
+ '/rw-project:project/project-nsd:nsd-catalog:/nsd/constituent-vnfd/member-vnf-index-ref'.
Instead using direct leaf to avoid circular reference.";
- type uint64;
- }
+ type uint64;
+ }
- leaf vnfd-connection-point-ref {
- description
+ leaf vnfd-connection-point-ref {
+ description
"A reference to a connection point name in a vnfd";
- type leafref {
- path "/vnfd:vnfd-catalog/vnfd:vnfd"
- + "[vnfd:id = current()/../vld:vnfd-ref]"
- + "/vnfd:connection-point/vnfd:name";
+ type leafref {
+ path "../../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd"
+ + "[project-vnfd:id = current()/../vld:vnfd-ref]"
+ + "/project-vnfd:connection-point/project-vnfd:name";
+ }
}
}
- }
- // replicate for pnfd container here
- uses manotypes:provider-network;
+ // replicate for pnfd container here
+ uses manotypes:provider-network;
+ }
}
}
}
--- /dev/null
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:vlr-role</role>
+ <key-set>
+ <name>project-name</name>
+ <path>/rw-project:project/rw-project:name</path>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-oper</role>
+ <keys-role>rw-project-mano:vlr-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/vlr:vlr-catalog</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:vlr-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/vlr:vlr-catalog</path>
+ </authorize>
+ </role-definition>
+</config>
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix vlr;
}
- tailf:annotate "/vlr:vlr-catalog" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf:annotate "/rw-project:project/vlr:vlr-catalog" {
tailf:callpoint rw_callpoint;
}
}
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
namespace "urn:ietf:params:xml:ns:yang:nfvo:vlr";
prefix "vlr";
- import rw-pb-ext {
- prefix "rwpb";
- }
-
import ietf-inet-types {
prefix "inet";
}
prefix "vld";
}
+ import vnfd-base {
+ prefix "vnfd-base";
+ }
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-09-10 {
description
"Initial revision. This YANG file defines
"Derived from earlier versions of base YANG files";
}
- container vlr-catalog {
- config false;
+ augment "/rw-project:project" {
+ container vlr-catalog {
+ config false;
- list vlr {
- key "id";
- unique "name";
+ list vlr {
+ key "id";
+ unique "name";
- leaf id {
- description "Identifier for the VLR.";
- type yang:uuid;
- }
+ leaf id {
+ description "Identifier for the VLR.";
+ type yang:uuid;
+ }
- leaf name {
- description "VLR name.";
- type string;
- }
+ leaf name {
+ description "VLR name.";
+ type string;
+ }
- leaf nsr-id-ref {
- description
+ leaf nsr-id-ref {
+ description
"NS instance identifier.
- This is a leafref /nsr:ns-instance-config/nsr:nsr/nsr:id";
- type yang:uuid;
- }
+ This is a leafref /rw-project:project/nsr:ns-instance-config/nsr:nsr/nsr:id";
+ type yang:uuid;
+ }
- leaf vld-ref {
- description
- "Reference to VLD
- /nsr:ns-instance-config/nsr:nsr[nsr:id=../nsr-id-ref]/nsd/vld:vld/vld:id";
- type string;
- }
+ leaf vld-ref {
+ description
+ "Reference to VLD
+ /rw-project:project/nsr:ns-instance-config/nsr:nsr[nsr:id=../nsr-id-ref]
+ /nsd/vld:vld/vld:id";
+ type string;
+ }
- leaf res-id {
- description "Identifier for resmgr id mapping";
- type yang:uuid;
- }
+ leaf res-id {
+ description "Identifier for resmgr id mapping";
+ type yang:uuid;
+ }
- leaf short-name {
- description "Short name to appear as label in the UI";
- type string;
- }
+ leaf short-name {
+ description "Short name to appear as label in the UI";
+ type string;
+ }
- leaf vendor {
- description "Provider of the VLR.";
- type string;
- }
+ leaf vendor {
+ description "Provider of the VLR.";
+ type string;
+ }
- leaf description {
- description "Description of the VLR.";
- type string;
- }
+ leaf description {
+ description "Description of the VLR.";
+ type string;
+ }
- leaf version {
- description "Version of the VLR";
- type string;
- }
+ leaf version {
+ description "Version of the VLR";
+ type string;
+ }
- leaf type {
- type manotypes:virtual-link-type;
- }
+ leaf type {
+ type manotypes:virtual-link-type;
+ }
- leaf root-bandwidth {
- description
+ leaf root-bandwidth {
+ description
"For ELAN this is the aggregate bandwidth.";
- type uint64;
- }
+ type uint64;
+ }
- leaf leaf-bandwidth {
- description
+ leaf leaf-bandwidth {
+ description
"For ELAN this is the bandwidth of branches.";
- type uint64;
- }
+ type uint64;
+ }
- leaf create-time {
- description
- "Creation timestamp of this Virtual Link.
+ leaf create-time {
+ description
+ "Creation timestamp of this Virtual Link.
The timestamp is expressed as seconds
since unix epoch - 1970-01-01T00:00:00Z";
- type uint32;
- }
+ type uint32;
+ }
- leaf uptime {
- description
- "Active period of this Virtual Link.
+ leaf uptime {
+ description
+ "Active period of this Virtual Link.
Uptime is expressed in seconds";
- type uint32;
- }
+ type uint32;
+ }
- leaf network-id {
- description
+ leaf network-id {
+ description
"Identifier for the allocated network resource.";
- type string;
- }
+ type string;
+ }
- leaf vim-network-name {
- description
+ leaf vim-network-name {
+ description
"Name of network in VIM account. This is used to indicate
pre-provisioned network name in cloud account.";
- type string;
- }
+ type string;
+ }
- // replicate for pnfd container here
+ // replicate for pnfd container here
- uses manotypes:provider-network;
- uses manotypes:ip-profile-info;
+ uses manotypes:provider-network;
+ uses manotypes:ip-profile-info;
- leaf status {
- description
+ leaf status {
+ description
"Status of the virtual link record.";
- type enumeration {
- enum LINK_UP;
- enum DEGRADED;
- enum LINK_DOWN;
+ type enumeration {
+ enum LINK_UP;
+ enum DEGRADED;
+ enum LINK_DOWN;
+ }
}
- }
- leaf operational-status {
- description
- "The operational status of the Virtual Link
+ leaf operational-status {
+ description
+ "The operational status of the Virtual Link
init : The VL is in init stat.
vl-alloc-pending : The VL alloc is pending in VIM
running : The VL is up and running in VM
failed : The VL instantiation failed in VIM.
";
- type enumeration {
- rwpb:enum-type "VlOperationalStatus";
- enum init;
- enum vl-alloc-pending;
- enum running;
- enum vl-terminate-pending;
- enum terminated;
- enum failed;
+ type enumeration {
+ enum init;
+ enum vl-alloc-pending;
+ enum running;
+ enum vl-terminate-pending;
+ enum terminated;
+ enum failed;
+ }
}
}
}
--- /dev/null
+
+/*
+ *
+ * Copyright 2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+module vnfd-base
+{
+ namespace "http://riftio.com/ns/riftware-1.0/vnfd-base";
+ prefix "vnfd-base";
+
+ import mano-types {
+ prefix "manotypes";
+ }
+
+ import ietf-inet-types {
+ prefix "inet";
+ }
+
+ revision 2017-02-28 {
+ description
+ "Initial revision. This YANG file defines
+ the common types for Virtual Network Function
+ (VNF) descriptor";
+ reference
+ "Derived from earlier versions of base YANG files";
+ }
+
+ grouping common-connection-point {
+ leaf name {
+ description "Name of the connection point";
+ type string;
+ }
+
+ leaf id {
+ description "Identifier for the internal connection points";
+ type string;
+ }
+
+ leaf short-name {
+ description "Short name to appear as label in the UI";
+ type string;
+ }
+
+ leaf type {
+ description "Type of the connection point.";
+ type manotypes:connection-point-type;
+ }
+
+ leaf port-security-enabled {
+ description "Enables the port security for the port";
+ type boolean;
+ }
+ }
+
+ typedef interface-type {
+ type enumeration {
+ enum INTERNAL;
+ enum EXTERNAL;
+ }
+ }
+
+ grouping virtual-interface {
+ container virtual-interface {
+ description
+ "Container for the virtual interface properties";
+
+ leaf type {
+ description
+ "Specifies the type of virtual interface
+ between VM and host.
+ VIRTIO : Use the traditional VIRTIO interface.
+ PCI-PASSTHROUGH : Use PCI-PASSTHROUGH interface.
+ SR-IOV : Use SR-IOV interface.
+ E1000 : Emulate E1000 interface.
+ RTL8139 : Emulate RTL8139 interface.
+ PCNET : Emulate PCNET interface.
+ OM-MGMT : Used to specify openmano mgmt external-connection type";
+
+ type enumeration {
+ enum OM-MGMT;
+ enum PCI-PASSTHROUGH;
+ enum SR-IOV;
+ enum VIRTIO;
+ enum E1000;
+ enum RTL8139;
+ enum PCNET;
+ }
+ default "VIRTIO";
+ }
+
+ leaf vpci {
+ description
+ "Specifies the virtual PCI address. Expressed in
+ the following format dddd:dd:dd.d. For example
+ 0000:00:12.0. This information can be used to
+ pass as metadata during the VM creation.";
+ type string;
+ }
+
+ leaf bandwidth {
+ description
+ "Aggregate bandwidth of the NIC.";
+ type uint64;
+ }
+ }
+ }
+
+ grouping vnfd-descriptor {
+ leaf id {
+ description "Identifier for the VNFD.";
+ type string {
+ length "1..63";
+ }
+ }
+
+ leaf name {
+ description "VNFD name.";
+ mandatory true;
+ type string;
+ }
+
+ leaf short-name {
+ description "Short name to appear as label in the UI";
+ type string;
+ }
+
+ leaf vendor {
+ description "Vendor of the VNFD.";
+ type string;
+ }
+
+ leaf logo {
+ description
+ "Vendor logo for the Virtual Network Function";
+ type string;
+ }
+
+ leaf description {
+ description "Description of the VNFD.";
+ type string;
+ }
+
+ leaf version {
+ description "Version of the VNFD";
+ type string;
+ }
+
+ uses manotypes:vnf-configuration;
+
+ container mgmt-interface {
+ description
+ "Interface over which the VNF is managed.";
+
+ choice endpoint-type {
+ description
+ "Indicates the type of management endpoint.";
+
+ case ip {
+ description
+ "Specifies the static IP address for managing the VNF.";
+ leaf ip-address {
+ type inet:ip-address;
+ }
+ }
+
+ case vdu-id {
+ description
+ "Use the default management interface on this VDU.";
+ leaf vdu-id {
+ type leafref {
+ path "../../vdu/id";
+ }
+ }
+ }
+
+ case cp {
+ description
+ "Use the ip address associated with this connection point.";
+ leaf cp {
+ type leafref {
+ path "../../connection-point/name";
+ }
+ }
+ }
+ }
+
+ leaf port {
+ description
+ "Port for the management interface.";
+ type inet:port-number;
+ }
+
+ container dashboard-params {
+ description "Parameters for the VNF dashboard";
+
+ leaf path {
+ description "The HTTP path for the dashboard";
+ type string;
+ }
+
+ leaf https {
+ description "Pick HTTPS instead of HTTP , Default is false";
+ type boolean;
+ }
+
+ leaf port {
+ description "The HTTP port for the dashboard";
+ type inet:port-number;
+ }
+ }
+ }
+
+ list internal-vld {
+ key "id";
+ description
+ "List of Internal Virtual Link Descriptors (VLD).
+ The internal VLD describes the basic topology of
+ the connectivity such as E-LAN, E-Line, E-Tree.
+ between internal VNF components of the system.";
+
+ leaf id {
+ description "Identifier for the VLD";
+ type string;
+ }
+
+ leaf name {
+ description "Name of the internal VLD";
+ type string;
+ }
+
+ leaf short-name {
+ description "Short name to appear as label in the UI";
+ type string;
+ }
+
+ leaf description {
+ type string;
+ }
+
+ leaf type {
+ type manotypes:virtual-link-type;
+ }
+
+ leaf root-bandwidth {
+ description
+ "For ELAN this is the aggregate bandwidth.";
+ type uint64;
+ }
+
+ leaf leaf-bandwidth {
+ description
+ "For ELAN this is the bandwidth of branches.";
+ type uint64;
+ }
+
+ list internal-connection-point {
+ key "id-ref";
+ description "List of internal connection points in this VLD";
+ leaf id-ref {
+ description "reference to the internal connection point id";
+ type leafref {
+ path "../../../vdu/internal-connection-point/id";
+ }
+ }
+ }
+
+ uses manotypes:provider-network;
+ choice init-params {
+ description "Extra parameters for VLD instantiation";
+
+ case vim-network-ref {
+ leaf vim-network-name {
+ description
+ "Name of network in VIM account. This is used to indicate
+ pre-provisioned network name in cloud account.";
+ type string;
+ }
+ }
+
+ case vim-network-profile {
+ leaf ip-profile-ref {
+ description "Named reference to IP-profile object";
+ type string;
+ }
+ }
+
+ }
+ }
+
+ uses manotypes:ip-profile-list;
+
+ list connection-point {
+ key "name";
+ description
+ "List for external connection points. Each VNF has one
+ or more external connection points that connect the VNF
+ to other VNFs or to external networks. Each VNF exposes
+ connection points to the orchestrator, which can construct
+ network services by connecting the connection points
+ between different VNFs. The NFVO will use VLDs and VNFFGs
+ at the network service level to construct network services.";
+
+ uses common-connection-point;
+ }
+
+ list vdu {
+ description "List of Virtual Deployment Units";
+ key "id";
+
+ leaf id {
+ description "Unique id for the VDU";
+ type string;
+ }
+
+ leaf name {
+ description "Unique name for the VDU";
+ type string;
+ }
+
+ leaf description {
+ description "Description of the VDU.";
+ type string;
+ }
+
+ leaf count {
+ description "Number of instances of VDU";
+ type uint64;
+ }
+
+ leaf mgmt-vpci {
+ description
+ "Specifies the virtual PCI address. Expressed in
+ the following format dddd:dd:dd.d. For example
+ 0000:00:12.0. This information can be used to
+ pass as metadata during the VM creation.";
+ type string;
+ }
+
+ uses manotypes:vm-flavor;
+ uses manotypes:guest-epa;
+ uses manotypes:vswitch-epa;
+ uses manotypes:hypervisor-epa;
+ uses manotypes:host-epa;
+ uses manotypes:image-properties;
+
+ choice cloud-init-input {
+ description
+ "Indicates how the contents of cloud-init script are provided.
+ There are 2 choices - inline or in a file";
+
+ case inline {
+ leaf cloud-init {
+ description
+ "Contents of cloud-init script, provided inline, in cloud-config format";
+ type string;
+ }
+ }
+
+ case filename {
+ leaf cloud-init-file {
+ description
+ "Name of file with contents of cloud-init script in cloud-config format";
+ type string;
+ }
+ }
+ }
+
+ uses manotypes:supplemental-boot-data;
+
+ list internal-connection-point {
+ key "id";
+ description
+ "List for internal connection points. Each VNFC
+ has zero or more internal connection points.
+ Internal connection points are used for connecting
+ the VNF with components internal to the VNF. If a VNF
+ has only one VNFC, it may not have any internal
+ connection points.";
+
+ uses common-connection-point;
+
+ leaf internal-vld-ref {
+ type leafref {
+ path "../../../internal-vld/id";
+ }
+ }
+ }
+
+ list interface {
+ description
+ "List of Interfaces (external and internal) for the VNF";
+ key name;
+
+ leaf name {
+ description
+ "Name of the interface. Note that this
+ name has only local significance to the VDU.";
+ type string;
+ }
+
+ leaf position {
+ description
+ "Explicit Position of the interface within the list";
+ type uint32;
+ }
+
+ leaf type {
+ description
+ "Type of the Interface";
+ type interface-type;
+
+ default "EXTERNAL";
+ }
+
+ choice connection-point-type {
+ case internal {
+ leaf internal-connection-point-ref {
+ description
+ "Leaf Ref to the particular internal connection point";
+ type leafref {
+ path "../../internal-connection-point/id";
+ }
+ }
+ }
+ case external {
+ leaf external-connection-point-ref {
+ description
+ "Leaf Ref to the particular external connection point";
+ type leafref {
+ path "../../../connection-point/name";
+ }
+ }
+ }
+ }
+
+ uses virtual-interface;
+ }
+
+
+ list volumes {
+ key "name";
+
+ leaf name {
+ description "Name of the disk-volumes, e.g. vda, vdb etc";
+ type string;
+ }
+
+ uses manotypes:volume-info;
+ }
+ }
+
+ list vdu-dependency {
+ description
+ "List of VDU dependencies.";
+
+ key vdu-source-ref;
+ leaf vdu-source-ref {
+ type leafref {
+ path "../../vdu/id";
+ }
+ }
+
+ leaf vdu-depends-on-ref {
+ description
+ "Reference to the VDU on which
+ the source VDU depends.";
+ type leafref {
+ path "../../vdu/id";
+ }
+ }
+ }
+
+ leaf service-function-chain {
+ description "Type of node in Service Function Chaining Architecture";
+
+ type enumeration {
+ enum UNAWARE;
+ enum CLASSIFIER;
+ enum SF;
+ enum SFF;
+ }
+ default "UNAWARE";
+ }
+
+ leaf service-function-type {
+ description
+ "Type of Service Function.
+ NOTE: This needs to map with Service Function Type in ODL to
+ support VNFFG. Service Function Type is mandatory param in ODL
+ SFC. This is temporarily set to string for ease of use";
+ type string;
+ }
+
+ uses manotypes:monitoring-param;
+
+ list placement-groups {
+ description "List of placement groups at VNF level";
+
+ key "name";
+ uses manotypes:placement-group-info;
+
+ list member-vdus {
+
+ description
+ "List of VDUs that are part of this placement group";
+ key "member-vdu-ref";
+
+ leaf member-vdu-ref {
+ type leafref {
+ path "../../../vdu/id";
+ }
+ }
+ }
+ }
+ }
+}
+
+// vim: sw=2
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
namespace "urn:ietf:params:xml:ns:yang:nfvo:vnfd";
prefix "vnfd";
- import mano-types {
- prefix "manotypes";
+ import vnfd-base {
+ prefix "vnfd-base";
}
- import rw-pb-ext {
- prefix "rwpb";
- }
-
- import ietf-yang-types {
- prefix "yang";
- }
-
- import ietf-inet-types {
- prefix "inet";
+ revision 2017-02-28 {
+ description
+ "Update model to support projects.";
}
revision 2015-09-10 {
"Derived from earlier versions of base YANG files";
}
- grouping common-connection-point {
- leaf name {
- description "Name of the connection point";
- type string;
- }
-
- leaf id {
- description "Identifier for the internal connection points";
- type string;
- }
-
- leaf short-name {
- description "Short name to appear as label in the UI";
- type string;
- }
-
- leaf type {
- description "Type of the connection point.";
- type manotypes:connection-point-type;
- }
- leaf port-security-enabled {
- description "Enables the port security for the port";
- type boolean;
- }
- }
-
- grouping virtual-interface {
- container virtual-interface {
- description
- "Container for the virtual interface properties";
-
- leaf type {
- description
- "Specifies the type of virtual interface
- between VM and host.
- VIRTIO : Use the traditional VIRTIO interface.
- PCI-PASSTHROUGH : Use PCI-PASSTHROUGH interface.
- SR-IOV : Use SR-IOV interface.
- E1000 : Emulate E1000 interface.
- RTL8139 : Emulate RTL8139 interface.
- PCNET : Emulate PCNET interface.
- OM-MGMT : Used to specify openmano mgmt external-connection type";
-
- type enumeration {
- enum OM-MGMT;
- enum PCI-PASSTHROUGH;
- enum SR-IOV;
- enum VIRTIO;
- enum E1000;
- enum RTL8139;
- enum PCNET;
- }
- default "VIRTIO";
- }
-
- leaf vpci {
- description
- "Specifies the virtual PCI address. Expressed in
- the following format dddd:dd:dd.d. For example
- 0000:00:12.0. This information can be used to
- pass as metadata during the VM creation.";
- type string;
- }
-
- leaf bandwidth {
- description
- "Aggregate bandwidth of the NIC.";
- type uint64;
- }
- }
- }
-
- grouping vnfd-descriptor {
- leaf id {
- description "Identifier for the VNFD.";
- type string;
- }
-
- leaf name {
- description "VNFD name.";
- mandatory true;
- type string;
- }
-
- leaf short-name {
- description "Short name to appear as label in the UI";
- type string;
- }
-
- leaf vendor {
- description "Vendor of the VNFD.";
- type string;
- }
-
- leaf logo {
- description
- "Vendor logo for the Virtual Network Function";
- type string;
- }
-
- leaf description {
- description "Description of the VNFD.";
- type string;
- }
-
- leaf version {
- description "Version of the VNFD";
- type string;
- }
-
- uses manotypes:vnf-configuration;
-
- container mgmt-interface {
- description
- "Interface over which the VNF is managed.";
-
- choice endpoint-type {
- description
- "Indicates the type of management endpoint.";
-
- case ip {
- description
- "Specifies the static IP address for managing the VNF.";
- leaf ip-address {
- type inet:ip-address;
- }
- }
-
- case vdu-id {
- description
- "Use the default management interface on this VDU.";
- leaf vdu-id {
- type leafref {
- path "../../vdu/id";
- }
- }
- }
-
- case cp {
- description
- "Use the ip address associated with this connection point.";
- leaf cp {
- type leafref {
- path "../../connection-point/name";
- }
- }
- }
- }
-
- leaf port {
- description
- "Port for the management interface.";
- type inet:port-number;
- }
-
- container dashboard-params {
- description "Parameters for the VNF dashboard";
-
- leaf path {
- description "The HTTP path for the dashboard";
- type string;
- }
-
- leaf https {
- description "Pick HTTPS instead of HTTP , Default is false";
- type boolean;
- }
-
- leaf port {
- description "The HTTP port for the dashboard";
- type inet:port-number;
- }
- }
- }
-
- list internal-vld {
- key "id";
- description
- "List of Internal Virtual Link Descriptors (VLD).
- The internal VLD describes the basic topology of
- the connectivity such as E-LAN, E-Line, E-Tree.
- between internal VNF components of the system.";
-
- leaf id {
- description "Identifier for the VLD";
- type string;
- }
-
- leaf name {
- description "Name of the internal VLD";
- type string;
- }
-
- leaf short-name {
- description "Short name to appear as label in the UI";
- type string;
- }
-
- leaf description {
- description "Description of internal VLD.";
- type string;
- }
-
- leaf type {
- type manotypes:virtual-link-type;
- }
-
- leaf root-bandwidth {
- description
- "For ELAN this is the aggregate bandwidth.";
- type uint64;
- }
-
- leaf leaf-bandwidth {
- description
- "For ELAN this is the bandwidth of branches.";
- type uint64;
- }
-
- list internal-connection-point {
- key "id-ref";
- description "List of internal connection points in this VLD";
- leaf id-ref {
- description "reference to the internal connection point id";
- type leafref {
- path "../../../vdu/internal-connection-point/id";
- }
- }
- }
- uses manotypes:provider-network;
- choice init-params {
- description "Extra parameters for VLD instantiation";
-
- case vim-network-ref {
- leaf vim-network-name {
- description
- "Name of network in VIM account. This is used to indicate
- pre-provisioned network name in cloud account.";
- type string;
- }
- }
-
- case vim-network-profile {
- leaf ip-profile-ref {
- description "Named reference to IP-profile object";
- type leafref {
- path "../../ip-profiles/name";
- }
- }
- }
- }
- }
-
- uses manotypes:ip-profile-list;
-
- list connection-point {
- key "name";
- description
- "List for external connection points. Each VNF has one
- or more external connection points that connect the VNF
- to other VNFs or to external networks. Each VNF exposes
- connection points to the orchestrator, which can construct
- network services by connecting the connection points
- between different VNFs. The NFVO will use VLDs and VNFFGs
- at the network service level to construct network services.";
-
- uses common-connection-point;
- }
-
- list vdu {
- description "List of Virtual Deployment Units";
- key "id";
-
- leaf id {
- description "Unique id for the VDU";
- type string;
- }
-
- leaf name {
- description "Unique name for the VDU";
- type string;
- }
-
- leaf description {
- description "Description of the VDU.";
- type string;
- }
-
- leaf count {
- description "Number of instances of VDU";
- type uint64;
- }
-
- leaf mgmt-vpci {
- description
- "Specifies the virtual PCI address. Expressed in
- the following format dddd:dd:dd.d. For example
- 0000:00:12.0. This information can be used to
- pass as metadata during the VM creation.";
- type string;
- }
-
- uses manotypes:vm-flavor;
- uses manotypes:guest-epa;
- uses manotypes:vswitch-epa;
- uses manotypes:hypervisor-epa;
- uses manotypes:host-epa;
-
- list alarm {
- key "alarm-id";
-
- uses manotypes:alarm;
- }
-
- uses manotypes:image-properties;
-
- choice cloud-init-input {
- description
- "Indicates how the contents of cloud-init script are provided.
- There are 2 choices - inline or in a file";
-
- case inline {
- leaf cloud-init {
- description
- "Contents of cloud-init script, provided inline, in cloud-config format";
- type string;
- }
- }
-
- case filename {
- leaf cloud-init-file {
- description
- "Name of file with contents of cloud-init script in cloud-config format";
- type string;
- }
- }
- }
-
- uses manotypes:supplemental-boot-data;
-
- list internal-connection-point {
- key "id";
- description
- "List for internal connection points. Each VNFC
- has zero or more internal connection points.
- Internal connection points are used for connecting
- the VNF with components internal to the VNF. If a VNF
- has only one VNFC, it may not have any internal
- connection points.";
-
- uses common-connection-point;
- }
-
- list internal-interface {
- description
- "List of internal interfaces for the VNF";
- key name;
-
- leaf name {
- description
- "Name of internal interface. Note that this
- name has only local significance to the VDU.";
- type string;
- }
-
- leaf vdu-internal-connection-point-ref {
- type leafref {
- path "../../internal-connection-point/id";
- }
- }
- uses virtual-interface;
- }
-
- list external-interface {
- description
- "List of external interfaces for the VNF.
- The external interfaces enable sending
- traffic to and from VNF.";
- key name;
-
- leaf name {
- description
- "Name of the external interface. Note that
- this name has only local significance to
- the VDU.";
- type string;
- }
-
- leaf vnfd-connection-point-ref {
- description
- "Name of the external connection point.";
- type leafref {
- path "../../../connection-point/name";
- }
- }
- uses virtual-interface;
- }
-
- list volumes {
- key "name";
-
- leaf name {
- description "Name of the disk-volumes, e.g. vda, vdb etc";
- type string;
- }
-
- uses manotypes:volume-info;
- }
- }
-
- list vdu-dependency {
- description
- "List of VDU dependencies.";
-
- key vdu-source-ref;
- leaf vdu-source-ref {
- type leafref {
- path "../../vdu/id";
- }
- }
-
- leaf vdu-depends-on-ref {
- description
- "Reference to the VDU on which
- the source VDU depends.";
- type leafref {
- path "../../vdu/id";
- }
- }
- }
-
- leaf service-function-chain {
- description "Type of node in Service Function Chaining Architecture";
-
- type enumeration {
- enum UNAWARE;
- enum CLASSIFIER;
- enum SF;
- enum SFF;
- }
- default "UNAWARE";
- }
-
- leaf service-function-type {
- description
- "Type of Service Function.
- NOTE: This needs to map with Service Function Type in ODL to
- support VNFFG. Service Function Type is mandatory param in ODL
- SFC. This is temporarily set to string for ease of use";
- type string;
- }
-
- uses manotypes:monitoring-param;
-
- list placement-groups {
- description "List of placement groups at VNF level";
-
- key "name";
- uses manotypes:placement-group-info;
-
- list member-vdus {
-
- description
- "List of VDUs that are part of this placement group";
- key "member-vdu-ref";
-
- leaf member-vdu-ref {
- type leafref {
- path "../../../vdu/id";
- }
- }
- }
- }
- }
-
container vnfd-catalog {
description
- "Virtual Network Function Descriptor (VNFD).";
+ "Virtual Network Function Descriptor (VNFD).";
list vnfd {
key "id";
- uses vnfd-descriptor;
- }
+ uses vnfd-base:vnfd-descriptor;
+ }
}
}
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
namespace "urn:ietf:params:xml:ns:yang:nfvo:vnffgd";
prefix "vnffgd";
- import rw-pb-ext {
- prefix "rwpb";
- }
-
import ietf-inet-types {
prefix "inet";
}
prefix "manotypes";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2014-10-27 {
description
"Initial revision. This YANG file defines
"Derived from earlier versions of base YANG files";
}
- container vnffgd-catalog {
+ augment "/rw-project:project" {
+ container vnffgd-catalog {
- list vnffgd {
- key "id";
+ list vnffgd {
+ key "id";
- leaf name {
- description "VNF Forwarding Graph Descriptor name.";
- type string;
- }
+ leaf name {
+ description "VNF Forwarding Graph Descriptor name.";
+ type string;
+ }
- leaf id {
- description "Identifier for the VNFFGD.";
- type yang:uuid;
- }
+ leaf id {
+ description "Identifier for the VNFFGD.";
+ type yang:uuid;
+ }
- leaf provider {
- description "Provider of the VNFFGD.";
- type string;
- }
+ leaf provider {
+ description "Provider of the VNFFGD.";
+ type string;
+ }
- leaf description {
- description "Description of the VNFFGD.";
- type string;
- }
+ leaf description {
+ description "Description of the VNFFGD.";
+ type string;
+ }
- leaf version {
- description "Version of the VNFFGD";
- type string;
- }
+ leaf version {
+ description "Version of the VNFFGD";
+ type string;
+ }
- //TODO: Add more content here
+ //TODO: Add more content here
+ }
}
}
}
--- /dev/null
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:vnfr-role</role>
+ <key-set>
+ <name>project-name</name>
+ <path>/rw-project:project/rw-project:name</path>
+ <path>/vnfr:create-alarm/vnfr:project-name</path>
+ <path>/vnfr:destroy-alarm/vnfr:project-name</path>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-oper</role>
+ <keys-role>rw-project-mano:vnfr-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/vnfr:vnfr-catalog</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:vnfr-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/vnfr:vnfr-catalog</path>
+ <path>/vnfr:create-alarm</path>
+ <path>/vnfr:destroy-alarm</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project:project-admin</role>
+ <keys-role>rw-project-mano:vnfr-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/vnfr:create-alarm</path>
+ <path>/vnfr:destroy-alarm</path>
+ </authorize>
+ </role-definition>
+</config>
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix vnfr;
}
- tailf:annotate "/vnfr:vnfr-catalog" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf:annotate "/rw-project:project/vnfr:vnfr-catalog" {
tailf:callpoint rw_callpoint;
}
/*
- *
- * Copyright 2016 RIFT.IO Inc
+ *
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix "manotypes";
}
- import rw-pb-ext {
- prefix "rwpb";
+ import vnfd-base {
+ prefix "vnfd-base";
}
- import vnfd {
- prefix "vnfd";
+ import project-vnfd {
+ prefix "project-vnfd";
}
- import nsd {
- prefix "nsd";
+ import project-nsd {
+ prefix "project-nsd";
}
import vlr {
prefix "inet";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import rw-cloud {
+ prefix "rw-cloud";
+ }
+
+ revision 2017-02-28 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-09-10 {
description
"Initial revision. This YANG file defines
"Derived from earlier versions of base YANG files";
}
+ typedef vdu-operational-status {
+ type enumeration {
+ enum init;
+ enum vm-init-phase;
+ enum vm-alloc-pending;
+ enum running;
+ enum terminate;
+ enum vl-terminate-phase;
+ enum terminated;
+ enum failed;
+ }
+ }
+
+ typedef vnf-operational-status {
+ type enumeration {
+ enum pre-init;
+ enum init;
+ enum vl-init-phase;
+ enum vm-init-phase;
+ enum running;
+ enum terminate;
+ enum vm-terminate-phase;
+ enum vl-terminate-phase;
+ enum terminated;
+ enum failed;
+ }
+ }
+
grouping placement-group-info {
list placement-groups-info {
description
key "name";
uses manotypes:placement-group-info;
uses manotypes:placement-group-input;
- }
+ }
}
-
-
+
grouping virtual-interface {
container virtual-interface {
description
}
}
- container vnfr-catalog {
- config false;
- list vnfr {
- description
- "Virtual Network Function Record (VNFR).";
- key "id";
- unique "name";
+ grouping associated-virtual-cps {
+ list virtual-cps {
+ key "name";
+ uses vnfd-base:common-connection-point;
+
+ leaf ip-address {
+ description
+ "IP address assigned to the virtual connection point";
+ type inet:ip-address;
+ }
+
+ leaf mac-address {
+ description
+ "MAC address assigned to the virtual connection point";
+ type string;
+ }
- leaf id {
- description "Identifier for the VNFR.";
- type yang:uuid;
+ leaf connection-point-id {
+ description "VIM identifier for connection point";
+ type string;
}
+ }
+ }
- leaf nsr-id-ref {
+ augment "/rw-project:project" {
+ container vnfr-catalog {
+ config false;
+ list vnfr {
description
+ "Virtual Network Function Record (VNFR).";
+ key "id";
+ unique "name";
+
+ leaf id {
+ description "Identifier for the VNFR.";
+ type yang:uuid;
+ }
+
+ leaf nsr-id-ref {
+ description
"NS instance identifier.
This is a leafref /nsr:ns-instance-config/nsr:nsr/nsr:id";
- type yang:uuid;
- }
+ type yang:uuid;
+ }
- leaf member-vnf-index-ref {
- description "Reference to member VNF index in Network service.";
- type leafref {
- path "/nsd:nsd-catalog/nsd:nsd/nsd:constituent-vnfd/nsd:member-vnf-index";
+ leaf member-vnf-index-ref {
+ description "Reference to member VNF index in Network service.";
+ type leafref {
+ path "../../../project-nsd:nsd-catalog/project-nsd:nsd/project-nsd:constituent-vnfd/project-nsd:member-vnf-index";
+ }
}
- }
- leaf dashboard-url {
- description "Dashboard URL";
- type inet:uri;
- }
+ leaf dashboard-url {
+ description "Dashboard URL";
+ type inet:uri;
+ }
- leaf name {
- description "VNFR name.";
- type string;
- }
+ leaf name {
+ description "VNFR name.";
+ type string;
+ }
- leaf short-name {
- description "VNFR short name.";
- type string;
- }
+ leaf short-name {
+ description "VNFR short name.";
+ type string;
+ }
- leaf vendor {
- description "Vendor of the VNFR.";
- type string;
- }
+ leaf vendor {
+ description "Vendor of the VNFR.";
+ type string;
+ }
- leaf description {
- description "Description of the VNFR.";
- type string;
- }
+ leaf description {
+ description "Description of the VNFR.";
+ type string;
+ }
- leaf version {
- description "Version of the VNFR";
- type string;
- }
+ leaf version {
+ description "Version of the VNFR";
+ type string;
+ }
- leaf create-time {
- description
- "Creation timestamp of this Virtual Network
- Function. The timestamp is expressed as
+ leaf create-time {
+ description
+ "Creation timestamp of this Virtual Network
+ Function. The timestamp is expressed as
seconds since unix epoch - 1970-01-01T00:00:00Z";
- type uint32;
- }
+ type uint32;
+ }
- leaf uptime {
- description
- "Active period of this Virtual Network Function.
+ leaf uptime {
+ description
+ "Active period of this Virtual Network Function.
Uptime is expressed in seconds";
- type uint32;
- }
-
- container vnfd {
- description "VNF descriptor used to instantiate this VNF";
- uses vnfd:vnfd-descriptor;
- }
-
- // Use parameters provided here to configure this VNF
- uses manotypes:vnf-configuration;
+ type uint32;
+ }
- // Mainly used by Mon-params & dashboard url
- container mgmt-interface {
- leaf ip-address {
- type inet:ip-address;
+ container vnfd {
+ description "VNF descriptor used to instantiate this VNF";
+ uses vnfd-base:vnfd-descriptor;
}
- leaf port {
- type inet:port-number;
+
+ leaf vnfd-ref {
+ description "Reference to VNFD";
+ type leafref {
+ path "../../../project-vnfd:vnfd-catalog/project-vnfd:vnfd/project-vnfd:id";
+ }
}
- }
- list internal-vlr {
- key "vlr-ref";
+ // Use parameters provided here to configure this VNF
+ uses manotypes:vnf-configuration;
- leaf vlr-ref {
- description "Reference to a VLR record in the VLR catalog";
- type leafref {
- path "/vlr:vlr-catalog/vlr:vlr/vlr:id";
+ // Mainly used by Mon-params & dashboard url
+ container mgmt-interface {
+ leaf ip-address {
+ type inet:ip-address;
+ }
+
+ leaf port {
+ type inet:port-number;
+ }
+
+ container ssh-key {
+ description "SSH key pair used for this VNF";
+ leaf public-key {
+ description "Public key configured on this VNF";
+ type string;
+ }
+
+ leaf private-key-file {
+ description "Path to the private key file";
+ type string;
+ }
}
}
- leaf-list internal-connection-point-ref {
- type leafref {
- path "../../vdur/internal-connection-point/id";
+ list internal-vlr {
+ key "vlr-ref";
+
+ leaf vlr-ref {
+ description "Reference to a VLR record in the VLR catalog";
+ type leafref {
+ path "../../../../vlr:vlr-catalog/vlr:vlr/vlr:id";
+ }
+ }
+
+ leaf-list internal-connection-point-ref {
+ type leafref {
+ path "../../vdur/internal-connection-point/id";
+ }
}
}
- }
- list connection-point {
- key "name";
- description
+ list connection-point {
+ key "name";
+ description
"List for external connection points. Each VNF has one
or more external connection points. As the name
implies that external connection points are used for
different VNFs. The NFVO will use VLDs and VNFFGs at
the network service level to construct network services.";
- uses vnfd:common-connection-point;
+ uses vnfd-base:common-connection-point;
- leaf vlr-ref {
- description
+ leaf vlr-ref {
+ description
"Reference to the VLR associated with this connection point";
- type leafref {
- path "/vlr:vlr-catalog/vlr:vlr/vlr:id";
+ type leafref {
+ path "../../../../vlr:vlr-catalog/vlr:vlr/vlr:id";
+ }
}
- }
- leaf ip-address {
- description
+ leaf ip-address {
+ description
"IP address assigned to the external connection point";
- type inet:ip-address;
- }
- leaf mac-address {
- description
+ type inet:ip-address;
+ }
+
+ leaf mac-address {
+ description
"MAC address assigned to the external connection point";
- // type inet:mac-address;
- type string;
- }
- leaf connection-point-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
- }
+ // type inet:mac-address;
+ type string;
+ }
- list vdur {
- description "List of Virtual Deployment Units";
- key "id";
- unique "name";
+ leaf connection-point-id {
+ type string;
+ }
- leaf id {
- description "Unique id for the VDU";
- type yang:uuid;
+ uses associated-virtual-cps;
}
- leaf name {
- description "name of the instantiated VDUR";
- type string;
- }
+ list vdur {
+ description "List of Virtual Deployment Units";
+ key "id";
+ unique "name";
- leaf unique-short-name {
- description "Short Unique name of the VDU
- This will be of the format NSR name-ShortnedString-VDUname
- NSR name and VDU name shall be constrained to 10 characters";
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
+ leaf id {
+ description "Unique id for the VDU";
+ type yang:uuid;
+ }
- leaf vdu-id-ref {
- type leafref {
- path "../../vnfd/vdu/id";
+ leaf name {
+ description "name of the instantiated VDUR";
+ type string;
}
- }
- leaf vim-id {
- description "Allocated VM resource id";
- type string;
- }
+ leaf unique-short-name {
+ description "Short Unique name of the VDU
+ This will be of the format NSR name-ShortnedString-VDUname
+ NSR name and VDU name shall be constrained to 10 characters";
+ type string;
+ }
- leaf flavor-id {
- description "VIM assigned flavor id";
- type string;
- }
+ leaf vdu-id-ref {
+ type leafref {
+ path "../../vnfd/vdu/id";
+ }
+ }
- leaf image-id {
- description "VIM assigned image id";
- type string;
- }
+ leaf vim-id {
+ description "Allocated VM resource id";
+ type string;
+ }
- leaf management-ip {
- description "Management IP address";
- type inet:ip-address;
- }
+ leaf flavor-id {
+ description "VIM assigned flavor id";
+ type string;
+ }
- leaf vm-management-ip {
- description "VM Private Management IP address";
- type inet:ip-address;
- }
+ leaf image-id {
+ description "VIM assigned image id";
+ type string;
+ }
- leaf console-url {
- description "Console URL for this VDU, if available";
- type inet:uri;
- }
+ leaf management-ip {
+ description "Management IP address";
+ type inet:ip-address;
+ }
- uses manotypes:vm-flavor;
- uses manotypes:guest-epa;
- uses manotypes:vswitch-epa;
- uses manotypes:hypervisor-epa;
- uses manotypes:host-epa;
+ leaf vm-management-ip {
+ description "VM Private Management IP address";
+ type inet:ip-address;
+ }
- uses manotypes:supplemental-boot-data;
+ leaf console-url {
+ description "Console URL for this VDU, if available";
+ type inet:uri;
+ }
- list volumes {
- key "name";
+ uses manotypes:vm-flavor;
+ uses manotypes:guest-epa;
+ uses manotypes:vswitch-epa;
+ uses manotypes:hypervisor-epa;
+ uses manotypes:host-epa;
- leaf name {
- description "Name of the disk-volumes, e.g. vda, vdb etc";
- type string;
- }
+ uses manotypes:supplemental-boot-data;
- leaf volume-id {
- description "VIM assigned volume id";
- type string;
- }
+ list volumes {
+ key "name";
- uses manotypes:volume-info;
- }
+ leaf name {
+ description "Name of the disk-volumes, e.g. vda, vdb etc";
+ type string;
+ }
- list alarms {
- description
- "A list of the alarms that have been created for this VDU";
+ leaf volume-id {
+ description "VIM assigned volume id";
+ type string;
+ }
- key "alarm-id";
- uses manotypes:alarm;
- }
+ uses manotypes:volume-info;
+ }
- list internal-connection-point {
- key "id";
- description
+ list internal-connection-point {
+ key "id";
+ description
"List for internal connection points. Each VNFC
has zero or more internal connection points.
Internal connection points are used for connecting
has only one VNFC, it may not have any internal
connection points.";
- uses vnfd:common-connection-point;
+ uses vnfd-base:common-connection-point;
- leaf ip-address {
- description
+ leaf ip-address {
+ description
"IP address assigned to the internal connection point";
- type inet:ip-address;
- }
- leaf mac-address {
- description
+ type inet:ip-address;
+ }
+
+ leaf mac-address {
+ description
"MAC address assigned to the internal connection point";
- // type inet:mac-address;
- type string;
- }
- }
+ // type inet:mac-address;
+ type string;
+ }
- list internal-interface {
- description
- "List of internal interfaces for the VNF";
- key name;
+ leaf connection-point-id {
+ type string;
+ }
- leaf name {
- description
- "Name of internal interface. Note that this
- name has only local significance to the VDU.";
- type string;
+ uses associated-virtual-cps;
}
- leaf vdur-internal-connection-point-ref {
- type leafref {
- path "../../internal-connection-point/id";
+ list interface {
+ description
+ "List of interfaces (internal and external) for the VNF";
+ key name;
+
+ leaf name {
+ description
+ "Name of the interface. Note that this
+ name has only local significance to the VDU.";
+ type string;
}
- }
- uses virtual-interface;
- }
- list external-interface {
- description
- "List of external interfaces for the VNF.
- The external interfaces enable sending
- traffic to and from VNF.";
- key name;
+ leaf position {
+ description
+ "Explicit Position of the interface within the list";
+ type uint32;
+ }
- leaf name {
- description
- "Name of the external interface. Note that
- this name has only local significance.";
- type string;
+ leaf type {
+ description
+ "Type of the Interface";
+
+ type vnfd-base:interface-type;
+
+ default "EXTERNAL";
+ }
+
+ choice connection-point-type {
+ case internal {
+ leaf internal-connection-point-ref {
+ description
+ "Leaf Ref to the particular internal connection point";
+ type leafref {
+ path "../../internal-connection-point/id";
+ }
+ }
+ }
+ case external {
+ leaf external-connection-point-ref {
+ description
+ "Leaf Ref to the particular external connection point";
+ type leafref {
+ path "../../../connection-point/name";
+ }
+ }
+ }
+ }
+ uses virtual-interface;
}
- leaf vnfd-connection-point-ref {
+ leaf operational-status {
description
- "Name of the external connection point.";
- type leafref {
- path "../../../connection-point/name";
- }
+ "The operational status of the VDU
+ init : The VDU has just started.
+ vm-init-phase : The VDUs in the VNF is being created in VIM.
+ vm-alloc-pending : The VM alloc is pending in VIM
+ running : The VDU is active in VM
+ terminate : The VDU is being terminated
+ vm-terminate-phase : The VDU in the VNF is being terminated in VIM.
+ terminated : The VDU is in the terminated state.
+ failed : The VDU instantiation failed.
+ ";
+ type vdu-operational-status;
}
- uses virtual-interface;
+ uses placement-group-info;
}
+
+ uses manotypes:monitoring-param;
+
leaf operational-status {
description
- "The operational status of the VDU
- init : The VDU has just started.
- vm-init-phase : The VDUs in the VNF is being created in VIM.
- vm-alloc-pending : The VM alloc is pending in VIM
- running : The VDU is active in VM
- terminate : The VDU is being terminated
- vm-terminate-phase : The VDU in the VNF is being terminated in VIM.
- terminated : The VDU is in the terminated state.
- failed : The VDU instantiation failed.
+ "The operational status of the VNFR instance
+ pre-init : The VNF before Input Param Substitution.
+ init : The VNF has just started.
+ vl-init-phase : The internal VLs in the VNF are being instantiated.
+ vm-init-phase : The VMs for VDUs in the VNF are being instantiated.
+ running : The VNF is in running state.
+ terminate : The VNF is being terminated.
+ vm-terminate-phase : The VMs in the VNF are being terminated.
+ vl-terminate-phase : The internal VLs in the VNF are being terminated.
+ terminated : The VNF is in the terminated state.
+ failed : The VNF instantiation failed
";
-
- type enumeration {
- rwpb:enum-type "VduOperationalStatus";
- enum init;
- enum vm-init-phase;
- enum vm-alloc-pending;
- enum running;
- enum terminate;
- enum vl-terminate-phase;
- enum terminated;
- enum failed;
- }
+ type vnf-operational-status;
}
- uses placement-group-info;
- }
-
- uses manotypes:monitoring-param;
-
- leaf operational-status {
- description
- "The operational status of the VNFR instance
- init : The VNF has just started.
- vl-init-phase : The internal VLs in the VNF are being instantiated.
- vm-init-phase : The VMs for VDUs in the VNF are being instantiated.
- running : The VNF is in running state.
- terminate : The VNF is being terminated.
- vm-terminate-phase : The VMs in the VNF are being terminated.
- vl-terminate-phase : The internal VLs in the VNF are being terminated.
- terminated : The VNF is in the terminated state.
- failed : The VNF instantiation failed
- ";
- type enumeration {
- rwpb:enum-type "VnfrOperationalStatus";
- enum init;
- enum vl-init-phase;
- enum vm-init-phase;
- enum running;
- enum terminate;
- enum vm-terminate-phase;
- enum vl-terminate-phase;
- enum terminated;
- enum failed;
- }
- }
- leaf config-status {
- description
- "The configuration status of the NS instance
+ leaf config-status {
+ description
+ "The configuration status of the NS instance
configuring: At least one of the VNFs in this instance is in configuring state
configured: All the VNFs in this NS instance are configured or config-not-needed state
";
- type enumeration {
- enum configuring {
- value 1;
- }
- enum configured {
- value 2;
- }
- enum failed {
- value 3;
- }
- enum config-not-needed {
- value 4;
+ type enumeration {
+ enum configuring {
+ value 1;
+ }
+ enum configured {
+ value 2;
+ }
+ enum failed {
+ value 3;
+ }
+ enum config-not-needed {
+ value 4;
+ }
}
}
+ uses placement-group-info;
+
+ container cloud-config {
+ uses manotypes:cloud-config;
+ }
}
- uses placement-group-info;
}
}
rpc create-alarm {
description "Create an alert for a running VDU";
input {
+ uses manotypes:rpc-project-name;
+
leaf cloud-account {
mandatory true;
- type string;
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/../project-name]" +
+ "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+ }
}
leaf vdur-id {
rpc destroy-alarm {
description "Destroy an alert that is associated with a running VDU";
input {
+ uses manotypes:rpc-project-name;
+
leaf cloud-account {
mandatory true;
- type string;
+ type leafref {
+ path "/rw-project:project[rw-project:name=current()/../project-name]" +
+ "/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+ }
}
leaf alarm-id {
}
}
}
-
cmake_minimum_required(VERSION 2.8)
-set(PKG_NAME rwcal)
-set(PKG_VERSION 1.0)
-set(PKG_RELEASE 1)
-set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
-
set(subdirs src plugins test)
rift_add_subdirs(SUBDIR_LIST ${subdirs})
install(FILES include/riftware/rwcal-api.h
DESTINATION usr/include/riftware
- COMPONENT ${PKG_LONG_NAME})
+ COMPONENT ${INSTALL_COMPONENT}
+ )
install(
PROGRAMS
etc/userdata-template
DESTINATION etc
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
)
rift/cal/utils.py
rift/cal/rwcal_status.py
PYTHON3_ONLY
- COMPONENT rwcal-1.0)
+ COMPONENT ${INSTALL_COMPONENT})
install(
PROGRAMS
rift/cal/cloudsim
DESTINATION usr/bin
- COMPONENT rwcal-1.0
+ COMPONENT ${INSTALL_COMPONENT}
)
*/
rw_status_t rwcal_get_image_list(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
- rwpb_gi_Rwcal_VimResources **images);
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_VimResources **images);
/*
* Delete Image.
*/
rw_status_t rwcal_delete_image(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
const char * image_id);
/*
*/
rw_status_t rwcal_create_flavor(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
- rwpb_gi_Rwcal_FlavorInfoItem *flavor,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_VimResources_FlavorinfoList *flavor,
char *flavor_id);
*/
rw_status_t rwcal_delete_flavor(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
const char * flavor_id);
/*
*/
rw_status_t rwcal_get_flavor(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
const char * flavor_id,
- rwpb_gi_Rwcal_FlavorInfoItem **flavor);
+ rwpb_gi_Rwcal_YangData_RwProject_Project_VimResources_FlavorinfoList **flavor);
/*
* Get a list of the details for all flavors
*/
rw_status_t rwcal_get_flavor_list(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
- rwpb_gi_Rwcal_VimResources **flavors);
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_VimResources **flavors);
/*
* Create a virtual machine.
*/
rw_status_t rwcal_create_vm(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
- rwpb_gi_Rwcal_VMInfoItem *vm,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_VimResources_VminfoList *vm,
char **vm_id);
/*
*/
rw_status_t rwcal_delete_vm(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
const char * vm_id);
/*
*/
rw_status_t rwcal_reboot_vm(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
const char * vm_id);
/*
*/
rw_status_t rwcal_start_vm(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
const char * vm_id);
/*
*/
rw_status_t rwcal_stop_vm(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
const char * vm_id);
/*
*/
rw_status_t rwcal_get_vm_list(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
- rwpb_gi_Rwcal_VimResources** vms);
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_VimResources** vms);
/*
* Create a tenant.
*/
rw_status_t rwcal_create_tenant(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
const char * tenant_name,
char *** tenant_info);
*/
rw_status_t rwcal_delete_tenant(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
const char * tenant_id);
/*
*/
rw_status_t rwcal_get_tenant_list(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
- rwpb_gi_Rwcal_VimResources **tenants);
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_VimResources **tenants);
/*
* Create a role.
*/
rw_status_t rwcal_create_role(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
const char * role_name,
char *** role_info);
*/
rw_status_t rwcal_delete_role(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
const char * role_id);
/*
*/
rw_status_t rwcal_get_role_list(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
- rwpb_gi_Rwcal_VimResources **roles);
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_VimResources **roles);
/*
* Add a new host
*/
rw_status_t rwcal_add_host(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
- rwpb_gi_Rwcal_HostInfoItem *host,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_VimResources_HostinfoList *host,
char **host_id);
/*
*/
rw_status_t rwcal_remove_host(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
const char *host_id);
/*
*/
rw_status_t rwcal_get_host(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
const char *host_id,
- rwpb_gi_Rwcal_HostInfoItem **host);
+ rwpb_gi_Rwcal_YangData_RwProject_Project_VimResources_HostinfoList **host);
/*
* Get a list of hosts
*/
rw_status_t rwcal_get_host_list(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
- rwpb_gi_Rwcal_VimResources **hosts);
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_VimResources **hosts);
/*
* Create a new port
*/
rw_status_t rwcal_create_port(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
- rwpb_gi_Rwcal_PortInfoItem *port,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_VimResources_PortinfoList *port,
char **port_id);
/*
*/
rw_status_t rwcal_delete_port(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
const char *port_id);
/*
*/
rw_status_t rwcal_get_port(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
const char *port_id,
- rwpb_gi_Rwcal_PortInfoItem **port);
+ rwpb_gi_Rwcal_YangData_RwProject_Project_VimResources_PortinfoList **port);
/*
* Get a list of ports
*/
rw_status_t rwcal_get_port_list(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
- rwpb_gi_Rwcal_VimResources **ports);
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_VimResources **ports);
/*
* Create a new network
*/
rw_status_t rwcal_create_network(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
- rwpb_gi_Rwcal_NetworkInfoItem *network,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_VimResources_NetworkinfoList *network,
char **network_id);
/*
*/
rw_status_t rwcal_delete_network(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
const char *network_id);
/*
*/
rw_status_t rwcal_get_network(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
const char *network_id,
- rwpb_gi_Rwcal_NetworkInfoItem **network);
+ rwpb_gi_Rwcal_YangData_RwProject_Project_VimResources_NetworkinfoList **network);
/*
* Get a the management network
*/
rw_status_t rwcal_get_management_network(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
- rwpb_gi_Rwcal_NetworkInfoItem **network);
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_VimResources_NetworkinfoList **network);
/*
* Get a list of networks
*/
rw_status_t rwcal_get_network_list(
rwcal_module_ptr_t rwcal,
- rwpb_gi_Rwcal_CloudAccount *account,
- rwpb_gi_Rwcal_VimResources **networks);
+ rwpb_gi_Rwcal_YangData_RwProject_Project_CloudAccounts_CloudAccountList *account,
+ rwpb_gi_Rwcal_YangData_RwProject_Project_VimResources **networks);
/*
* Get a RwLog Context so that log messages can go to rwlog
include(rift_plugin)
-rift_install_python_plugin(rwcalproxytasklet rwcalproxytasklet.py)
+rift_install_gobject_python_plugin(rwcalproxytasklet rwcalproxytasklet.py COMPONENT ${INSTALL_COMPONENT})
# Workaround RIFT-6485 - rpmbuild defaults to python2 for
# anything not in a site-packages directory so we have to
FILES
rift/tasklets/rwcalproxytasklet/__init__.py
rift/tasklets/rwcalproxytasklet/rwcalproxytasklet.py
- COMPONENT rwcalproxytasklet-1.0
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY)
),
),
+ (r"/api/get_virtual_link_by_name", CalRequestHandler,
+ mk_attrs(
+ cal_method="get_virtual_link_by_name",
+ input_params=[
+ RPCParam("link_name"),
+ ],
+ output_params=[
+ RPCParam("response", "VirtualLinkInfoParams"),
+ ],
+ ),
+ ),
+
(r"/api/get_virtual_link_list", CalRequestHandler,
mk_attrs(
cal_method="get_virtual_link_list",
super().start()
cal = self.get_cal_interface()
- account = RwcalYang.CloudAccount(account_type="cloudsim")
+ account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList(account_type="cloudsim")
self.app = CalProxyApp(self.log, self.loop, cal, account)
self._dts = rift.tasklets.DTS(
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
VALA_PACKAGES
rw_types-1.0 rw_yang-1.0 rw_keyspec-1.0 rw_yang_pb-1.0 rw_schema_proto-1.0
rw_log_yang-1.0 rw_base_yang-1.0 rwcal_yang-1.0 rw_manifest_yang-1.0 protobuf_c-1.0 ietf_netconf_yang-1.0
- rw_log-1.0
+ rw_log-1.0 rw_project_yang-1.0 rw_user_yang-1.0 rw_rbac_base_yang-1.0
VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang
${RIFT_SUBMODULE_BINARY_ROOT}/rwvcs/plugins/yang
GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
- DEPENDS rwcal_yang rwlog_gi rwschema_yang rwmanifest_yang
+ DEPENDS rwcal_yang rwmanifest_yang
)
rift_install_vala_artifacts(
VAPI_FILES ${VALA_LONG_NAME}.vapi
GIR_FILES ${VALA_TYPELIB_PREFIX}.gir
TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
DEST_PREFIX .
)
* Cloud Account Credentails Validation related API
*/
public abstract RwTypes.RwStatus validate_cloud_creds(
- Rwcal.CloudAccount account,
- out Rwcal.CloudConnectionStatus status);
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ out Rwcal.YangData_Rwcal_ConnectionStatus status);
/*
* Image related APIs
*/
public abstract RwTypes.RwStatus get_image_list(
- Rwcal.CloudAccount account,
- out Rwcal.VimResources images);
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ out Rwcal.YangData_RwProject_Project_VimResources images);
public abstract RwTypes.RwStatus create_image(
- Rwcal.CloudAccount account,
- Rwcal.ImageInfoItem image,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ Rwcal.YangData_RwProject_Project_VimResources_ImageinfoList image,
out string image_id);
public abstract RwTypes.RwStatus delete_image(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string image_id);
public abstract RwTypes.RwStatus get_image(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string image_id,
- out Rwcal.ImageInfoItem image);
+ out Rwcal.YangData_RwProject_Project_VimResources_ImageinfoList image);
/*
* VM Releated APIs
*/
public abstract RwTypes.RwStatus create_vm(
- Rwcal.CloudAccount account,
- Rwcal.VMInfoItem vm,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ Rwcal.YangData_RwProject_Project_VimResources_VminfoList vm,
out string vm_id);
public abstract RwTypes.RwStatus start_vm(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string vm_id);
public abstract RwTypes.RwStatus stop_vm(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string vm_id);
public abstract RwTypes.RwStatus delete_vm(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string vm_id);
public abstract RwTypes.RwStatus reboot_vm(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string vm_id);
public abstract RwTypes.RwStatus get_vm_list(
- Rwcal.CloudAccount account,
- out Rwcal.VimResources vms);
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ out Rwcal.YangData_RwProject_Project_VimResources vms);
public abstract RwTypes.RwStatus get_vm(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string vm_id,
- out Rwcal.VMInfoItem vm);
+ out Rwcal.YangData_RwProject_Project_VimResources_VminfoList vm);
/*
* Flavor related APIs
*/
public abstract RwTypes.RwStatus create_flavor(
- Rwcal.CloudAccount account,
- Rwcal.FlavorInfoItem flavor_info_item,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ Rwcal.YangData_RwProject_Project_VimResources_FlavorinfoList flavor_info_item,
out string flavor_id);
public abstract RwTypes.RwStatus delete_flavor(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string flavor_id);
public abstract RwTypes.RwStatus get_flavor_list(
- Rwcal.CloudAccount account,
- out Rwcal.VimResources flavors);
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ out Rwcal.YangData_RwProject_Project_VimResources flavors);
public abstract RwTypes.RwStatus get_flavor(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string flavor_id,
- out Rwcal.FlavorInfoItem flavor);
+ out Rwcal.YangData_RwProject_Project_VimResources_FlavorinfoList flavor);
/*
* Tenant related APIs
*/
public abstract RwTypes.RwStatus create_tenant(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string tenant_name,
[CCode (array_length = false, array_null_terminated = true)]
out string [] tenant_info);
public abstract RwTypes.RwStatus delete_tenant(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string tenant_id);
public abstract RwTypes.RwStatus get_tenant_list(
- Rwcal.CloudAccount account,
- out Rwcal.VimResources tenants);
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ out Rwcal.YangData_RwProject_Project_VimResources tenants);
/*
* Role related APIs
*/
public abstract RwTypes.RwStatus create_role(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string role_name,
[CCode (array_length = false, array_null_terminated = true)]
out string [] role_info);
public abstract RwTypes.RwStatus delete_role(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string role_id);
public abstract RwTypes.RwStatus get_role_list(
- Rwcal.CloudAccount account,
- out Rwcal.VimResources roles);
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ out Rwcal.YangData_RwProject_Project_VimResources roles);
/*
* Port related APIs
*/
public abstract RwTypes.RwStatus create_port(
- Rwcal.CloudAccount account,
- Rwcal.PortInfoItem port,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ Rwcal.YangData_RwProject_Project_VimResources_PortinfoList port,
out string port_id);
public abstract RwTypes.RwStatus delete_port(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string port_id);
public abstract RwTypes.RwStatus get_port(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string port_id,
- out Rwcal.PortInfoItem port);
+ out Rwcal.YangData_RwProject_Project_VimResources_PortinfoList port);
public abstract RwTypes.RwStatus get_port_list(
- Rwcal.CloudAccount account,
- out Rwcal.VimResources ports);
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ out Rwcal.YangData_RwProject_Project_VimResources ports);
/*
* Host related APIs
*/
public abstract RwTypes.RwStatus add_host(
- Rwcal.CloudAccount account,
- Rwcal.HostInfoItem host,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ Rwcal.YangData_RwProject_Project_VimResources_HostinfoList host,
out string host_id);
public abstract RwTypes.RwStatus remove_host(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string host_id);
public abstract RwTypes.RwStatus get_host(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string host_id,
- out Rwcal.HostInfoItem host);
+ out Rwcal.YangData_RwProject_Project_VimResources_HostinfoList host);
public abstract RwTypes.RwStatus get_host_list(
- Rwcal.CloudAccount account,
- out Rwcal.VimResources hosts);
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ out Rwcal.YangData_RwProject_Project_VimResources hosts);
/*
* Network related APIs
*/
public abstract RwTypes.RwStatus create_network(
- Rwcal.CloudAccount account,
- Rwcal.NetworkInfoItem network,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ Rwcal.YangData_RwProject_Project_VimResources_NetworkinfoList network,
out string network_id);
public abstract RwTypes.RwStatus delete_network(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string network_id);
public abstract RwTypes.RwStatus get_network(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string network_id,
- out Rwcal.NetworkInfoItem network);
+ out Rwcal.YangData_RwProject_Project_VimResources_NetworkinfoList network);
public abstract RwTypes.RwStatus get_network_list(
- Rwcal.CloudAccount account,
- out Rwcal.VimResources networks);
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ out Rwcal.YangData_RwProject_Project_VimResources networks);
public abstract RwTypes.RwStatus get_management_network(
- Rwcal.CloudAccount account,
- out Rwcal.NetworkInfoItem network);
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ out Rwcal.YangData_RwProject_Project_VimResources_NetworkinfoList network);
/*
* Higher Order CAL APIs
*/
public abstract void create_virtual_link(
- Rwcal.CloudAccount account,
- Rwcal.VirtualLinkReqParams link_params,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ Rwcal.YangData_RwProject_Project_VirtualLinkReqParams link_params,
out RwcalStatus status,
out string link_id);
public abstract RwTypes.RwStatus delete_virtual_link(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string link_id);
public abstract RwTypes.RwStatus get_virtual_link(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string link_id,
- out Rwcal.VirtualLinkInfoParams response);
+ out Rwcal.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList response);
+
+ public abstract RwTypes.RwStatus get_virtual_link_by_name(
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ string link_name,
+ out Rwcal.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList response);
public abstract RwTypes.RwStatus get_virtual_link_list(
- Rwcal.CloudAccount account,
- out Rwcal.VNFResources resources);
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ out Rwcal.YangData_RwProject_Project_VnfResources resources);
public abstract void create_vdu(
- Rwcal.CloudAccount account,
- Rwcal.VDUInitParams vdu_params,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ Rwcal.YangData_RwProject_Project_VduInitParams vdu_params,
out RwcalStatus status,
out string vdu_id);
public abstract RwTypes.RwStatus modify_vdu(
- Rwcal.CloudAccount account,
- Rwcal.VDUModifyParams vdu_params);
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ Rwcal.YangData_RwProject_Project_VduModifyParams vdu_params);
public abstract RwTypes.RwStatus delete_vdu(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string vdu_id);
- public abstract RwTypes.RwStatus get_vdu(
- Rwcal.CloudAccount account,
+ public abstract void get_vdu(
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string vdu_id,
- out Rwcal.VDUInfoParams response);
-
- public abstract RwTypes.RwStatus get_vdu_list(
- Rwcal.CloudAccount account,
- out Rwcal.VNFResources resources);
+ string mgmt_network,
+ out RwcalStatus status,
+ out Rwcal.YangData_RwProject_Project_VnfResources_VduInfoList response);
+ public abstract void get_vdu_list(
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ out RwcalStatus status,
+ out Rwcal.YangData_RwProject_Project_VnfResources resources);
}
}
include(rift_plugin)
-set(PKG_NAME rwcal-aws)
-set(PKG_VERSION 1.0)
-set(PKG_RELEASE 1)
-set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+set(INSTALL_COMPONENT rwcal-plugin-aws)
-rift_install_python_plugin(rwcal_aws rwcal_aws.py)
+rift_install_gobject_python_plugin(rwcal_aws rwcal_aws.py COMPONENT ${INSTALL_COMPONENT})
rift_python_install_tree(
FILES
rift/rwcal/aws/prepare_vm.py
rift/rwcal/aws/delete_vm.py
PYTHON3_ONLY
- COMPONENT ${PKG_LONG_NAME})
+ COMPONENT ${INSTALL_COMPONENT})
"""
try:
interface = self.get_network_interface(NetworkInterfaceId=NetworkInterfaceId)
- if interface and interface.association and 'AssociationId' in interface.association:
- self._ec2_client_handle.disassociate_address(AssociationId = interface.association['AssociationId'])
- self._ec2_client_handle.release_address(AllocationId=interface.association['AllocationId'])
+ if interface and interface.association_attribute and 'AssociationId' in interface.association_attribute:
+ self._ec2_client_handle.disassociate_address(AssociationId = interface.association_attribute['AssociationId'])
+ self._ec2_client_handle.release_address(AllocationId=interface.association_attribute['AllocationId'])
except Exception as e:
logger.error("AWSDriver: Associating Public IP to network interface %s failed with exception: %s",NetworkInterfaceId,(repr(e)))
raise
logger.info("Deleting network interface with id %s",port_id)
port = drv.get_network_interface(port_id)
if port:
- if port.association and 'AssociationId' in port.association:
+ if port.association_attribute and 'AssociationId' in port.association_attribute:
drv.disassociate_public_ip_from_network_interface(NetworkInterfaceId=port.id)
drv.delete_network_interface(port.id)
else:
Returns:
Validation Code and Details String
"""
- status = RwcalYang.CloudConnectionStatus(
+ status = RwcalYang.YangData_Rwcal_ConnectionStatus(
status="success",
details="AWS Cloud Account validation not implemented yet"
)
Returns:
The ImageInfoItem
"""
- img = RwcalYang.ImageInfoItem()
+ img = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList()
img.name = img_info.name
img.id = img_info.id
Returns:
The the list of images in VimResources object
"""
- response = RwcalYang.VimResources()
+ response = RwcalYang.YangData_RwProject_Project_VimResources()
images = self._get_driver(account).list_images()
for img in images:
response.imageinfo_list.append(RwcalAWSPlugin._fill_image_info(img))
Returns:
Protobuf Gi object for VM
"""
- vm = RwcalYang.VMInfoItem()
+ vm = RwcalYang.YangData_RwProject_Project_VimResources_VminfoList()
vm.vm_id = vm_info.id
vm.image_id = vm_info.image_id
vm.flavor_id = vm_info.instance_type
Returns:
List containing VM information
"""
- response = RwcalYang.VimResources()
+ response = RwcalYang.YangData_RwProject_Project_VimResources()
vms = self._get_driver(account).list_instances()
for vm in vms:
response.vminfo_list.append(RwcalAWSPlugin._fill_vm_info(vm))
vcpus = flavor.vm_flavor.vcpu_count,
disk = flavor.vm_flavor.storage_gb)
- new_flavor = RwcalYang.FlavorInfoItem()
+ new_flavor = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList()
new_flavor.name = flavor.name
new_flavor.vm_flavor.memory_mb = flavor.vm_flavor.memory_mb
new_flavor.vm_flavor.vcpu_count = flavor.vm_flavor.vcpu_count
Returns:
Object of class FlavorInfoItem
"""
- flavor = RwcalYang.FlavorInfoItem()
+ flavor = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList()
flavor.name = flavor_info.name
flavor.id = flavor_info.id
flavor.vm_flavor.memory_mb = flavor_info.vm_flavor.memory_mb
Returns:
List of flavors
"""
- response = RwcalYang.VimResources()
+ response = RwcalYang.YangData_RwProject_Project_VimResources()
for flv in self._flavor_list:
response.flavorinfo_list.append(RwcalAWSPlugin._fill_flavor_info(flv))
return response
Returns:
Network info item
"""
- network = RwcalYang.NetworkInfoItem()
+ network = RwcalYang.YangData_RwProject_Project_VimResources_NetworkinfoList()
network.network_id = network_info.subnet_id
network.subnet = network_info.cidr_block
if network_info.tags:
Returns:
List of networks
"""
- response = RwcalYang.VimResources()
+ response = RwcalYang.YangData_RwProject_Project_VimResources()
networks = self._get_driver(account).get_subnet_list()
for network in networks:
response.networkinfo_list.append(self._fill_network_info(network, account))
Returns:
Port info item
"""
- port = RwcalYang.PortInfoItem()
+ port = RwcalYang.YangData_RwProject_Project_VimResources_PortinfoList()
port.port_id = port_info.id
port.network_id = port_info.subnet_id
Returns:
Port info list
"""
- response = RwcalYang.VimResources()
+ response = RwcalYang.YangData_RwProject_Project_VimResources()
ports = self._get_driver(account).get_network_interface_list()
for port in ports:
response.portinfo_list.append(RwcalAWSPlugin._fill_port_info(port))
@staticmethod
def _fill_connection_point_info(c_point, port_info):
- """Create a GI object for RwcalYang.VDUInfoParams_ConnectionPoints()
+ """Create a GI object for RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList_ConnectionPoints()
Converts EC2.NetworkInterface object returned by AWS driver into
Protobuf Gi Object
Arguments:
port_info - Network Interface information from AWS
Returns:
- Protobuf Gi object for RwcalYang.VDUInfoParams_ConnectionPoints
+ Protobuf Gi object for RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList_ConnectionPoints
"""
c_point.virtual_link_id = port_info.subnet_id
c_point.connection_point_id = port_info.id
if port_info.attachment:
c_point.vdu_id = port_info.attachment['InstanceId']
c_point.ip_address = port_info.private_ip_address
- if port_info.association and 'PublicIp' in port_info.association:
- c_point.public_ip = port_info.association['PublicIp']
+ if port_info.association and port_info.association.public_ip:
+ c_point.public_ip = port_info.association.public_ip
if port_info.tag_set:
for tag in port_info.tag_set:
if tag['Key'] == 'Name':
Returns:
Protobuf Gi object for VirtualLinkInfoParams
"""
- link = RwcalYang.VirtualLinkInfoParams()
+ link = RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList()
if network_info.state == 'available':
link.state = 'active'
else:
Returns:
Protobuf Gi object for VDUInfoParams
"""
- vdu = RwcalYang.VDUInfoParams()
+ vdu = RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList()
vdu.vdu_id = vm_info.id
mgmt_port = [port for port in port_list if port.attachment and port.attachment['DeviceIndex'] == 0]
assert(len(mgmt_port) == 1)
vdu.management_ip = mgmt_port[0].private_ip_address
- if mgmt_port[0].association and 'PublicIp' in mgmt_port[0].association:
- vdu.public_ip = mgmt_port[0].association['PublicIp']
+ if mgmt_port[0].association and mgmt_port[0].association.public_ip:
+ vdu.public_ip = mgmt_port[0].association.public_ip
#For now set managemnet ip also to public ip
#vdu.management_ip = vdu.public_ip
if vm_info.tags:
#if vm_info.placement and 'AvailabilityZone' in vm_info.placement:
# vdu.availability_zone = vm_info.placement['AvailabilityZone']
# Fill the port information
- cp_port_list = [port for port in port_list if port.attachment and port.attachment['DeviceIndex'] != 0]
+
+ # cp_port_list = [port for port in port_list if port.attachment and port.attachment['DeviceIndex'] != 0]
+ # The above conversion of the port list was leaving out the management networks attached to the vdu.
- for port in cp_port_list:
+ for port in port_list:
c_point = vdu.connection_points.add()
RwcalAWSPlugin._fill_connection_point_info(c_point, port)
+
return vdu
link_id - id for the virtual-link
Returns:
- Object of type RwcalYang.VirtualLinkInfoParams
+ Object of type RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList
"""
drv = self._get_driver(account)
network = drv.get_subnet(SubnetId=link_id)
virtual_link = RwcalAWSPlugin._fill_virtual_link_info(network, port_list)
return virtual_link
+ @rwstatus(ret_on_failure=[None])
+ def do_get_virtual_link_by_name(self, account, link_name):
+ raise NotImplementedError()
+
@rwstatus(ret_on_failure=[[]])
def do_get_virtual_link_list(self, account):
"""Get information about all the virtual links
account - a cloud account
Returns:
- A list of objects of type RwcalYang.VirtualLinkInfoParams
+ A list of objects of type RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList
"""
- vnf_resources = RwcalYang.VNFResources()
+ vnf_resources = RwcalYang.YangData_RwProject_Project_VnfResources()
drv = self._get_driver(account)
networks = drv.get_subnet_list()
for network in networks:
Arguments:
account - a cloud account
- vdu_init - information about VDU to create (RwcalYang.VDUInitParams)
+ vdu_init - information about VDU to create (RwcalYang.YangData_RwProject_Project_VduInitParams)
Returns:
The vdu_id
Arguments:
account - a cloud account
- vdu_modify - Information about VDU Modification (RwcalYang.VDUModifyParams)
+ vdu_modify - Information about VDU Modification (RwcalYang.YangData_RwProject_Project_VduModifyParams)
"""
### First create required number of ports aka connection points
drv = self._get_driver(account)
for c_point in vdu_modify.connection_points_remove:
port = drv.get_network_interface(NetworkInterfaceId=c_point.connection_point_id)
#Check if elastic IP is associated with interface and release it
- if port and port.association and 'AssociationId' in port.association:
+ if port and port.association is not None:
drv.disassociate_public_ip_from_network_interface(NetworkInterfaceId=port.id)
if port and port.attachment and port.attachment['DeviceIndex'] != 0:
drv.detach_network_interface(AttachmentId = port.attachment['AttachmentId'],Force=True) #force detach as otherwise delete fails
self.cleanup_vdu_on_term(account,vdu_id,delete_port_list)
- @rwstatus(ret_on_failure=[None])
- def do_get_vdu(self, account, vdu_id):
+ @rwcalstatus(ret_on_failure=[None])
+ def do_get_vdu(self, account, vdu_id, mgmt_network):
"""Get information about a virtual deployment unit.
Arguments:
account - a cloud account
- vdu_id - id for the vdu
+ vdu_id - id for the vdu,
+ mgmt_network - Added due to need for mgmt network.
+ # TO DO: Investigate the need for aws.
Returns:
- Object of type RwcalYang.VDUInfoParams
+ Object of type RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList
"""
drv = self._get_driver(account)
return RwcalAWSPlugin._fill_vdu_info(vm,port_list)
- @rwstatus(ret_on_failure=[[]])
+ @rwcalstatus(ret_on_failure=[None])
def do_get_vdu_list(self, account):
"""Get information about all the virtual deployment units
account - a cloud account
Returns:
- A list of objects of type RwcalYang.VDUInfoParams
+ A list of objects of type RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList
"""
- vnf_resources = RwcalYang.VNFResources()
+ vnf_resources = RwcalYang.YangData_RwProject_Project_VnfResources()
drv = self._get_driver(account)
vms = drv.list_instances()
for vm in vms:
include(rift_plugin)
-set(PKG_NAME rwcal-cloudsim)
-set(PKG_VERSION 1.0)
-set(PKG_RELEASE 1)
-set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+set(INSTALL_COMPONENT rwcal-plugin-cloudsim)
-rift_install_python_plugin(rwcal_cloudsim rwcal_cloudsim.py)
+rift_install_gobject_python_plugin(rwcal_cloudsim rwcal_cloudsim.py COMPONENT ${INSTALL_COMPONENT})
rift_python_install_tree(
FILES
rift/rwcal/cloudsim/net.py
rift/rwcal/cloudsim/shell.py
PYTHON3_ONLY
- COMPONENT ${PKG_LONG_NAME})
+ COMPONENT ${INSTALL_COMPONENT})
self._bridge_to_ports = collections.defaultdict(list)
# Create the management network
- self.mgmt_network = RwcalYang.NetworkInfoItem()
+ self.mgmt_network = RwcalYang.YangData_RwProject_Project_VimResources_NetworkinfoList()
self.mgmt_network.network_name = MGMT_NETWORK_NAME
network = MGMT_NETWORK_INTERFACE_IP.network
Returns:
Validation Code and Details String
"""
- status = RwcalYang.CloudConnectionStatus(
+ status = RwcalYang.YangData_Rwcal_ConnectionStatus(
status="success",
details=""
)
@rwstatus(ret_on_failure=[[]])
def do_get_image_list(self, account):
"""Returns a list of images"""
- resources = RwcalYang.VimResources()
+ resources = RwcalYang.YangData_RwProject_Project_VimResources()
for image in self.cal.get_image_list():
resources.imageinfo_list.append(rwcal_copy_object(image))
a list of VMInfoItem objects
"""
- resources = RwcalYang.VimResources()
+ resources = RwcalYang.YangData_RwProject_Project_VimResources()
for vm in self.cal.get_vm_list():
resources.vminfo_list.append(rwcal_copy_object(vm))
"""
Return a list of flavors
"""
- vim_resources = RwcalYang.VimResources()
+ vim_resources = RwcalYang.YangData_RwProject_Project_VimResources()
for flavor in self.cal.flavors.values():
- f = RwcalYang.FlavorInfoItem()
+ f = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList()
f.copy_from(flavor)
vim_resources.flavorinfo_list.append(f)
logger.debug("Returning list of flavor-info of size: %d", len(vim_resources.flavorinfo_list))
@rwstatus(ret_on_failure=[[]])
def do_get_port_list(self, account):
"""Returns a list of ports"""
- resources = RwcalYang.VimResources()
+ resources = RwcalYang.YangData_RwProject_Project_VimResources()
for port in self.datastore.cal_manager.get_port_list():
resources.portinfo_list.append(rwcal_copy_object(port))
@rwstatus(ret_on_failure=[[]])
def do_get_network_list(self, account):
"""Returns a list of network objects"""
- resources = RwcalYang.VimResources()
+ resources = RwcalYang.YangData_RwProject_Project_VimResources()
for network in self.cal.get_network_list():
resources.networkinfo_list.append(rwcal_copy_object(network))
Returns:
The vdu_id
"""
- network = RwcalYang.NetworkInfoItem()
+ network = RwcalYang.YangData_RwProject_Project_VimResources_NetworkinfoList()
network.network_name = link_params.name
network.subnet = link_params.subnet
@staticmethod
def fill_connection_point_info(c_point, port_info):
- """Create a GI object for RwcalYang.VDUInfoParams_ConnectionPoints()
+ """Create a GI object for RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList_ConnectionPoints()
Converts Port information dictionary object returned by container cal
driver into Protobuf Gi Object
Arguments:
port_info - Port information from container cal
Returns:
- Protobuf Gi object for RwcalYang.VDUInfoParams_ConnectionPoints
+ Protobuf Gi object for RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList_ConnectionPoints
"""
c_point.name = port_info.port_name
c_point.connection_point_id = port_info.port_id
Returns:
Protobuf Gi object for VirtualLinkInfoParams
"""
- link = RwcalYang.VirtualLinkInfoParams()
+ link = RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList()
link.name = network_info.network_name
link.state = 'active'
link.virtual_link_id = network_info.network_id
link_id - id for the virtual-link
Returns:
- Object of type RwcalYang.VirtualLinkInfoParams
+ Object of type RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList
"""
network = self.do_get_network(account, link_id, no_rwstatus=True)
return virtual_link
+ @rwstatus(ret_on_failure=[None])
+ def do_get_virtual_link_by_name(self, account, link_name):
+ raise NotImplementedError()
+
@rwstatus(ret_on_failure=[None])
def do_get_virtual_link_list(self, account):
"""Get information about all the virtual links
account - a cloud account
Returns:
- A list of objects of type RwcalYang.VirtualLinkInfoParams
+ A list of objects of type RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList
"""
networks = self.do_get_network_list(account, no_rwstatus=True)
- vnf_resources = RwcalYang.VNFResources()
+ vnf_resources = RwcalYang.YangData_RwProject_Project_VnfResources()
for network in networks.networkinfo_list:
virtual_link = self.do_get_virtual_link(account, network.network_id, no_rwstatus=True)
vnf_resources.virtual_link_info_list.append(virtual_link)
account - a cloud account
c_point - connection_points
"""
- port = RwcalYang.PortInfoItem()
+ port = RwcalYang.YangData_RwProject_Project_VimResources_PortinfoList()
port.port_name = c_point.name
port.network_id = c_point.virtual_link_id
port.port_type = 'normal' ### Find Port type from network_profile under cloud account
Arguments:
account - a cloud account
- vdu_init - information about VDU to create (RwcalYang.VDUInitParams)
+ vdu_init - information about VDU to create (RwcalYang.YangData_RwProject_Project_VduInitParams)
Returns:
The vdu_id
"""
### Create VM
- vm = RwcalYang.VMInfoItem()
+ vm = RwcalYang.YangData_RwProject_Project_VimResources_VminfoList()
vm.vm_name = vdu_init.name
vm.image_id = vdu_init.image_id
if vdu_init.vdu_init.has_field('userdata'):
Arguments:
account - a cloud account
- vdu_modify - Information about VDU Modification (RwcalYang.VDUModifyParams)
+ vdu_modify - Information about VDU Modification (RwcalYang.YangData_RwProject_Project_VduModifyParams)
"""
### First create required number of ports aka connection points
port_list = []
returns:
protobuf gi object for vduinfoparams
"""
- vdu = RwcalYang.VDUInfoParams()
+ vdu = RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList()
vdu.name = vm_info.vm_name
vdu.vdu_id = vm_info.vm_id
vdu.management_ip = vm_info.management_ip
return vdu
- @rwstatus(ret_on_failure=[None])
- def do_get_vdu(self, account, vdu_id):
+ @rwcalstatus(ret_on_failure=[None])
+ def do_get_vdu(self, account, vdu_id, mgmt_network):
"""Get information about a virtual deployment unit.
Arguments:
account - a cloud account
vdu_id - id for the vdu
+ mgmt_network - Added due to need for mgmt network.
+ # TO DO: Investigate the need for cloudsim.
Returns:
- Object of type RwcalYang.VDUInfoParams
+ Object of type RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList
"""
port_id_list = self.cal.get_vm_ports(vdu_id)
ports = [self.cal.get_port(p_id) for p_id in port_id_list]
return vdu_info
- @rwstatus(ret_on_failure=[None])
+ @rwcalstatus(ret_on_failure=[None])
def do_get_vdu_list(self, account):
"""Get information about all the virtual deployment units
account - a cloud account
Returns:
- A list of objects of type RwcalYang.VDUInfoParams
+ A list of objects of type RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList
"""
- vnf_resources = RwcalYang.VNFResources()
+ vnf_resources = RwcalYang.YangData_RwProject_Project_VnfResources()
vm_resources = self.do_get_vm_list(account, no_rwstatus=True)
for vm in vm_resources.vminfo_list:
@classmethod
def create_image(cls):
- image = RwcalYang.ImageInfoItem()
+ image = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList()
image.name = "rift-lxc-image"
image.location = "/net/sharedfiles/home1/common/vm/R0.4/rift-mano-devel-latest.qcow2"
image.disk_format = "qcow2"
cls.cleanUp()
lvm.create("rift")
- cls.account = RwcalYang.CloudAccount()
+ cls.account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
cls.cal = rwcal_cloudsim.CloudSimPlugin()
cls.create_image()
return vm
def create_virtual_link(self, index):
- link = RwcalYang.VirtualLinkReqParams()
+ link = RwcalYang.YangData_RwProject_Project_VirtualLinkReqParams()
link.name = 'link-{}'.format(index + 1)
link.subnet = '192.168.{}.0/24'.format(index + 1)
return link, link_id
def create_vdu(self, image, index, virtual_link_ids=None):
- vdu_init = RwcalYang.VDUInitParams()
+ vdu_init = RwcalYang.YangData_RwProject_Project_VduInitParams()
vdu_init.name = 'rift-vdu{}'.format(index + 1)
vdu_init.node_id = str(uuid.uuid4())
vdu_init.image_id = image.id
def test_create_delete_vdu(self):
vdu, vdu_id = self.create_vdu(self.image, 0)
- get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+ get_vdu = self.cal.do_get_vdu(self.account, vdu_id, None, no_rwstatus=True)
assert get_vdu.image_id == self.image.id
assert get_vdu.name == vdu.name
def test_create_vdu_single_connection_point(self):
link, link_id = self.create_virtual_link(0)
vdu, vdu_id = self.create_vdu(self.image, 0, [link_id])
- get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+ get_vdu = self.cal.do_get_vdu(self.account, vdu_id, None, no_rwstatus=True)
assert len(get_vdu.connection_points) == 1
cp = get_vdu.connection_points[0]
assert (ipaddress.IPv4Address(cp.ip_address) in
link_id_map = {link1_id: link1, link2_id: link2, link3_id: link3}
vdu, vdu_id = self.create_vdu(self.image, 0, link_id_map.keys())
- get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+ get_vdu = self.cal.do_get_vdu(self.account, vdu_id, None, no_rwstatus=True)
assert len(get_vdu.connection_points) == 3
for cp in get_vdu.connection_points:
assert cp.virtual_link_id in link_id_map
vdu, vdu_id = self.create_vdu(self.image, 0)
link, link_id = self.create_virtual_link(0)
- get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+ get_vdu = self.cal.do_get_vdu(self.account, vdu_id, None, no_rwstatus=True)
assert len(get_vdu.connection_points) == 0
- modify_vdu = RwcalYang.VDUModifyParams()
+ modify_vdu = RwcalYang.YangData_RwProject_Project_VduModifyParams()
modify_vdu.vdu_id = vdu_id
cp = modify_vdu.connection_points_add.add()
cp.virtual_link_id = link_id
cp.name = "link_1"
self.cal.do_modify_vdu(self.account, modify_vdu, no_rwstatus=True)
- get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+ get_vdu = self.cal.do_get_vdu(self.account, vdu_id, None, no_rwstatus=True)
assert len(get_vdu.connection_points) == 1
- modify_vdu = RwcalYang.VDUModifyParams()
+ modify_vdu = RwcalYang.YangData_RwProject_Project_VduModifyParams()
modify_vdu.vdu_id = vdu_id
cp = modify_vdu.connection_points_remove.add()
cp.connection_point_id = get_vdu.connection_points[0].connection_point_id
self.cal.do_modify_vdu(self.account, modify_vdu, no_rwstatus=True)
- get_vdu = self.cal.do_get_vdu(self.account, vdu_id, no_rwstatus=True)
+ get_vdu = self.cal.do_get_vdu(self.account, vdu_id, None, no_rwstatus=True)
assert len(get_vdu.connection_points) == 0
self.cal.do_delete_vdu(self.account, vdu_id, no_rwstatus=True)
include(rift_plugin)
-set(PKG_NAME rwcal-cloudsimproxy)
-set(PKG_VERSION 1.0)
-set(PKG_RELEASE 1)
-set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+set(INSTALL_COMPONENT rwcal-plugin-cloudsimproxy)
-
-rift_install_python_plugin(rwcal_cloudsimproxy rwcal_cloudsimproxy.py)
+rift_install_gobject_python_plugin(rwcal_cloudsimproxy rwcal_cloudsimproxy.py COMPONENT ${INSTALL_COMPONENT})
"""
self._set_host_from_account(account)
- status = RwcalYang.CloudConnectionStatus()
+ status = RwcalYang.YangData_Rwcal_ConnectionStatus()
try:
self._proxy_rpc_call("get_vm_list")
except Exception as e:
link_id - id for the virtual-link
Returns:
- Object of type RwcalYang.VirtualLinkInfoParams
+ Object of type RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList
"""
self._set_host_from_account(account)
return self._proxy_rpc_call("get_virtual_link", link_id=link_id)
+ @rwstatus(ret_on_failure=[None])
+ def do_get_virtual_link_by_name(self, account, link_name):
+ raise NotImplementedError()
+
@rwstatus(ret_on_failure=[[]])
def do_get_virtual_link_list(self, account):
"""Returns the a list of the Virtual links
Returns:
- a list of RwcalYang.VirtualLinkInfoParams objects
+ a list of RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList objects
"""
self._set_host_from_account(account)
Arguments:
account - a cloud account
- vdu_init - information about VDU to create (RwcalYang.VDUInitParams)
+ vdu_init - information about VDU to create (RwcalYang.YangData_RwProject_Project_VduInitParams)
Returns:
The vdu_id
Arguments:
account - a cloud account
- vdu_modify - Information about VDU Modification (RwcalYang.VDUModifyParams)
+ vdu_modify - Information about VDU Modification (RwcalYang.YangData_RwProject_Project_VduModifyParams)
"""
self._set_host_from_account(account)
return self._proxy_rpc_call("modify_vdu", vdu_params=vdu_modify.as_dict())
return self._proxy_rpc_call("delete_vdu", vdu_id=vdu_id)
@rwstatus(ret_on_failure=[None])
- def do_get_vdu(self, account, vdu_id):
+ def do_get_vdu(self, account, vdu_id, mgmt_network):
"""Get information about a virtual deployment unit.
Arguments:
account - a cloud account
vdu_id - id for the vdu
+ mgmt_network - Added due to need for mgmt network.
+ # TO DO: Investigate the need for cloudsimproxy.
Returns:
- Object of type RwcalYang.VDUInfoParams
+ Object of type RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList
"""
self._set_host_from_account(account)
return self._proxy_rpc_call("get_vdu", vdu_id=vdu_id)
account - a cloud account
Returns:
- A list of objects of type RwcalYang.VDUInfoParams
+ A list of objects of type RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList
"""
self._set_host_from_account(account)
return self._proxy_rpc_call("get_vdu_list")
include(rift_plugin)
### rwcal-mock package
-set(PKG_NAME rwcal-mock)
-set(PKG_VERSION 1.0)
-set(PKG_RELEASE 1)
-set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+set(INSTALL_COMPONENT rwcal-plugin-mock)
-
-rift_install_python_plugin(rwcal_mock rwcal_mock.py)
+rift_install_gobject_python_plugin(rwcal_mock rwcal_mock.py COMPONENT ${INSTALL_COMPONENT})
)
)
- account = RwcalYang.CloudAccount()
+ account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
account.name = 'mock_account'
account.account_type = 'mock'
account.mock.username = 'mock_user'
Returns:
Validation Code and Details String
"""
- status = RwcalYang.CloudConnectionStatus(
+ status = RwcalYang.YangData_Rwcal_ConnectionStatus(
status="success",
details=""
)
"""
Return a list of the names of all available images.
"""
- boxed_image_list = RwcalYang.VimResources()
+ boxed_image_list = RwcalYang.YangData_RwProject_Project_VimResources()
for image in self.resources[account.name].images.values():
- image_entry = RwcalYang.ImageInfoItem()
+ image_entry = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList()
image_entry.id = image.id
image_entry.name = image.name
if image.has_field('checksum'):
"""
Return a list of flavors
"""
- vim_resources = RwcalYang.VimResources()
+ vim_resources = RwcalYang.YangData_RwProject_Project_VimResources()
for flavor in self.resources[account.name].flavors.values():
- f = RwcalYang.FlavorInfoItem()
+ f = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList()
f.copy_from(flavor)
vim_resources.flavorinfo_list.append(f)
logger.debug("Returning list of flavor-info of size: %d", len(vim_resources.flavorinfo_list))
link_list = []
### Add virtual links
#for i in range(1):
- # vlink = RwcalYang.VirtualLinkReqParams()
+ # vlink = RwcalYang.YangData_RwProject_Project_VirtualLinkReqParams()
# vlink.name = 'link-'+str(i)
# vlink.subnet = '10.0.0.0/24'
# rs, vlink_id = self.do_create_virtual_link(account, vlink)
#### Add VDUs
#for i in range(8):
- # vdu = RwcalYang.VDUInitParams()
+ # vdu = RwcalYang.YangData_RwProject_Project_VduInitParams()
# vdu.name = 'vdu-'+str(i)
# vdu.node_id = str(i)
# vdu.image_id = self.get_uuid('image-'+str(i))
# logger.debug("Creating static VDU with name: %s", vdu.name)
for i in range(2):
- flavor = RwcalYang.FlavorInfoItem()
+ flavor = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList()
flavor.name = 'flavor-'+str(i)
flavor.vm_flavor.vcpu_count = 4
flavor.vm_flavor.memory_mb = 4096*2
rc, flavor_id = self.do_create_flavor(account, flavor)
for i in range(2):
- image = RwcalYang.ImageInfoItem()
+ image = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList()
image.name = "rwimage"
image.id = self.get_uuid('image-'+str(i))
image.checksum = self.get_uuid('rwimage'+str(i))
image.location = "/dev/null"
rc, image_id = self.do_create_image(account, image)
- image = RwcalYang.ImageInfoItem()
+ image = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList()
image.name = "Fedora-x86_64-20-20131211.1-sda.qcow2"
image.id = self.get_uuid(image.name)
image.checksum = self.get_uuid(image.name)
image.location = "/dev/null"
rc, image_id = self.do_create_image(account, image)
- image = RwcalYang.ImageInfoItem()
+ image = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList()
image.name = "Fedora-x86_64-20-20131211.1-sda-ping.qcow2"
image.id = self.get_uuid(image.name)
image.checksum = "a6ffaa77f949a9e4ebb082c6147187cf"#self.get_uuid(image.name)
image.location = "/dev/null"
rc, image_id = self.do_create_image(account, image)
- image = RwcalYang.ImageInfoItem()
+ image = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList()
image.name = "Fedora-x86_64-20-20131211.1-sda-pong.qcow2"
image.id = self.get_uuid(image.name)
image.checksum = "977484d95575f80ef8399c9cf1d45ebd"#self.get_uuid(image.name)
@rwcalstatus(ret_on_failure=[""])
def do_create_virtual_link(self, account, link_params):
vlink_id = self.get_uuid("%s_%s" % (link_params.name, len(self.resources[account.name].vlinks)))
- vlink = RwcalYang.VirtualLinkInfoParams()
+ vlink = RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList()
vlink.name = link_params.name
vlink.state = 'active'
vlink.virtual_link_id = vlink_id
logger.debug('Returning virtual-link-info for : {}'.format(link_id))
return vlink
+ @rwstatus(ret_on_failure=[None])
+ def do_get_virtual_link_by_name(self, account, link_name):
+ raise NotImplementedError()
+
@rwstatus(ret_on_failure=[""])
def do_get_virtual_link_list(self, account):
- vnf_resources = RwcalYang.VNFResources()
+ vnf_resources = RwcalYang.YangData_RwProject_Project_VnfResources()
for r in self.resources[account.name].vlinks.values():
- vlink = RwcalYang.VirtualLinkInfoParams()
+ vlink = RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList()
vlink.copy_from(r)
vnf_resources.virtual_link_info_list.append(vlink)
logger.debug("Returning list of virtual-link-info of size: %d", len(vnf_resources.virtual_link_info_list))
@rwcalstatus(ret_on_failure=[""])
def do_create_vdu(self, account, vdu_init):
vdu_id = self.get_uuid("%s_%s" % (vdu_init.name, len(self.resources[account.name].vdus)))
- vdu = RwcalYang.VDUInfoParams()
+ vdu = RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList()
vdu.vdu_id = vdu_id
vdu.name = vdu_init.name
vdu.node_id = vdu_init.node_id
p.virtual_link_id = c.virtual_link_id
# Need to add this connection_point to virtual link
vlink = self.resources[account.name].vlinks[c.virtual_link_id]
- aa = RwcalYang.VirtualLinkInfoParams_ConnectionPoints()
+ aa = RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList_ConnectionPoints()
aa.connection_point_id = p.connection_point_id
aa.name = p.name
aa.virtual_link_id = vlink.virtual_link_id
logger.debug('deleted vdu: {}'.format(vdu_id))
- @rwstatus(ret_on_failure=[None])
- def do_get_vdu(self, account, vdu_id):
+ @rwcalstatus(ret_on_failure=[None])
+ def do_get_vdu(self, account, vdu_id, mgmt_network):
+ # mgmt_network - Added due to need for mgmt network.
+ # TO DO: Investigate the need here.
vdu = self.resources[account.name].vdus[vdu_id]
logger.debug('Returning vdu-info for : {}'.format(vdu_id))
return vdu.copy()
- @rwstatus(ret_on_failure=[""])
+ @rwcalstatus(ret_on_failure=[None])
def do_get_vdu_list(self, account):
- vnf_resources = RwcalYang.VNFResources()
+ vnf_resources = RwcalYang.YangData_RwProject_Project_VnfResources()
for r in self.resources[account.name].vdus.values():
- vdu = RwcalYang.VDUInfoParams()
+ vdu = RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList()
vdu.copy_from(r)
vnf_resources.vdu_info_list.append(vdu)
logger.debug("Returning list of vdu-info of size: %d", len(vnf_resources.vdu_info_list))
include(rift_plugin)
-rift_install_python_plugin(rwcal_openmano rwcal_openmano.py)
+rift_install_gobject_python_plugin(rwcal_openmano rwcal_openmano.py COMPONENT ${INSTALL_COMPONENT})
Returns:
Validation Code and Details String
"""
- status = RwcalYang.CloudConnectionStatus(
+ status = RwcalYang.YangData_Rwcal_ConnectionStatus(
status="success",
details=""
)
@rwstatus(ret_on_failure=[[]])
def do_get_vm_list(self, account):
- return RwcalYang.VimResources()
+ return RwcalYang.YangData_RwProject_Project_VimResources()
@rwstatus
def do_create_flavor(self, account, flavor):
@rwstatus(ret_on_failure=[[]])
def do_get_port_list(self, account):
- return RwcalYang.VimResources()
+ return RwcalYang.YangData_RwProject_Project_VimResources()
@rwstatus
def do_create_network(self, account, network):
@rwstatus(ret_on_failure=[[]])
def do_get_network_list(self, account):
- return RwcalYang.VimResources()
+ return RwcalYang.YangData_RwProject_Project_VimResources()
@rwstatus(ret_on_failure=[""])
def do_create_virtual_link(self, account, link_params):
def do_get_virtual_link(self, account, link_id):
raise NotImplementedError()
+ @rwstatus(ret_on_failure=[None])
+ def do_get_virtual_link_by_name(self, account, link_name):
+ raise NotImplementedError()
+
@rwstatus(ret_on_failure=[""])
def do_get_virtual_link_list(self, account):
raise NotImplementedError()
def do_delete_vdu(self, account, vdu_id):
raise NotImplementedError()
- @rwstatus(ret_on_failure=[None])
- def do_get_vdu(self, account, vdu_id):
+ @rwcalstatus(ret_on_failure=[None])
+ def do_get_vdu(self, account, vdu_id, mgmt_network):
+ # mgmt_network - Added due to need for mgmt network.
+ # TO DO: Investigate the need for aws.
raise NotImplementedError()
- @rwstatus(ret_on_failure=[""])
+ @rwcalstatus(ret_on_failure=[None])
def do_get_vdu_list(self, account):
raise NotImplementedError()
include(rift_plugin)
-### rwcal-openstack package
-set(PKG_NAME rwcal-openmano-vimconnector)
-set(PKG_VERSION 1.0)
-set(PKG_RELEASE 1)
-set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+set(INSTALL_COMPONENT rwcal-plugin-openmano-vimconnector)
-rift_install_python_plugin(rwcal_openmano_vimconnector rwcal_openmano_vimconnector.py)
+rift_install_gobject_python_plugin(rwcal_openmano_vimconnector rwcal_openmano_vimconnector.py COMPONENT ${INSTALL_COMPONENT})
rift_python_install_tree(
FILES
rift/rwcal/openmano_vimconnector/vimconn_openvim.py
rift/rwcal/openmano_vimconnector/openmano_schemas.py
PYTHON3_ONLY
- COMPONENT ${PKG_LONG_NAME})
+ COMPONENT ${INSTALL_COMPONENT})
Returns:
Validation Code and Details String
"""
- status = RwcalYang.CloudConnectionStatus()
+ status = RwcalYang.YangData_Rwcal_ConnectionStatus()
url = 'http://{}:{}/openvim/'.format(account.openvim.host,account.openvim.port)
try:
r=requests.get(url,timeout=3)
Returns:
The TenantInfoItem
"""
- tenant = RwcalYang.TenantInfoItem()
+ tenant = RwcalYang.YangData_RwProject_Project_VimResources_TenantinfoList()
tenant.tenant_name = tenant_info['name']
tenant.tenant_id = tenant_info['id']
return tenant
@rwstatus(ret_on_failure=[[]])
def do_get_tenant_list(self, account):
- response = RwcalYang.VimResources()
+ response = RwcalYang.YangData_RwProject_Project_VimResources()
with self._use_driver(account) as drv:
tenants = drv.get_tenant_list()
for tenant in tenants:
@staticmethod
def _fill_image_info(img_info):
- img = RwcalYang.ImageInfoItem()
+ img = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList()
img.name = img_info['name']
img.id = img_info['id']
img.location = img_info['path']
@rwstatus(ret_on_failure=[[]])
def do_get_image_list(self, account):
- response = RwcalYang.VimResources()
+ response = RwcalYang.YangData_RwProject_Project_VimResources()
with self._use_driver(account) as drv:
images = drv.get_image_list()
for img in images:
@rwstatus(ret_on_failure=[[]])
def do_get_vm_list(self, account):
- return RwcalYang.VimResources()
+ return RwcalYang.YangData_RwProject_Project_VimResources()
def _fill_flavor_create_attributes(flavor):
flavor_dict = dict()
@staticmethod
def _fill_flavor_info(flavor_info):
- flavor = RwcalYang.FlavorInfoItem()
+ flavor = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList()
flavor.name = flavor_info['name']
flavor.id = flavor_info['id']
RwcalOpenmanoVimConnector._fill_epa_attributes(flavor, flavor_info)
@rwstatus(ret_on_failure=[[]])
def do_get_flavor_list(self, account):
- response = RwcalYang.VimResources()
+ response = RwcalYang.YangData_RwProject_Project_VimResources()
with self._use_driver(account) as drv:
flavors = drv.get_flavor_list()
for flav in flavors:
@rwstatus(ret_on_failure=[[]])
def do_get_port_list(self, account):
- return RwcalYang.VimResources()
+ return RwcalYang.YangData_RwProject_Project_VimResources()
@rwstatus
def do_create_network(self, account, network):
drv.delete_network(network_id)
def _fill_network_info(self, network_info):
- network = RwcalYang.NetworkInfoItem()
+ network = RwcalYang.YangData_RwProject_Project_VimResources_NetworkinfoList()
network.network_name = network_info['name']
network.network_id = network_info['id']
if ('provider:physical' in network_info) and (network_info['provider:physical']):
@rwstatus(ret_on_failure=[[]])
def do_get_network_list(self, account):
- response = RwcalYang.VimResources()
+ response = RwcalYang.YangData_RwProject_Project_VimResources()
with self._use_driver(account) as drv:
networks = drv.get_network_list()
for network in networks:
c_point.vdu_id = port_info['device_id']
def _fill_virtual_link_info(self, drv, network_info):
- link = RwcalYang.VirtualLinkInfoParams()
+ link = RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList()
link.name = network_info['name']
link.virtual_link_id = network_info['id']
if network_info['admin_state_up']:
network = drv.get_network(link_id)
return self._fill_virtual_link_info(drv,network)
+ @rwstatus(ret_on_failure=[None])
+ def do_get_virtual_link_by_name(self, account, link_name):
+ raise NotImplementedError()
+
@rwstatus(ret_on_failure=[""])
def do_get_virtual_link_list(self, account):
- response = RwcalYang.VNFResources()
+ response = RwcalYang.YangData_RwProject_Project_VnfResources()
with self._use_driver(account) as drv:
networks = drv.get_network_list()
for network in networks:
"""
Select a existing flavor if it matches the request or create new flavor
"""
- flavor = RwcalYang.FlavorInfoItem()
+ flavor = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList()
flavor.name = str(uuid.uuid4())
epa_types = ['vm_flavor', 'guest_epa', 'host_epa', 'host_aggregate', 'hypervisor_epa', 'vswitch_epa']
epa_dict = {k: v for k, v in vdu_init.as_dict().items() if k in epa_types}
@staticmethod
def _fill_vdu_info(drv,account,vm_info):
- vdu = RwcalYang.VDUInfoParams()
+ vdu = RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList()
vdu.name = vm_info['name']
vdu.vdu_id = vm_info['id']
mgmt_net_id = None
RwcalOpenmanoVimConnector._fill_epa_attributes(vdu, flavor)
return vdu
- @rwstatus(ret_on_failure=[None])
- def do_get_vdu(self, account, vdu_id):
+ @rwcalstatus(ret_on_failure=[None])
+ def do_get_vdu(self, account, vdu_id, mgmt_network):
+ # mgmt_network - Added due to need for mgmt network.
+ # TO DO: Investigate the need here.
with self._use_driver(account) as drv:
vm_info = drv.get_vminstance(vdu_id)
return RwcalOpenmanoVimConnector._fill_vdu_info(drv,account,vm_info)
- @rwstatus(ret_on_failure=[""])
+ @rwcalstatus(ret_on_failure=[None])
def do_get_vdu_list(self, account):
- vnf_resource = RwcalYang.VNFResources()
+ vnf_resource = RwcalYang.YangData_RwProject_Project_VnfResources()
with self._use_driver(account) as drv:
vms = drv.get_vminstance_list()
for vm in vms:
include(rift_plugin)
### rwcal-openstack package
-set(PKG_NAME rwcal-openstack)
-set(PKG_VERSION 1.0)
-set(PKG_RELEASE 1)
-set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
+set(INSTALL_COMPONENT rwcal-plugin-openstack)
-rift_install_python_plugin(rwcal_openstack rwcal_openstack.py)
+rift_install_gobject_python_plugin(rwcal_openstack rwcal_openstack.py COMPONENT ${INSTALL_COMPONENT})
rift_python_install_tree(
FILES
rift/rwcal/openstack/utils/image.py
PYTHON3_ONLY
- COMPONENT ${PKG_LONG_NAME})
+ COMPONENT ${INSTALL_COMPONENT})
"""
try:
vol = self._ci_drv.volumes.get(volume_id)
+ except ciclient.exceptions.NotFound:
+ return None
except Exception as e:
self.log.error("Get volume operation failed. Exception: %s", str(e))
raise
"""
supported_versions = [(2, ), (3, )]
- def __init__(self, auth_url, logger = None):
+ def __init__(self, auth_url, cert_validate, logger = None):
"""
Constructor for class
Arguments
auth_url(string): Keystone Auth URL
+ cert_validate (boolean): Boolean to indicate if certificate validation is required
logger (instance of logging.Logger)
"""
self.log = logger
try:
- self._discover = discover.Discover(auth_url=auth_url)
+ self._discover = discover.Discover(auth_url=auth_url, insecure = not cert_validate)
except Exception as e:
self.log.exception(str(e))
self._discover = None
"""
networks = self._network_find(**{'id': network_id, 'name': network_name})
if not networks:
- raise NeutronException.NotFound("Could not find network. Network id: %s, Network name: %s " %(network_id, network_name))
+ return None
return networks[0]
security_groups : A List of Neutron security group Ids
}
Returns:
- A list of port_id (string)
+ A list of ports { port_id (string), tag (connection_name, string) }
"""
params = dict()
params['ports'] = ports
except Exception as e:
self.log.exception("Ports Create operation failed. Exception: %s",str(e))
raise
- return [ p['id'] for p in ports['ports'] ]
+ return [ { "id": p['id'], "tag": p['name'] } for p in ports['ports'] ]
def port_update(self, port_id, no_security_groups=None,port_security_enabled=None):
self._sess_handle = sess_handle
+ self._max_api_version = None
+ self._min_api_version = None
+
#### Attempt to use API versions in prioritized order defined in
#### NovaDriver.supported_versions
def select_version(version):
service_type = service_type,
session = self._sess_handle.session,
logger = self.log)
+
+ api_version = 'v' + nvdrv.versions.api_version.get_string()
+ nova_version_list = nvdrv.versions.list()
+ max_api_version, min_api_version = None, None
+
+ for v in nova_version_list:
+ version_dict = v.to_dict()
+ if api_version == version_dict["id"]:
+ max_api_version = version_dict["version"] # Max version supported is stored in version field.
+ min_api_version = version_dict["min_version"]
+ break
+
except Exception as e:
self.log.info(str(e))
raise
else:
self.log.info("Nova API v%s selected", version)
- return (version, nvdrv)
+ return (version, nvdrv, max_api_version, min_api_version)
errors = []
for v in NovaDriver.supported_versions:
try:
- (self._version, self._nv_drv) = select_version(v)
+ (self._version, self._nv_drv, self._max_api_version, self._min_api_version) = select_version(v)
except Exception as e:
errors.append(e)
else:
"""
try:
extra_specs = flavor.get_keys()
+ except nvclient.exceptions.NotFound:
+ return None
except Exception as e:
self.log.exception("Could not get the EPA attributes for flavor with flavor_id : %s. Exception: %s",
flavor.id, str(e))
"""
try:
flavor = self._nv_drv.flavors.get(flavor_id)
+ except nvclient.exceptions.NotFound:
+ return None
except Exception as e:
self.log.exception("Did not find flavor with flavor_id : %s. Exception: %s",flavor_id, str(e))
raise
response = flavor.to_dict()
- response['extra_specs'] = self._flavor_extra_spec_get(flavor)
+ try:
+ response['extra_specs'] = self._flavor_extra_spec_get(flavor)
+ except nvclient.exceptions.NotFound:
+ pass
+ except Exception as e:
+ self.log.exception("Did not find extra_specs in flavor with flavor_id : %s. Exception: %s",flavor_id, str(e))
+ raise
return response
try:
if 'port_list' in kwargs:
for port_id in kwargs['port_list']:
- nics.append({'port-id': port_id})
+ port = { 'port-id': port_id['id'] }
+ nics.append(port)
try:
server = self._nv_drv.servers.create(
max_count = None,
userdata = kwargs['userdata'] if 'userdata' in kwargs else None,
security_groups = kwargs['security_groups'] if 'security_groups' in kwargs else None,
- availability_zone = kwargs['availability_zone'] if 'availability_zone' in kwargs else None,
+ availability_zone = kwargs['availability_zone'].name if 'availability_zone' in kwargs else None,
block_device_mapping_v2 = kwargs['block_device_mapping_v2'] if 'block_device_mapping_v2' in kwargs else None,
nics = nics,
scheduler_hints = kwargs['scheduler_hints'] if 'scheduler_hints' in kwargs else None,
try:
console_info = self._nv_drv.servers.get_vnc_console(server_id, console_type)
except Exception as e:
- self.log.exception("Server Get-Console operation failed for server_id: %s. Exception: %s",
- server_id, str(e))
- raise
+ # TODO: This error keeps repeating incase there is no console available
+ # So reduced level from exception to warning
+ self.log.warning("Server Get-Console operation failed for server_id: %s. Exception: %s",
+ server_id, str(e))
+ raise e
return console_info
def server_rebuild(self, server_id, image_id):
region = kwargs['region_name'] if 'region_name' in kwargs else False
mgmt_network = kwargs['mgmt_network'] if 'mgmt_network' in kwargs else None
- discover = ks_drv.KeystoneVersionDiscover(kwargs['auth_url'], logger = self.log)
+ discover = ks_drv.KeystoneVersionDiscover(kwargs['auth_url'],
+ cert_validate,
+ logger = self.log)
(major, minor) = discover.get_version()
self.sess_drv = sess_drv.SessionDriver(auth_method = 'password',
def nova_server_create(self, **kwargs):
if 'security_groups' not in kwargs:
- kwargs['security_groups'] = [s['name'] for s in self._nova_security_groups]
+ security_groups = [s['name'] for s in self._nova_security_groups]
+ #Remove the security group names that are duplicate - RIFT-17035
+ valid_security_groups = list(filter(lambda s: security_groups.count(s) == 1, security_groups))
+ kwargs['security_groups'] = valid_security_groups
return self.nova_drv.server_create(**kwargs)
def nova_server_add_port(self, server_id, port_id):
def neutron_network_get(self, network_id):
return self.neutron_drv.network_get(network_id=network_id)
+ def neutron_network_get_by_name(self, network_name):
+ return self.neutron_drv.network_get(network_name=network_name)
+
def neutron_network_create(self, **kwargs):
return self.neutron_drv.network_create(**kwargs)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
-
rwlog_handler = rwlogger.RwLogger(category="rw-cal-log",
subcategory="openstack",)
logger.addHandler(rwlog_handler)
def allocate_floating_ip(drv, argument):
#### Allocate a floating_ip
- available_ip = [ ip for ip in drv.nova_floating_ip_list() if ip.instance_id == None ]
+ try:
+ available_ip = [ ip for ip in drv.nova_floating_ip_list() if ip.instance_id == None ]
- if argument.pool_name:
- ### Filter further based on IP address
- available_ip = [ ip for ip in available_ip if ip.pool == argument.pool_name ]
-
- if not available_ip:
- logger.info("<PID: %d> No free floating_ips available. Allocating fresh from pool: %s" %(os.getpid(), argument.pool_name))
- pool_name = argument.pool_name if argument.pool_name is not None else None
- floating_ip = drv.nova_floating_ip_create(pool_name)
- else:
- floating_ip = random.choice(available_ip)
- logger.info("<PID: %d> Selected floating_ip: %s from available free pool" %(os.getpid(), floating_ip))
+ if argument.pool_name:
+ ### Filter further based on IP address
+ available_ip = [ ip for ip in available_ip if ip.pool == argument.pool_name ]
+
+ if not available_ip:
+ logger.info("<PID: %d> No free floating_ips available. Allocating fresh from pool: %s" %(os.getpid(), argument.pool_name))
+ pool_name = argument.pool_name if argument.pool_name is not None else None
+ floating_ip = drv.nova_floating_ip_create(pool_name)
+ else:
+ floating_ip = random.choice(available_ip)
+ logger.info("<PID: %d> Selected floating_ip: %s from available free pool" %(os.getpid(), floating_ip))
- return floating_ip
+ return floating_ip
+
+ except Exception as e:
+ logger.error("Floating IP Allocation Failed - %s", e)
+ return None
def handle_floating_ip_assignment(drv, server, argument, management_ip):
for n_info in network_info:
if 'OS-EXT-IPS:type' in n_info and n_info['OS-EXT-IPS:type'] == 'fixed':
management_ip = n_info['addr']
- handle_floating_ip_assignment(drv, server, argument, management_ip)
- return
+ try:
+ handle_floating_ip_assignment(drv, server, argument, management_ip)
+ return
+ except Exception as e:
+ logger.error("Exception in assign_floating_ip_address : %s", e)
+ raise
else:
logger.info("Waiting for management_ip to be assigned to server: %s" %(server['name']))
time.sleep(1)
else:
logger.error("Server %s did not reach active state in %d seconds. Current state: %s" %(server['name'], wait_time, server['status']))
sys.exit(4)
-
#create_port_metadata(drv, argument)
create_volume_metadata(drv, argument)
- assign_floating_ip_address(drv, argument)
+ try:
+ assign_floating_ip_address(drv, argument)
+ except Exception as e:
+ logger.error("Exception in prepare_vm_after_boot : %s", e)
+ raise
def main():
region = argument.region)
drv = openstack_drv.OpenstackDriver(logger = logger, **kwargs)
- prepare_vm_after_boot(drv, argument)
- sys.exit(0)
+ try:
+ prepare_vm_after_boot(drv, argument)
+ except Exception as e:
+ logger.error("Exception in main of prepare_vm : %s", e)
+ raise
if __name__ == "__main__":
- main()
+ try:
+ main()
+ # Do not print anything in this script. This is a subprocess spawned by rwmain
+ # and the following print determines the success or failure of this script.
+ print("True",end="")
+ except Exception as e:
+ logger.error("Exception in prepare_vm : %s", e)
+ # Do not print anything in this script. This is a subprocess spawned by rwmain
+ # and the following print determines the success or failure of this script.
+ print("False+" + str(e),end="")
+ sys.exit(2)
from already existing flavors
Arguments:
- vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
+ vdu_params: Protobuf GI object RwcalYang.YangData_RwProject_Project_VduInitParams()
Returns:
flavor_id(string): Flavor id for VDU instantiation
None if no flavor could be found
"""
+
+ if vdu_params.vm_flavor.has_field('vm_flavor_name') and \
+ vdu_params.vm_flavor.vm_flavor_name is not None:
+ nova_flavor_list = self.driver.nova_flavor_list()
+ for flavor in nova_flavor_list:
+ self.log.debug("Flavor {} ".format(flavor.get('name', '')))
+ if flavor.get('name', '') == vdu_params.vm_flavor.vm_flavor_name:
+ return flavor['id']
+
kwargs = { 'vcpus': vdu_params.vm_flavor.vcpu_count,
'ram' : vdu_params.vm_flavor.memory_mb,
'disk' : vdu_params.vm_flavor.storage_gb,}
is created.
Arguments:
- vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
+ vdu_params: Protobuf GI object RwcalYang.YangData_RwProject_Project_VduInitParams()
Returns:
flavor_id(string): Flavor id for VDU instantiation
flavor_id, vdu_params.name)
return flavor_id
- flavor = RwcalYang.FlavorInfoItem()
+ flavor = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList()
flavor.name = str(uuid.uuid4())
epa_dict = { k: v for k, v in vdu_params.as_dict().items()
"""
Creates flavor related arguments for VDU operation
Arguments:
- vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
+ vdu_params: Protobuf GI object RwcalYang.YangData_RwProject_Project_VduInitParams()
Returns:
A dictionary {'flavor_id': <flavor-id>}
"""
Creates image related arguments for VDU operation
Arguments:
- vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
+ vdu_params: Protobuf GI object RwcalYang.YangData_RwProject_Project_VduInitParams()
Returns:
A dictionary {'image_id': <image-id>}
def make_vdu_volume_args(self, volume, vdu_params):
"""
Arguments:
- volume: Protobuf GI object RwcalYang.VDUInitParams_Volumes()
- vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
+ volume: Protobuf GI object RwcalYang.YangData_RwProject_Project_VduInitParams_Volumes()
+ vdu_params: Protobuf GI object RwcalYang.YangData_RwProject_Project_VduInitParams()
Returns:
A dictionary required to create volume for VDU
Creates volume related arguments for VDU operation
Arguments:
- vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
+ vdu_params: Protobuf GI object RwcalYang.YangData_RwProject_Project_VduInitParams()
Returns:
A dictionary required for volumes creation for VDU instantiation
"""
Creates VDU network related arguments for VDU operation
Arguments:
- vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
+ vdu_params: Protobuf GI object RwcalYang.YangData_RwProject_Project_VduInitParams()
Returns:
A dictionary {'port_list' : [ports], 'network_list': [networks]}
"""
kwargs = dict()
kwargs['port_list'], kwargs['network_list'] = self.driver.utils.network.setup_vdu_networking(vdu_params)
+
return kwargs
"""
Creates VDU boot config related arguments for VDU operation
Arguments:
- vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
+ vdu_params: Protobuf GI object RwcalYang.YangData_RwProject_Project_VduInitParams()
Returns:
A dictionary {
# Rift model only
if vdu_params.supplemental_boot_data.has_field('custom_meta_data'):
for cm in vdu_params.supplemental_boot_data.custom_meta_data:
- metadata[cm.name] = cm.value
- kwargs['metadata'] = metadata
+ # Adding this condition as the list contains CLOUD_INIT Variables as
+ # well. CloudInit Variables such as password are visible on the OpenStack UI
+ # if not removed from the custom_meta_data list.
+ if cm.destination == 'CLOUD_METADATA':
+ metadata[cm.name] = cm.value
+ kwargs['metadata'] = metadata
except Exception as e:
pass
Function to create kwargs required for nova server placement
Arguments:
- vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
+ vdu_params: Protobuf GI object RwcalYang.YangData_RwProject_Project_VduInitParams()
Returns:
A dictionary { 'availability_zone' : < Zone >, 'scheduler_hints': <group-id> }
Function to create kwargs required for nova security group
Arguments:
- vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
- account: Protobuf GI object RwcalYang.CloudAccount()
+ vdu_params: Protobuf GI object RwcalYang.YangData_RwProject_Project_VduInitParams()
+ account: Protobuf GI object RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
Returns:
A dictionary {'security_groups' : < group > }
Function to create kwargs required for nova_server_create API
Arguments:
- vdu_params: Protobuf GI object RwcalYang.VDUInitParams()
- account: Protobuf GI object RwcalYang.CloudAccount()
+ vdu_params: Protobuf GI object RwcalYang.YangData_RwProject_Project_VduInitParams()
+ account: Protobuf GI object RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
Returns:
A kwargs dictionary for VDU create operation
mgmt_ip = interface['addr']
elif interface['OS-EXT-IPS:type'] == 'floating':
public_ip = interface['addr']
+
return (mgmt_ip, public_ip)
def get_vdu_epa_info(self, vm_info):
Arguments:
vdu_id (string) : VDU Id (vm_info['id'])
Returns:
- A List of object RwcalYang.VDUInfoParams_ConnectionPoints()
+ A List of object RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList_ConnectionPoints()
"""
cp_list = []
port_list = self.driver.neutron_port_list(**{'device_id': vdu_id})
for port in port_list:
cp_info = self.driver.utils.network._parse_cp(port)
- cp = RwcalYang.VDUInfoParams_ConnectionPoints()
+ cp = RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList_ConnectionPoints()
cp.from_dict(cp_info.as_dict())
cp_list.append(cp)
return cp_list
vm_info : A dictionary returned by novaclient library listing VM attributes
Returns:
- List of RwcalYang.VDUInfoParams_SupplementalBootData()
+ List of RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList_SupplementalBootData()
"""
supplemental_boot_data = None
node_id = None
if 'config_drive' in vm_info:
- supplemental_boot_data = RwcalYang.VDUInfoParams_SupplementalBootData()
+ supplemental_boot_data = RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList_SupplementalBootData()
supplemental_boot_data.boot_data_drive = vm_info['config_drive']
# Look for any metadata
if 'metadata' not in vm_info:
return node_id, supplemental_boot_data
if supplemental_boot_data is None:
- supplemental_boot_data = RwcalYang.VDUInfoParams_SupplementalBootData()
+ supplemental_boot_data = RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList_SupplementalBootData()
for key, value in vm_info['metadata'].items():
if key == 'rift_node_id':
node_id = value
vm_info : A dictionary returned by novaclient library listing VM attributes
Returns:
- List of RwcalYang.VDUInfoParams_Volumes()
+ List of RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList_Volumes()
"""
volumes = list()
return volumes
for v in volume_list:
- volume = RwcalYang.VDUInfoParams_Volumes()
+ volume = RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList_Volumes()
try:
volume.name = (v['device']).split('/')[2]
volume.volume_id = v['volumeId']
details = self.driver.cinder_volume_get(volume.volume_id)
+ if details is None:
+ continue
try:
# Rift only
for k, v in details.metadata.items():
except Exception as e:
- self.log.exception("Exception %s occured during volume list parsing", str(e))
+ self.log.warning("Exception %s occured during volume list parsing", str(e))
return console_url
def parse_cloud_vdu_info(self, vm_info):
vm_info : A dictionary object return by novaclient library listing VM attributes
Returns:
- Protobuf GI Object of type RwcalYang.VDUInfoParams()
+ Protobuf GI Object of type RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList()
"""
- vdu = RwcalYang.VDUInfoParams()
+ vdu = RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList()
vdu.name = vm_info['name']
vdu.vdu_id = vm_info['id']
vdu.cloud_type = 'openstack'
if 'flavor' in vm_info and 'id' in vm_info['flavor']:
vdu.flavor_id = vm_info['flavor']['id']
flavor_info = self.get_vdu_epa_info(vm_info)
- vm_flavor = self.driver.utils.flavor.parse_vm_flavor_epa_info(flavor_info)
- guest_epa = self.driver.utils.flavor.parse_guest_epa_info(flavor_info)
- host_epa = self.driver.utils.flavor.parse_host_epa_info(flavor_info)
- host_aggregates = self.driver.utils.flavor.parse_host_aggregate_epa_info(flavor_info)
-
- vdu.vm_flavor.from_dict(vm_flavor.as_dict())
- vdu.guest_epa.from_dict(guest_epa.as_dict())
- vdu.host_epa.from_dict(host_epa.as_dict())
- for aggr in host_aggregates:
- ha = vdu.host_aggregate.add()
- ha.from_dict(aggr.as_dict())
+ if flavor_info is not None:
+ vm_flavor = self.driver.utils.flavor.parse_vm_flavor_epa_info(flavor_info)
+ guest_epa = self.driver.utils.flavor.parse_guest_epa_info(flavor_info)
+ host_epa = self.driver.utils.flavor.parse_host_epa_info(flavor_info)
+ host_aggregates = self.driver.utils.flavor.parse_host_aggregate_epa_info(flavor_info)
+
+ vdu.vm_flavor.from_dict(vm_flavor.as_dict())
+ vdu.guest_epa.from_dict(guest_epa.as_dict())
+ vdu.host_epa.from_dict(host_epa.as_dict())
+ for aggr in host_aggregates:
+ ha = vdu.host_aggregate.add()
+ ha.from_dict(aggr.as_dict())
node_id, boot_data = self._parse_vdu_boot_config_data(vm_info)
if node_id:
port_list = self.driver.neutron_port_list(**{'device_id': vdu_id})
for port in port_list:
- if ((port['device_owner'] == 'compute:None') or (port['device_owner'] == '')):
- self.driver.neutron_port_delete(port['id'])
+ self.driver.neutron_port_delete(port['id'])
flavor_info: A dictionary object return by novaclient library listing flavor attributes
Returns:
- vm_flavor = RwcalYang.FlavorInfoItem_VmFlavor()
+ vm_flavor = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList_VmFlavor()
"""
- vm_flavor = RwcalYang.FlavorInfoItem_VmFlavor()
+ vm_flavor = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList_VmFlavor()
if 'vcpus' in flavor_info and flavor_info['vcpus']:
vm_flavor.vcpu_count = flavor_info['vcpus']
flavor_info: A dictionary object return by novaclient library listing flavor attributes
Returns:
- guest_epa = RwcalYang.FlavorInfoItem_GuestEpa()
+ guest_epa = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList_GuestEpa()
"""
- guest_epa = RwcalYang.FlavorInfoItem_GuestEpa()
+ guest_epa = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList_GuestEpa()
+ if 'extra_specs' not in flavor_info or flavor_info['extra_specs'] is None:
+ return guest_epa
for attr in flavor_info['extra_specs']:
if attr == 'hw:cpu_policy':
cpu_pinning_policy = self._epa.guest.extra_spec_to_mano_cpu_pinning_policy(flavor_info['extra_specs']['hw:cpu_policy'])
flavor_info: A dictionary object return by novaclient library listing flavor attributes
Returns:
- host_epa = RwcalYang.FlavorInfoItem_HostEpa()
+ host_epa = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList_HostEpa()
"""
- host_epa = RwcalYang.FlavorInfoItem_HostEpa()
+ host_epa = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList_HostEpa()
+ if 'extra_specs' not in flavor_info or flavor_info['extra_specs'] is None:
+ return host_epa
for attr in flavor_info['extra_specs']:
if attr == 'capabilities:cpu_info:model':
cpu_model = self._epa.host.extra_specs_to_mano_cpu_model(flavor_info['extra_specs']['capabilities:cpu_info:model'])
flavor_info: A dictionary object return by novaclient library listing flavor attributes
Returns:
- A list of objects host_aggregate of type RwcalYang.FlavorInfoItem_HostAggregate()
+ A list of objects host_aggregate of type RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList_HostAggregate()
"""
host_aggregates = list()
+ if 'extra_specs' not in flavor_info or flavor_info['extra_specs'] is None:
+ return host_aggregates
for attr in flavor_info['extra_specs']:
if attr.startswith('aggregate_instance_extra_specs:'):
- aggregate = RwcalYang.FlavorInfoItem_HostAggregate()
+ aggregate = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList_HostAggregate()
aggregate.metadata_key = ":".join(attr.split(':')[1::])
aggregate.metadata_value = flavor_info['extra_specs'][attr]
host_aggregates.append(aggregate)
flavor_info: A dictionary object returned by novaclient library listing flavor attributes
Returns:
- Protobuf GI Object of type RwcalYang.FlavorInfoItem()
+ Protobuf GI Object of type RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList()
"""
- flavor = RwcalYang.FlavorInfoItem()
+ flavor = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList()
if 'name' in flavor_info and flavor_info['name']:
flavor.name = flavor_info['name']
if 'id' in flavor_info and flavor_info['id']:
"""
Match EPA attributes
Arguments:
- resource_info: Protobuf GI object RwcalYang.FlavorInfoItem()
+ resource_info: Protobuf GI object RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList()
Following attributes would be accessed
- vm_flavor
- guest_epa
- host_epa
- host_aggregate
- request_params: Protobuf GI object RwcalYang.VDUInitParams().
+ request_params: Protobuf GI object RwcalYang.YangData_RwProject_Project_VduInitParams().
Following attributes would be accessed
- vm_flavor
- guest_epa
def match_resource_flavor(self, vdu_init, flavor_list):
"""
Arguments:
- vdu_init: Protobuf GI object RwcalYang.VDUInitParams().
- flavor_list: List of Protobuf GI object RwcalYang.FlavorInfoItem()
+ vdu_init: Protobuf GI object RwcalYang.YangData_RwProject_Project_VduInitParams().
+ flavor_list: List of Protobuf GI object RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList()
Returns:
Flavor_ID -- If match is found between vdu_init and one of flavor_info from flavor_list
Returns:
Protobuf GI Object of type RwcalYang.ImageInfoItem()
"""
- image = RwcalYang.ImageInfoItem()
+ image = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList()
if 'name' in image_info and image_info['name']:
image.name = image_info['name']
if 'id' in image_info and image_info['id']:
#!/usr/bin/python
-#
+#
# Copyright 2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
@property
def driver(self):
return self._driver
-
+
def _parse_cp(self, cp_info):
"""
- Parse the port_info dictionary returned by neutronclient
+ Parse the port_info dictionary returned by neutronclient
Arguments:
cp_info: A dictionary object representing port attributes
Returns:
- Protobuf GI oject of type RwcalYang.VirtualLinkInfoParams_ConnectionPoints()
+ Protobuf GI oject of type RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList_ConnectionPoints()
"""
- cp = RwcalYang.VirtualLinkInfoParams_ConnectionPoints()
+ cp = RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList_ConnectionPoints()
if 'name' in cp_info and cp_info['name']:
cp.name = cp_info['name']
-
+
if 'id' in cp_info and cp_info['id']:
cp.connection_point_id = cp_info['id']
-
+
if ('fixed_ips' in cp_info) and (len(cp_info['fixed_ips']) >= 1):
if 'ip_address' in cp_info['fixed_ips'][0]:
cp.ip_address = cp_info['fixed_ips'][0]['ip_address']
-
+
if 'mac_address' in cp_info and cp_info['mac_address']:
cp.mac_addr = cp_info['mac_address']
-
+
if cp_info['status'] == 'ACTIVE':
cp.state = 'active'
else:
cp.state = 'inactive'
-
+
if 'network_id' in cp_info and cp_info['network_id']:
cp.virtual_link_id = cp_info['network_id']
-
+
if 'device_id' in cp_info and cp_info['device_id']:
cp.vdu_id = cp_info['device_id']
+
+ if 'allowed_address_pairs' in cp_info and cp_info['allowed_address_pairs']:
+ for vcp in cp_info['allowed_address_pairs']:
+ vcp_info = cp.virtual_cp_info.add()
+ if 'ip_address' in vcp and vcp['ip_address']:
+ vcp_info.ip_address = vcp['ip_address']
+ if 'mac_address' in vcp and vcp['mac_address']:
+ vcp_info.mac_address = vcp['mac_address']
+ return cp
+
+ def _parse_virtual_cp(self, cp_info):
+ """
+ Parse the port_info dictionary returned by neutronclient
+ Arguments:
+ cp_info: A dictionary object representing port attributes
+
+ Returns:
+ Protobuf GI oject of type RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList_VirtualConnectionPoints()
+ """
+ cp = RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList_VirtualConnectionPoints()
+
+ if 'id' in cp_info and cp_info['id']:
+ cp.connection_point_id = cp_info['id']
+
+ if 'name' in cp_info and cp_info['name']:
+ cp.name = cp_info['name']
+
+ if ('fixed_ips' in cp_info) and (len(cp_info['fixed_ips']) >= 1):
+ if 'ip_address' in cp_info['fixed_ips'][0]:
+ cp.ip_address = cp_info['fixed_ips'][0]['ip_address']
+
+ if 'mac_address' in cp_info and cp_info['mac_address']:
+ cp.mac_address = cp_info['mac_address']
+
return cp
def parse_cloud_virtual_link_info(self, vlink_info, port_list, subnet):
Arguments:
vlink_info : A dictionary object return by neutronclient library listing network attributes
-
+
Returns:
- Protobuf GI Object of type RwcalYang.VirtualLinkInfoParams()
+ Protobuf GI Object of type RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList()
"""
- link = RwcalYang.VirtualLinkInfoParams()
+ link = RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList()
link.name = vlink_info['name']
if 'status' in vlink_info and vlink_info['status'] == 'ACTIVE':
link.state = 'active'
link.virtual_link_id = vlink_info['id']
for port in port_list:
- if ('device_owner' in port) and (port['device_owner'] == 'compute:None'):
+ if ('device_owner' in port) and (port['device_owner'] in ['compute:nova', 'compute:None']):
link.connection_points.append(self._parse_cp(port))
+ if ('device_owner' in port) and (port['device_owner'] == ''):
+ link.virtual_connection_points.append(self._parse_virtual_cp(port))
if subnet is not None:
link.subnet = subnet['cidr']
link.provider_network.physical_network = vlink_info['provider:physical_network'].upper()
return link
-
+
def setup_vdu_networking(self, vdu_params):
"""
This function validates the networking/connectivity setup.
Arguments:
- vdu_params: object of RwcalYang.VDUInitParams()
+ vdu_params: object of RwcalYang.YangData_RwProject_Project_VduInitParams()
Returns:
A list of port_ids and network_ids for VDU
-
+
"""
port_args = list()
network_ids = list()
add_mgmt_net = False
- for cp in vdu_params.connection_points:
- if cp.virtual_link_id == self.driver._mgmt_network_id:
+
+ # Sorting Connection Points by given 'port_order'. If 'port_order' is not given then sorting by name.
+ # Please note that the GI Object (vdu_params.connection_points) has been converted into a dictionary object for sorting purposes.
+
+ sorted_connection_points = []
+ if vdu_params.has_field('connection_points'):
+ sorted_connection_points = sorted(vdu_params.as_dict().get('connection_points'), key=lambda k: ("port_order" not in k,
+ k.get("port_order", None), k['name']))
+
+ if vdu_params.mgmt_network is not None:
+ # Setting the mgmt network as found in vdu params.
+ mgmt_network = self.driver.neutron_drv.network_get(network_name=vdu_params.mgmt_network)['id']
+ else:
+ mgmt_network = self.driver._mgmt_network_id
+
+ for cp in sorted_connection_points:
+ if cp['virtual_link_id'] == mgmt_network:
### Remove mgmt_network_id from net_ids
add_mgmt_net = True
port_args.append(self._create_cp_args(cp))
-
if not add_mgmt_net:
- network_ids.append(self.driver._mgmt_network_id)
-
+ network_ids.append(mgmt_network)
+
### Create ports and collect port ids
if port_args:
port_ids = self.driver.neutron_multi_port_create(port_args)
else:
port_ids = list()
-
return port_ids, network_ids
-
-
+
+
def _create_cp_args(self, cp):
"""
Creates a request dictionary for port create call
Arguments:
- cp: Object of RwcalYang.VDUInitParams_ConnectionPoints()
+ cp: Object of Python Dictionary
Returns:
dict() of request params
"""
args = dict()
- args['name'] = cp.name
- args['network_id'] = cp.virtual_link_id
+ args['name'] = cp['name']
+
+ args['network_id'] = cp['virtual_link_id']
args['admin_state_up'] = True
- if cp.type_yang == 'VIRTIO' or cp.type_yang == 'E1000':
+ if cp['type_yang'] in ['VIRTIO', 'E1000', 'VPORT']:
args['binding:vnic_type'] = 'normal'
- elif cp.type_yang == 'SR_IOV':
+ elif cp['type_yang'] == 'SR_IOV':
args['binding:vnic_type'] = 'direct'
else:
- raise NotImplementedError("Port Type: %s not supported" %(cp.type_yang))
+ raise NotImplementedError("Port Type: %s not supported" %(cp['type_yang']))
try:
- if cp.static_ip_address:
- args["fixed_ips"] = [{"ip_address" : cp.static_ip_address}]
+ if cp['static_ip_address']:
+ args["fixed_ips"] = [{"ip_address" : cp['static_ip_address']}]
except Exception as e:
pass
if 'port_security_enabled' in cp:
- args['port_security_enabled'] = cp.port_security_enabled
+ args['port_security_enabled'] = cp['port_security_enabled']
- if cp.has_field('security_group'):
+ if 'security_group' in cp:
if self.driver._neutron_security_groups:
gid = self.driver._neutron_security_groups[0]['id']
args['security_groups'] = [ gid ]
+
+ if 'virtual_cps' in cp:
+ args['allowed_address_pairs'] = [ {'ip_address': vcp['ip_address'],
+ 'mac_address': vcp['mac_address']}
+ for vcp in cp['virtual_cps'] ]
+
return args
def make_virtual_link_args(self, link_params):
"""
Function to create kwargs required for neutron_network_create API
-
+
Arguments:
- link_params: Protobuf GI object RwcalYang.VirtualLinkReqParams()
+ link_params: Protobuf GI object RwcalYang.YangData_RwProject_Project_VirtualLinkReqParams()
Returns:
A kwargs dictionary for network operation
def make_subnet_args(self, link_params, network_id):
"""
Function to create kwargs required for neutron_subnet_create API
-
+
Arguments:
- link_params: Protobuf GI object RwcalYang.VirtualLinkReqParams()
+ link_params: Protobuf GI object RwcalYang.YangData_RwProject_Project_VirtualLinkReqParams()
Returns:
A kwargs dictionary for subnet operation
link_params.ip_profile_params.subnet_prefix_pool,
link_params.name)
raise NeutronException.NotFound("SubnetPool with name %s not found"%(link_params.ip_profile_params.subnet_prefix_pool))
-
+
kwargs['subnetpool_id'] = pools[0]
-
+
elif link_params.has_field('subnet'):
kwargs['cidr'] = link_params.subnet
else:
kwargs['gateway_ip'] = link_params.ip_profile_params.gateway_address
return kwargs
+
+ def prepare_virtual_link(self, link_params, network_id):
+ """
+ Function to create additional resources in the network during
+ network-creation process. It involves following steps
+ - Create subnets
+ - Create any virtual ports in network
+
+ Arguments:
+ link_params: Protobuf GI object RwcalYang.YangData_RwProject_Project_VirtualLinkReqParams()
+ network_id: string
+
+ Returns:
+ None
+ """
+ ### Create subnet
+ kwargs = self.make_subnet_args(link_params, network_id)
+ self.driver.neutron_subnet_create(**kwargs)
+
+ ### Create Virtual connection point
+ if link_params.has_field('virtual_cps'):
+ port_args = list()
+ for vcp in link_params.virtual_cps:
+ cp = RwcalYang.YangData_RwProject_Project_VduInitParams_ConnectionPoints()
+ cp.from_dict({k:v for k,v in vcp.as_dict().items()
+ if k in ['name','security_group', 'port_security_enabled', 'static_ip_address', 'type_yang']})
+ cp.virtual_link_id = network_id
+ port_args.append(self._create_cp_args(cp.as_dict()))
+ if port_args:
+ ### Create ports
+ self.driver.neutron_multi_port_create(port_args)
+ return
import subprocess
import tempfile
import yaml
+import shlex
import gi
gi.require_version('RwCal', '1.0')
class ImageUploadError(Exception):
pass
+class PrepareVduOnBoot(Exception):
+ pass
class RwcalAccountDriver(object):
"""
Returns:
Validation Code and Details String
"""
- status = RwcalYang.CloudConnectionStatus()
+ status = RwcalYang.YangData_Rwcal_ConnectionStatus()
try:
drv = self._use_driver(account)
drv.validate_account_creds()
Returns:
The the list of images in VimResources object
"""
- response = RwcalYang.VimResources()
+ response = RwcalYang.YangData_RwProject_Project_VimResources()
drv = self._use_driver(account)
try:
images = drv.glance_image_list()
Returns:
Protobuf Gi object for VM
"""
- vm = RwcalYang.VMInfoItem()
+ vm = RwcalYang.YangData_RwProject_Project_VimResources_VminfoList()
vm.vm_id = vm_info['id']
vm.vm_name = vm_info['name']
vm.image_id = vm_info['image']['id']
Returns:
List containing VM information
"""
- response = RwcalYang.VimResources()
+ response = RwcalYang.YangData_RwProject_Project_VimResources()
drv = self._use_driver(account)
vms = drv.nova_server_list()
for vm in vms:
Returns:
List of flavors
"""
- response = RwcalYang.VimResources()
+ response = RwcalYang.YangData_RwProject_Project_VimResources()
drv = self._use_driver(account)
try:
flavors = drv.nova_flavor_list()
Returns:
Network info item
"""
- network = RwcalYang.NetworkInfoItem()
+ network = RwcalYang.YangData_RwProject_Project_VimResources_NetworkinfoList()
network.network_name = network_info['name']
network.network_id = network_info['id']
if ('provider:network_type' in network_info) and (network_info['provider:network_type'] is not None):
Returns:
List of networks
"""
- response = RwcalYang.VimResources()
+ response = RwcalYang.YangData_RwProject_Project_VimResources()
drv = self._use_driver(account)
networks = drv.neutron_network_list()
for network in networks:
if network.provider_network.has_field('segmentation_id'):
kwargs['segmentation_id'] = network.provider_network.segmentation_id
- drv = self._use_driver(account)
- network_id = drv.neutron_network_create(**kwargs)
- drv.neutron_subnet_create(network_id = network_id,
- cidr = network.subnet)
+ try:
+ drv = self._use_driver(account)
+ network_id = drv.neutron_network_create(**kwargs)
+ drv.neutron_subnet_create(network_id = network_id,
+ cidr = network.subnet)
+ except Exception as e:
+ self.log.exception("Exception %s occured during create-network", str(e))
+ raise
+
return network_id
@rwstatus
Returns:
Port info item
"""
- port = RwcalYang.PortInfoItem()
+ port = RwcalYang.YangData_RwProject_Project_VimResources_PortinfoList()
port.port_name = port_info['name']
port.port_id = port_info['id']
Returns:
Port info list
"""
- response = RwcalYang.VimResources()
+ response = RwcalYang.YangData_RwProject_Project_VimResources()
drv = self._use_driver(account)
ports = drv.neutron_port_list(*{})
for port in ports:
Returns:
A kwargs dictionary for glance operation
"""
-
drv = self._use_driver(account)
+ network_id = None
try:
kwargs = drv.utils.network.make_virtual_link_args(link_params)
network_id = drv.neutron_network_create(**kwargs)
- kwargs = drv.utils.network.make_subnet_args(link_params, network_id)
- drv.neutron_subnet_create(**kwargs)
+ drv.utils.network.prepare_virtual_link(link_params, network_id)
except Exception as e:
- self.log.error("Encountered exceptions during network creation. Exception: %s", str(e))
+ self.log.exception("Encountered exceptions during network creation. Exception: %s", str(e))
# This is to delete the network if neutron_subnet_create fails after creation of network
- # Note:- Any subnet created will be implicitly deleted.
- try:
- drv.neutron_network_delete(network_id)
- except Exception as delete_exception:
- self.log.debug("Exception while deleting the network after failure of neutron_subnet_create or make_subnet_args: %s", str(delete_exception))
+ # Note:- Any subnet created will be implicitly deleted.
+ if network_id is not None:
+ try:
+ drv.neutron_network_delete(network_id)
+ except Exception as delete_exception:
+ self.log.debug("Exception while deleting the network after failure of neutron_subnet_create or make_subnet_args: %s", str(delete_exception))
+ # Raising exception so that the Exception is propagated to Resmgr.
+ # This fixes the UI Stuck at Vl-Init-Phase.
+ raise Exception(str(e) + " --> " + str(delete_exception))
raise e
return network_id
link_id - id for the virtual-link
Returns:
- Object of type RwcalYang.VirtualLinkInfoParams
+ Object of type RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList
"""
drv = self._use_driver(account)
try:
raise
return virtual_link
+ @rwstatus(ret_on_failure=[None])
+ def do_get_virtual_link_by_name(self, account, link_name):
+ """Get information about virtual link.
+
+ Arguments:
+ account - a cloud account
+ link_name - name for the virtual-link
+
+ Returns:
+ Object of type RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList
+ """
+ drv = self._use_driver(account)
+ try:
+ network = drv.neutron_network_get_by_name(link_name)
+ if network:
+ port_list = drv.neutron_port_list(**{'network_id': network['id']})
+ if 'subnets' in network and network['subnets']:
+ subnet = drv.neutron_subnet_get(network['subnets'][0])
+ else:
+ subnet = None
+ virtual_link = drv.utils.network.parse_cloud_virtual_link_info(network, port_list, subnet)
+ else:
+ return None
+ except Exception as e:
+ self.log.exception("Exception %s occured during virtual-link-get-by-name", str(e))
+ raise
+ return virtual_link
+
@rwstatus(ret_on_failure=[None])
def do_get_virtual_link_list(self, account):
"""Get information about all the virtual links
account - a cloud account
Returns:
- A list of objects of type RwcalYang.VirtualLinkInfoParams
+ A list of objects of type RwcalYang.YangData_RwProject_Project_VnfResources_VirtualLinkInfoList
"""
- vnf_resources = RwcalYang.VNFResources()
+ vnf_resources = RwcalYang.YangData_RwProject_Project_VnfResources()
drv = self._use_driver(account)
try:
networks = drv.neutron_network_list()
Arguments:
account - a cloud account
- vdu_init - information about VDU to create (RwcalYang.VDUInitParams)
+ vdu_init - information about VDU to create (RwcalYang.YangData_RwProject_Project_VduInitParams)
Returns:
The vdu_id
"""
drv = self._use_driver(account)
+ vdu_prepared = True
try:
kwargs = drv.utils.compute.make_vdu_create_args(vdu_init, account)
vm_id = drv.nova_server_create(**kwargs)
- self.prepare_vdu_on_boot(account, vm_id, vdu_init)
+ vdu_prepared_on_boot = self.prepare_vdu_on_boot(account, vm_id, vdu_init)
+ vdu_prepared = vdu_prepared_on_boot["status"]
except Exception as e:
self.log.exception("Exception %s occured during create-vdu", str(e))
raise
+
+ if vdu_prepared is False:
+ drv.utils.compute.perform_vdu_network_cleanup(vm_id)
+ drv.nova_server_delete(vm_id)
+ self.log.exception("Cleaning Up VDU as Prepare Vdu Failed : %s", vdu_prepared_on_boot["exception"])
+ raise PrepareVduOnBoot(vdu_prepared_on_boot["exception"])
return vm_id
def prepare_vdu_on_boot(self, account, server_id, vdu_params):
- cmd = PREPARE_VM_CMD.format(auth_url = account.openstack.auth_url,
- username = account.openstack.key,
- password = account.openstack.secret,
- tenant_name = account.openstack.tenant,
- region = account.openstack.region,
- user_domain = account.openstack.user_domain,
- project_domain = account.openstack.project_domain,
- mgmt_network = account.openstack.mgmt_network,
- server_id = server_id)
+ if vdu_params.mgmt_network is not None:
+ mgmt_network_param = vdu_params.mgmt_network
+ else:
+ mgmt_network_param = account.openstack.mgmt_network
+
+ # Adding shell quote to all parameters in case they contain special characters.
+ cmd = PREPARE_VM_CMD.format(auth_url = shlex.quote(account.openstack.auth_url),
+ username = shlex.quote(account.openstack.key),
+ password = shlex.quote(account.openstack.secret),
+ tenant_name = shlex.quote(account.openstack.tenant),
+ region = shlex.quote(account.openstack.region),
+ user_domain = shlex.quote(account.openstack.user_domain),
+ project_domain = shlex.quote(account.openstack.project_domain),
+ mgmt_network = shlex.quote(mgmt_network_param),
+ server_id = shlex.quote(server_id))
vol_list = list()
+
+ vdu_prepared = {"status": True, "exception": None}
if vdu_params.has_field('allocate_public_address') and vdu_params.allocate_public_address:
- cmd += " --floating_ip"
if account.openstack.has_field('floating_ip_pool'):
+ cmd += " --floating_ip"
cmd += (" --pool_name " + account.openstack.floating_ip_pool)
if vdu_params.has_field('volumes'):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp_file:
yaml.dump(vol_list, tmp_file)
cmd += (" --vol_metadata {}").format(tmp_file.name)
-
+
+
exec_path = 'python3 ' + os.path.dirname(openstack_drv.__file__)
exec_cmd = exec_path + '/' + cmd
self.log.info("Running command: %s" %(exec_cmd))
- subprocess.call(exec_cmd, shell=True)
-
+
+ # The return code for the subprocess is always 0. Hence using check_output
+ # for validating the success/failure of the script
+ # The check_output returns False or True on the basis of exception
+ # handling in prepare_vm.py
+ prepare_vm_status = subprocess.check_output(exec_cmd, shell=True)
+
+ # prepare_vm_status is a string in the format of True/False+ error message
+ # if any. This is to propagate the detailed exception to the callers.
+ vdu_status_elems = prepare_vm_status.decode("utf-8").split("+")
+ if(vdu_status_elems[0] == 'False'):
+ self.log.exception("Exception occured while preparing vdu after boot")
+ vdu_prepared = {"status": False, "exception": vdu_status_elems[1]}
+
+ return vdu_prepared
+
@rwstatus
def do_modify_vdu(self, account, vdu_modify):
"""Modify Properties of existing virtual deployment unit
Arguments:
account - a cloud account
- vdu_modify - Information about VDU Modification (RwcalYang.VDUModifyParams)
+ vdu_modify - Information about VDU Modification (RwcalYang.YangData_RwProject_Project_VduModifyParams)
"""
drv = self._use_driver(account)
### First create required number of ports aka connection points
raise
- @rwstatus(ret_on_failure=[None])
- def do_get_vdu(self, account, vdu_id):
+ @rwcalstatus(ret_on_failure=[None])
+ def do_get_vdu(self, account, vdu_id, mgmt_network):
"""Get information about a virtual deployment unit.
Arguments:
account - a cloud account
vdu_id - id for the vdu
+ mgmt_network - mgmt_network if provided in NSD VL
Returns:
- Object of type RwcalYang.VDUInfoParams
+ Object of type RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList
"""
+ if mgmt_network not in [None, ""]:
+ account.openstack.mgmt_network = mgmt_network
+
drv = self._use_driver(account)
try:
vm_info = drv.nova_server_get(vdu_id)
vdu_info = drv.utils.compute.parse_cloud_vdu_info(vm_info)
except Exception as e:
- self.log.exception("Exception %s occured during get-vdu", str(e))
- raise
+ self.log.debug("Exception occured during get-vdu: %s", str(e))
+ raise
return vdu_info
- @rwstatus(ret_on_failure=[None])
+ @rwcalstatus(ret_on_failure=[None])
def do_get_vdu_list(self, account):
"""Get information about all the virtual deployment units
account - a cloud account
Returns:
- A list of objects of type RwcalYang.VDUInfoParams
+ A list of objects of type RwcalYang.YangData_RwProject_Project_VnfResources_VduInfoList
"""
- vnf_resources = RwcalYang.VNFResources()
+ vnf_resources = RwcalYang.YangData_RwProject_Project_VnfResources()
drv = self._use_driver(account)
try:
vms = drv.nova_server_list()
vdu = drv.utils.compute.parse_cloud_vdu_info(vm)
vnf_resources.vdu_info_list.append(vdu)
except Exception as e:
- self.log.exception("Exception %s occured during get-vdu-list", str(e))
- raise
+ self.log.debug("Exception occured during get-vdu-list: %s", str(e))
+ raise
return vnf_resources
include(rift_plugin)
-rift_install_python_plugin(rwcal_vsphere rwcal_vsphere.py)
+rift_install_gobject_python_plugin(rwcal_vsphere rwcal_vsphere.py COMPONENT ${INSTALL_COMPONENT})
@rwstatus(ret_on_failure=[None])
def do_get_virtual_link(self, account, link_id):
raise NotImplementedError()
+
+ @rwstatus(ret_on_failure=[None])
+ def do_get_virtual_link_by_name(self, account, link_name):
+ raise NotImplementedError()
@rwstatus(ret_on_failure=[""])
def do_get_virtual_link_list(self, account):
def do_delete_vdu(self, account, vdu_id):
raise NotImplementedError()
- @rwstatus(ret_on_failure=[None])
- def do_get_vdu(self, account, vdu_id):
+ @rwcalstatus(ret_on_failure=[None])
+ def do_get_vdu(self, account, vdu_id, mgmt_network):
+ # mgmt_network - Added due to need for mgmt network.
+ # TO DO: Investigate the need for aws.
raise NotImplementedError()
- @rwstatus(ret_on_failure=[""])
+ @rwcalstatus(ret_on_failure=[None])
def do_get_vdu_list(self, account):
raise NotImplementedError()
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
YANG_FILES
${source_yang_files}
${rw_cal_log_file}
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
DEPENDS
mano-types_yang
+ rwprojectmano_yang
LIBRARIES
rwschema_yang_gen
rwyang
rwlog
rwlog-mgmt_yang_gen
mano-types_yang_gen
+ rwprojectmano_yang_gen
)
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix rwbase;
}
- import rw-pb-ext {
- prefix "rwpb";
- }
-
import rw-yang-types {
prefix "rwt";
}
prefix "manotypes";
}
+ import ietf-inet-types {
+ prefix "inet";
+ }
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2014-12-30 {
description
"Initial revision.";
}
+
typedef connection-status {
description "Connection status for the cloud account";
type enumeration {
grouping connection-status {
container connection-status {
config false;
- rwpb:msg-new CloudConnectionStatus;
leaf status {
type connection-status;
}
}
}
+ grouping custom-meta-data {
+ description "Grouping for instance-specific meta data";
+ list custom-meta-data {
+ description
+ "List of meta-data to be associated with the instance";
+ key "name";
+ leaf name {
+ description "Name of the meta-data parameter";
+ type string;
+ }
+
+ leaf data-type {
+ description "Data-type the meta-data parameter";
+ type manotypes:meta-data-type;
+ default "STRING";
+ }
+
+ leaf value {
+ description "Value of the meta-data parameter";
+ type string;
+ }
+
+ leaf destination {
+ description "Type of input parameter";
+ type enumeration {
+ enum "CLOUD_INIT";
+ enum "CLOUD_METADATA";
+ }
+ default "CLOUD_METADATA";
+ }
+ }
+ }
+
uses connection-status;
grouping provider-auth {
leaf mgmt-network {
type string;
- mandatory true;
}
leaf plugin-name {
}
}
}
+
+ container prop_cloud1 {
+ leaf host {
+ description "This is a single-host cloud. IP address of host";
+ type string;
+ mandatory true;
+ }
+ leaf username {
+ description "Username to access host";
+ type string;
+ }
+ leaf password {
+ description "Password for user";
+ type string;
+ }
+ leaf mgmt-network {
+ description "Name of bridge used for management access to VMs on cloud";
+ type string;
+ mandatory true;
+ }
+ leaf public-ip-pool {
+ description "Public IP pool for VMs";
+ type string;
+ mandatory true;
+ }
+ leaf wan-interface {
+ description "WAN interface name";
+ type string;
+ mandatory true;
+ }
+ leaf firewall {
+ description "Firewall services";
+ type string;
+ }
+ leaf plugin-name {
+ type string;
+ default "rwcal_brocade";
+ }
+ leaf dynamic-flavor-support {
+ type boolean;
+ default true;
+ }
+ }
}
}
-
+
+ grouping instance-timeout {
+ leaf vdu-instance-timeout {
+ description "VDU instantiation timeout";
+ type uint64;
+ default 300;
+ }
+ }
+
grouping vm-info-item {
leaf vm-name {
- rwpb:field-inline "true";
- rwpb:field-string-max 255;
type string;
}
leaf vm-size {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf vm-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf flavor-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf image-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf state {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf availability-zone {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf tenant-name {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf host-name {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf management-ip {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf public-ip {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf allocate-public-address {
- rwpb:field-inline "true";
description "If this VM should allocate a floating public IP address";
type boolean;
default false;
key "ip-address";
leaf ip-address {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
}
key "ip-address";
leaf ip-address {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
}
list port-list {
key "port-id";
leaf port-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
}
list network-list {
key "network-id";
leaf network-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
}
leaf disk_format {
description "Format of the Disk";
- type disk-format;
+ type string;
default "qcow2";
}
leaf container_format {
description "Format of the container";
- type container-format;
+ type string;
default "bare";
}
container user-tags {
description "User tags associated with Image";
leaf checksum {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
+ type string;
+ }
+ }
+
+ list properties {
+ key "name";
+ leaf name {
+ description "Name of the image property";
+ type string;
+ }
+ leaf property_value {
+ description "Value of the image property";
type string;
}
}
grouping network-info-item {
leaf network-name {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf network-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf subnet {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
grouping port-info-item {
leaf port-name {
- rwpb:field-inline "true";
- rwpb:field-string-max 255;
type string;
}
leaf port-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf port-state {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf network-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf ip-address {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf vm-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
}
}
- container cloud-accounts {
- list cloud-account-list {
- rwpb:msg-new CloudAccount;
- key "name";
+ augment "/rw-project:project" {
+ container cloud-accounts {
+ list cloud-account-list {
+ key "name";
- leaf name {
- type string;
+ leaf name {
+ type string;
+ }
+ uses provider-auth;
+ uses instance-timeout;
}
- uses provider-auth;
}
}
- container vim-resources {
- rwpb:msg-new VimResources;
- config false;
-
- list vminfo-list {
- rwpb:msg-new VMInfoItem;
+ augment "/rw-project:project" {
+ container vim-resources {
config false;
- key "vm-id";
-
- uses vm-info-item;
- }
- list imageinfo-list {
- rwpb:msg-new ImageInfoItem;
- config false;
- key "id";
+ list vminfo-list {
+ config false;
+ key "vm-id";
- uses image-info-item;
- }
+ uses vm-info-item;
+ }
- list tenantinfo-list {
- rwpb:msg-new TenantInfoItem;
- config false;
- key "tenant-id";
+ list imageinfo-list {
+ config false;
+ key "id";
- leaf tenant-name {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
+ uses image-info-item;
}
- leaf tenant-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
- }
+ list tenantinfo-list {
+ config false;
+ key "tenant-id";
- list userinfo-list {
- rwpb:msg-new UserInfoItem;
- config false;
- key "user-id";
+ leaf tenant-name {
+ type string;
+ }
- leaf user-name{
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
+ leaf tenant-id {
+ type string;
+ }
}
- leaf user-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
+ list userinfo-list {
+ config false;
+ key "user-id";
+
+ leaf user-name{
+ type string;
+ }
+
+ leaf user-id {
+ type string;
+ }
}
- }
- list roleinfo-list {
- rwpb:msg-new RoleInfoItem;
- config false;
- key "role-id";
+ list roleinfo-list {
+ config false;
+ key "role-id";
- leaf role-name {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
+ leaf role-name {
+ type string;
+ }
+
+ leaf role-id {
+ type string;
+ }
}
- leaf role-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
+ list hostinfo-list {
+ config false;
+ key "host-id";
+
+ leaf host-name {
+ type string;
+ }
+
+ leaf host-id {
+ type string;
+ }
}
- }
- list hostinfo-list {
- rwpb:msg-new HostInfoItem;
- config false;
- key "host-id";
+ list networkinfo-list {
+ config false;
+ key "network-id";
- leaf host-name {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
+ uses network-info-item;
}
- leaf host-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
+ list portinfo-list {
+ config false;
+ key "port-id";
+
+ uses port-info-item;
}
- }
- list networkinfo-list {
- rwpb:msg-new NetworkInfoItem;
- config false;
- key "network-id";
+ list flavorinfo-list {
+ config false;
+ key "id";
- uses network-info-item;
- }
+ leaf id {
+ type string;
+ }
- list portinfo-list {
- rwpb:msg-new PortInfoItem;
- config false;
- key "port-id";
+ leaf name {
+ type string;
+ }
- uses port-info-item;
+ uses manotypes:vm-flavor;
+ uses manotypes:guest-epa;
+ uses manotypes:vswitch-epa;
+ uses manotypes:hypervisor-epa;
+ uses manotypes:host-epa;
+ uses manotypes:placement-group-input;
+ }
+ }
+ }
+
+ grouping virtual-cp-info-params {
+
+ leaf connection-point-id {
+ description "Connection point id";
+ type string;
}
- list flavorinfo-list {
- rwpb:msg-new FlavorInfoItem;
- config false;
- key "id";
+ leaf name {
+ description "Name of virtual connection point";
+ type string;
+ }
- leaf id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
+ leaf ip-address {
+ description "IP address of the virtual connection point";
+ type inet:ip-address;
+ }
- leaf name {
- rwpb:field-inline "true";
- rwpb:field-string-max 255;
+ leaf mac-address {
+ description "MAC address of the virtual connection point";
type string;
- }
-
- uses manotypes:vm-flavor;
- uses manotypes:guest-epa;
- uses manotypes:vswitch-epa;
- uses manotypes:hypervisor-epa;
- uses manotypes:host-epa;
- uses manotypes:placement-group-input;
}
}
-
+
grouping virtual-link-create-params {
leaf name {
description "Name of the Virtual-Link";
- rwpb:field-inline "true";
- rwpb:field-string-max 255;
type string;
}
leaf subnet {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf associate-public-ip {
type string;
}
+ list virtual-cps {
+ key "name";
+
+ leaf name {
+ description "Name of virtual connection point";
+ type string;
+ }
+
+ leaf type {
+ description "Type of the Virtual Connection Point";
+ type manotypes:connection-point-type;
+ }
+
+ leaf security-group {
+ description "Name of the security group";
+ type string;
+ }
+
+ leaf port-security-enabled {
+ description "Enables the port security";
+ type boolean;
+ }
+
+ leaf static-ip-address {
+ description "Static IP address for the connection point";
+ type inet:ip-address;
+ }
+ }
+
uses manotypes:provider-network;
uses manotypes:ip-profile-info;
}
- container virtual-link-req-params {
- description "This object defines the parameters required to create a virtual-link";
- rwpb:msg-new VirtualLinkReqParams;
- uses virtual-link-create-params;
+ augment "/rw-project:project" {
+ container virtual-link-req-params {
+ description "This object defines the parameters required to create a virtual-link";
+ uses virtual-link-create-params;
+ }
}
-
grouping connection-point-type {
leaf type {
description
SR-IOV : Use SR-IOV interface.
E1000 : Emulate E1000 interface.
RTL8139 : Emulate RTL8139 interface.
- PCNET : Emulate PCNET interface.";
+ PCNET : Emulate PCNET interface.
+ VPORT : Virtual Port.";
type enumeration {
enum VIRTIO;
enum PCI-PASSTHROUGH;
enum E1000;
enum RTL8139;
enum PCNET;
+ enum VPORT;
}
default "VIRTIO";
}
grouping vdu-create-params {
leaf name {
description "Name of the VDU";
- rwpb:field-inline "true";
- rwpb:field-string-max 255;
type string;
}
pass as metadata during the VM creation.";
type string;
}
-
+
uses manotypes:vm-flavor;
uses manotypes:guest-epa;
uses manotypes:vswitch-epa;
uses manotypes:host-epa;
leaf node-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf flavor-id {
description "CAL assigned flavor-id for the VDU image";
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf image-id {
description "CAL assigned image-id for the VDU image";
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf image-name {
description "Image name which can be used to lookup the image-id";
type string;
- rwpb:field-inline "true";
- rwpb:field-string-max 256;
}
leaf image-checksum {
description "Image md5sum checksum used in combination with image name to lookup image-id ";
type string;
- rwpb:field-inline "true";
- rwpb:field-string-max 32;
}
uses manotypes:placement-group-input;
description "Name of the security group";
type string;
}
+
leaf port-security-enabled {
description "Enables the port security";
type boolean;
}
+ leaf static-ip-address {
+ description "Static IP address for the connection point";
+ type inet:ip-address;
+ }
+
+ leaf port-order {
+ description "Port Sequence Order";
+ type uint32;
+ }
+
+ list virtual_cps {
+ key "name";
+ uses virtual-cp-info-params;
+ }
+
uses connection-point-type;
}
}
}
- uses manotypes:supplemental-boot-data;
+ container supplemental-boot-data {
+ uses manotypes:config-file;
+ leaf boot-data-drive {
+ description "Some VIMs implement additional drives to host config-files or meta-data";
+ type boolean;
+ default false;
+ }
+ uses custom-meta-data;
+ }
list volumes {
key "name";
description "Name of the disk-volumes, e.g. vda, vdb etc";
type string;
}
- uses manotypes:volume-info;
- }
- }
- container vdu-init-params {
- description "This object defines the parameters required to create a VDU";
- rwpb:msg-new VDUInitParams;
- uses vdu-create-params;
- }
+ leaf description {
+ description "Description for Volume";
+ type string;
+ }
- container vdu-modify-params {
- description "This object defines the parameters required to modify VDU";
- rwpb:msg-new VDUModifyParams;
+ leaf size {
+ description "Size of disk in GB";
+ type uint64;
+ }
- leaf vdu-id {
- description "CAL assigned id for VDU to which this connection point belongs";
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
+ choice volume-source {
+ description
+ "Defines the source of the volume. Possible options are
+ 1. Ephemeral -- Empty disk
+ 2. Image -- Refer to image to be used for volume
+ 3. Volume -- Reference of pre-existing volume to be used
+ ";
+
+ case ephemeral {
+ leaf ephemeral {
+ type empty;
+ }
+ }
- leaf image-id {
- description "CAL assigned image-id for the VDU image";
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
+ case image {
+ uses manotypes:image-properties;
+ }
- list connection-points-add {
- key "name";
- leaf name {
- description "Name of the connection point";
- type string;
+ case volume {
+ leaf volume-ref {
+ description "Reference for pre-existing volume in VIM";
+ type string;
+ }
+ }
}
- leaf virtual-link-id {
- description "CAL assigned resource Id for the Virtual Link";
- type string;
+
+ leaf device-bus {
+ description "Type of disk-bus on which this disk is exposed to guest";
+ type enumeration {
+ enum ide;
+ enum usb;
+ enum virtio;
+ enum scsi;
+ }
}
- leaf associate-public-ip {
- type boolean;
- default false;
+
+ leaf device-type {
+ description "The type of device as exposed to guest";
+ type enumeration {
+ enum disk;
+ enum cdrom;
+ enum floppy;
+ enum lun;
+ }
}
- leaf port-security-enabled {
- description "Enables the port security";
+
+ leaf boot-volume {
+ description "This flag indicates if this is boot volume or not";
type boolean;
}
- uses connection-point-type;
+ leaf boot-priority {
+ description "Boot priority associated with volume";
+ type int32;
+ }
+ }
+ }
+
+ augment "/rw-project:project" {
+ container vdu-init-params {
+ description "This object defines the parameters required to create a VDU";
+ uses vdu-create-params;
}
+ }
+
+ augment "/rw-project:project/vdu-init-params/vm-flavor" {
+ uses manotypes:vm-flavor-name;
+ }
+
+ augment "/rw-project:project" {
+ container vdu-modify-params {
+ description "This object defines the parameters required to modify VDU";
+
+ leaf vdu-id {
+ description "CAL assigned id for VDU to which this connection point belongs";
+ type string;
+ }
+
+ leaf static-ip-address {
+ description "Static IP address for the connection point";
+ type inet:ip-address;
+ }
- list connection-points-remove {
- key "connection-point-id";
- leaf connection-point-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
+ uses connection-point-type;
+
+ leaf image-id {
+ description "CAL assigned image-id for the VDU image";
type string;
}
+
+ list connection-points-add {
+ key "name";
+ leaf name {
+ description "Name of the connection point";
+ type string;
+ }
+ leaf virtual-link-id {
+ description "CAL assigned resource Id for the Virtual Link";
+ type string;
+ }
+ leaf associate-public-ip {
+ type boolean;
+ default false;
+ }
+ leaf port-security-enabled {
+ description "Enables the port security";
+ type boolean;
+ }
+
+ uses connection-point-type;
+ }
+
+ list connection-points-remove {
+ key "connection-point-id";
+ leaf connection-point-id {
+ type string;
+ }
+ }
}
}
grouping connection-point-info-params {
leaf connection-point-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf virtual-link-id {
description "CAL assigned resource ID of the Virtual-Link";
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf vdu-id {
description "CAL assigned id for VDU to which this connection point belongs";
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
}
leaf ip-address {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
+ type inet:ip-address;
}
leaf public-ip {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf mac-addr {
- rwpb:field-inline "true";
- rwpb:field-string-max 48;
type string;
}
+
+ leaf port-order {
+ description "Port Sequence Order";
+ type uint32;
+ }
+
+ list virtual-cp-info {
+ key "ip-address";
+
+ leaf ip-address {
+ type inet:ip-address;
+ }
+
+ leaf mac-address {
+ type string;
+ }
+ }
}
grouping virtual-link-info-params {
leaf name {
description "Name of the Virtual-Link";
- rwpb:field-inline "true";
- rwpb:field-string-max 255;
type string;
}
leaf virtual-link-id {
description "CAL assigned resource ID of the Virtual-Link";
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
uses connection-point-info-params;
}
+ list virtual-connection-points {
+ key connection-point-id;
+ uses virtual-cp-info-params;
+ }
+
leaf subnet {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
grouping vdu-info-params {
leaf vdu-id {
description "CAL assigned id for VDU";
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf name {
description "Name of the VDU";
- rwpb:field-inline "true";
- rwpb:field-string-max 255;
type string;
}
leaf flavor-id {
description "CAL assigned flavor-id for the VDU image";
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf image-id {
description "CAL assigned image-id for the VDU image";
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf node-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
}
leaf management-ip {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
leaf public-ip {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
description "Console URL from the VIM, if available";
}
- uses manotypes:supplemental-boot-data;
+ container supplemental-boot-data {
+ uses manotypes:config-file;
+ leaf boot-data-drive {
+ description "Some VIMs implement additional drives to host config-files or meta-data";
+ type boolean;
+ default false;
+ }
+ uses custom-meta-data;
+ }
list volumes {
key "name";
leaf volume-id {
description "CAL assigned volume-id ";
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
}
+
+ uses custom-meta-data;
}
}
- container vnf-resources {
- rwpb:msg-new VNFResources;
- config false;
-
- list virtual-link-info-list {
- rwpb:msg-new VirtualLinkInfoParams;
+ augment "/rw-project:project" {
+ container vnf-resources {
config false;
- key virtual-link-id;
- uses virtual-link-info-params;
- }
- list vdu-info-list {
- rwpb:msg-new VDUInfoParams;
- config false;
- key vdu-id;
- uses vdu-info-params;
+ list virtual-link-info-list {
+ config false;
+ key virtual-link-id;
+ uses virtual-link-info-params;
+ }
+
+ list vdu-info-list {
+ config false;
+ key vdu-id;
+ uses vdu-info-params;
+ }
}
}
}
def upload_image(self, location, name=None):
"""Onboard image to cloudsim server."""
- image = RwcalYang.ImageInfoItem()
+ image = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList()
image.name = name or os.path.basename(location)
image.location = location
image.disk_format = "qcow2"
import functools
import gi
gi.require_version('RwTypes', '1.0')
+gi.require_version('RwCal', '1.0')
from gi.repository import RwTypes, RwCal
),
),
+ (r"/api/get_virtual_link_by_name", CalRequestHandler,
+ mk_attrs(
+ cal_method="get_virtual_link_by_name",
+ input_params=[
+ RPCParam("link_name"),
+ ],
+ output_params=[
+ RPCParam("response", "VirtualLinkInfoParams"),
+ ],
+ ),
+ ),
+
(r"/api/get_virtual_link_list", CalRequestHandler,
mk_attrs(
cal_method="get_virtual_link_list",
"""Start the server."""
cal = self.get_cal_interface()
- account = RwcalYang.CloudAccount(account_type="cloudsim")
+ account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList(account_type="cloudsim")
tornado.platform.asyncio.AsyncIOMainLoop().install()
loop = asyncio.get_event_loop()
rw_vx_plugin
peas-1.0)
-add_dependencies(rwcal_api rwmanifest_yang.headers)
+install(TARGETS rwcal_api LIBRARY DESTINATION usr/lib COMPONENT ${INSTALL_COMPONENT})
-install(TARGETS rwcal_api LIBRARY DESTINATION usr/lib COMPONENT ${PKG_LONG_NAME})
-
-install(PROGRAMS rwvim.py DESTINATION usr/bin COMPONENT ${PKG_LONG_NAME})
+install(PROGRAMS rwvim.py DESTINATION usr/bin COMPONENT ${INSTALL_COMPONENT})
RIFT.ware-ready.py
openstack_resources.py
DESTINATION usr/bin
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
)
"""
Returns AWS cal account
"""
- account = RwcalYang.CloudAccount()
+ account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
account.account_type = "aws"
account.aws.key = kwargs['key']
account.aws.secret = kwargs['secret']
Create Mission Control VM in AWS
"""
logger.info("Creating mission control VM")
- vdu = RwcalYang.VDUInitParams()
+ vdu = RwcalYang.YangData_RwProject_Project_VduInitParams()
vdu.name = MISSION_CONTROL_NAME
vdu.image_id = RIFT_IMAGE_AMI
vdu.flavor_id = 'c3.large'
inst=driver.get_instance(self._mc_id)
inst.wait_until_running()
- rc,rs =self._cal.get_vdu(self._acct,self._mc_id)
+ rc,rs =self._cal.get_vdu(self._acct,self._mc_id, "")
assert rc == RwStatus.SUCCESS
self._mc_public_ip = rs.public_ip
self._mc_private_ip = rs.management_ip
salt_master=self._mc_private_ip
node_id = str(uuid.uuid4())
- vdu = RwcalYang.VDUInitParams()
+ vdu = RwcalYang.YangData_RwProject_Project_VduInitParams()
vdu.name = LAUNCHPAD_NAME
vdu.image_id = RIFT_IMAGE_AMI
vdu.flavor_id = 'c3.xlarge'
inst=driver.get_instance(self._lp_id)
inst.wait_until_running()
- rc,rs =self._cal.get_vdu(self._acct,self._lp_id)
+ rc,rs =self._cal.get_vdu(self._acct,self._lp_id, "")
assert rc == RwStatus.SUCCESS
self._lp_public_ip = rs.public_ip
PROGRAMS
cal_module_test
DESTINATION usr/rift/systemtest/cal_module_test
- COMPONENT ${PKG_LONG_NAME})
+ COMPONENT ${INSTALL_COMPONENT}
+ )
install(
FILES
pytest/conftest.py
pytest/cal_module_test.py
DESTINATION usr/rift/systemtest/cal_module_test/pytest
- COMPONENT ${PKG_LONG_NAME})
+ COMPONENT ${INSTALL_COMPONENT}
+ )
install(
FILES
racfg/cal_module_test.racfg
DESTINATION
usr/rift/systemtest/cal_module_test
- COMPONENT ${PKG_LONG_NAME})
+ COMPONENT ${INSTALL_COMPONENT}
+ )
import pytest
+import rift.auto.mano
+
from gi import require_version
require_version('RwCal', '1.0')
logger = logging.getLogger('rwcal')
logging.basicConfig(level=logging.INFO)
+def short_id():
+ return uuid.uuid4().hex[:10]
class CloudConfig(object):
def __init__(self, cal, account):
Return:
CloudAccount details
"""
- account = RwcalYang.CloudAccount.from_dict({
+ account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList.from_dict({
"account_type": "aws",
"aws": {
"key": option.aws_user,
Returns:
FlavorInfoItem
"""
- flavor = RwcalYang.FlavorInfoItem.from_dict({
- "name": str(uuid.uuid4()),
+ flavor = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList.from_dict({
+ "name": rift.auto.mano.resource_name(short_id()),
"vm_flavor": {
"memory_mb": 1024,
"vcpu_count": 1,
Returns:
VDUInitParams
"""
- vdu = RwcalYang.VDUInitParams.from_dict({
- "name": str(uuid.uuid4()),
+ vdu = RwcalYang.YangData_RwProject_Project_VduInitParams.from_dict({
+ "name": rift.auto.mano.resource_name(short_id()),
"node_id": "123456789012345",
"image_id": self.image_id,
"flavor_id": "t2.micro"
})
c1 = vdu.connection_points.add()
- c1.name = str(uuid.uuid4())
+ c1.name = rift.auto.mano.resource_name(short_id())
c1.virtual_link_id = self.virtual_link_id
return vdu
Returns:
VirtualLinkReqParams
"""
- vlink = RwcalYang.VirtualLinkReqParams.from_dict({
- "name": str(uuid.uuid4()),
+ vlink = RwcalYang.YangData_RwProject_Project_VirtualLinkReqParams.from_dict({
+ "name": rift.auto.mano.resource_name(short_id()),
"subnet": '172.31.64.0/20',
})
Return:
CloudAccount details
"""
- account = RwcalYang.CloudAccount.from_dict({
+ account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList.from_dict({
'name': "cloudsim",
'account_type':'cloudsim_proxy'})
Returns:
ImageInfoItem
"""
- image = RwcalYang.ImageInfoItem.from_dict({
- "name": str(uuid.uuid4()),
+ image = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList.from_dict({
+ "name": rift.auto.mano.resource_name(short_id()),
"location": os.path.join(os.getenv("RIFT_ROOT"), "images/rift-root-latest.qcow2"),
"disk_format": "qcow2",
"container_format": "bare",
Returns:
FlavorInfoItem
"""
- flavor = RwcalYang.FlavorInfoItem.from_dict({
- "name": str(uuid.uuid4()),
+ flavor = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList.from_dict({
+ "name": rift.auto.mano.resource_name(short_id()),
"vm_flavor": {
"memory_mb": 16392,
"vcpu_count": 4,
Returns:
VDUInitParams
"""
- vdu = RwcalYang.VDUInitParams.from_dict({
- "name": str(uuid.uuid4()),
+ vdu = RwcalYang.YangData_RwProject_Project_VduInitParams.from_dict({
+ "name": rift.auto.mano.resource_name(short_id()),
"node_id": "123456789012345",
"image_id": self.image_id,
"flavor_id": self.flavor_id,
})
c1 = vdu.connection_points.add()
- c1.name = str(uuid.uuid4())
+ c1.name = rift.auto.mano.resource_name(short_id())
c1.virtual_link_id = self.virtual_link_id
return vdu
Returns:
VirtualLinkReqParams
"""
- vlink = RwcalYang.VirtualLinkReqParams.from_dict({
- "name": str(uuid.uuid4()),
+ vlink = RwcalYang.YangData_RwProject_Project_VirtualLinkReqParams.from_dict({
+ "name": rift.auto.mano.resource_name(short_id()),
"subnet": '192.168.1.0/24',
})
Returns:
CloudAccount
"""
- acct = RwcalYang.CloudAccount.from_dict({
+ acct = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList.from_dict({
"account_type": "openstack",
"openstack": {
"key": option.os_user,
Returns:
ImageInfoItem
"""
- image = RwcalYang.ImageInfoItem.from_dict({
- "name": str(uuid.uuid4()),
+ image = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList.from_dict({
+ "name": rift.auto.mano.resource_name(short_id()),
"location": os.path.join(os.getenv("RIFT_ROOT"), "images/rift-root-latest.qcow2"),
"disk_format": "qcow2",
"container_format": "bare",
Returns:
FlavorInfoItem
"""
- flavor = RwcalYang.FlavorInfoItem.from_dict({
- "name": str(uuid.uuid4()),
+ flavor = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList.from_dict({
+ "name": rift.auto.mano.resource_name(short_id()),
"vm_flavor": {
"memory_mb": 16392,
"vcpu_count": 4,
node = flavor.guest_epa.numa_node_policy.node.add()
node.id = i
if i == 0:
- node.vcpu = [0, 1]
+ vcpu0 = node.vcpu.add()
+ vcpu0.id = 0
+ vcpu1 = node.vcpu.add()
+ vcpu1.id = 1
elif i == 1:
- node.vcpu = [2, 3]
+ vcpu2 = node.vcpu.add()
+ vcpu2.id = 2
+ vcpu3 = node.vcpu.add()
+ vcpu3.id = 3
node.memory_mb = 8196
dev = flavor.guest_epa.pcie_device.add()
Returns:
VDUInitParams
"""
- vdu = RwcalYang.VDUInitParams.from_dict({
- "name": str(uuid.uuid4()),
+ vdu = RwcalYang.YangData_RwProject_Project_VduInitParams.from_dict({
+ "name": rift.auto.mano.resource_name(short_id()),
"node_id": "123456789012345",
"image_id": self.image_id,
"flavor_id": self.flavor_id,
})
c1 = vdu.connection_points.add()
- c1.name = str(uuid.uuid4())
+ c1.name = rift.auto.mano.resource_name(short_id())
c1.virtual_link_id = self.virtual_link_id
return vdu
Returns:
VirtualLinkReqParams
"""
- vlink = RwcalYang.VirtualLinkReqParams.from_dict({
- "name": str(uuid.uuid4()),
+ vlink = RwcalYang.YangData_RwProject_Project_VirtualLinkReqParams.from_dict({
+ "name": rift.auto.mano.resource_name(short_id()),
"subnet": '192.168.1.0/24',
})
ids = []
for vdu in vdus.vdu_info_list:
- status, vdu_single = cal.get_vdu(account, vdu.vdu_id)
+ status, vdu_single = cal.get_vdu(account, vdu.vdu_id, "")
assert status == RwStatus.SUCCESS
assert vdu_single.vdu_id == vdu.vdu_id
ids.append(vdu.vdu_id)
account = cloud_config.account
cal = cloud_config.cal
- vdu_modify = RwcalYang.VDUModifyParams()
+ vdu_modify = RwcalYang.YangData_RwProject_Project_VduModifyParams()
vdu_modify.vdu_id = cloud_config.vdu_id
c1 = vdu_modify.connection_points_add.add()
c1.name = "c_modify1"
"target_vm":"VM",
"test_description":"System test targeting module tests for CAL accounts",
"run_as_root": true,
- "status":"working",
- "keywords":["nightly","smoke","MANO","openstack"],
+ "status":"broken",
+ "keywords":["nightly","MANO","openstack"],
"timelimit": 2400,
"networks":[],
"vms":[
for network in netlist.networkinfo_list:
print(network)
- vm = RwcalYang.VMInfoItem()
+ vm = RwcalYang.YangData_RwProject_Project_VimResources_VminfoList()
vm.vm_name = vm_name
vm.flavor_id = size.id
vm.image_id = image.id
nets = dict()
for network in netlist.networkinfo_list:
if network.network_name != "public":
- nwitem = RwcalYang.VMInfoItem_NetworkList()
+ nwitem = RwcalYang.YangData_RwProject_Project_VimResources_VminfoList_NetworkList()
nwitem.network_id = network.network_id
nets[network.network_name] = nwitem
print("creating image \"%s\" using %s ..." % \
(cmdargs.image_name, cmdargs.file_name))
- img = RwcalYang.ImageInfoItem()
+ img = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList()
img.name = cmdargs.image_name
img.location = cmdargs.file_name
img.disk_format = "qcow2"
print(flv)
elif cmdargs.which == 'create':
account.openstack.key = 'admin'
- flavor = RwcalYang.FlavorInfoItem()
+ flavor = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList()
flavor.name = cmdargs.flavor_name
flavor.vm_flavor.memory_mb = cmdargs.memory_size
flavor.vm_flavor.vcpu_count = cmdargs.vcpu_count
if cmdargs.provider_type == 'OPENSTACK':
- account = RwcalYang.CloudAccount()
+ account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
account.account_type = "openstack"
account.openstack.key = cmdargs.user
account.openstack.secret = cmdargs.passwd
kwds = {'subnet_id': __default_subnet__}
else:
kwds = {'network_interfaces': net_ifs}
- print net_ifs
+ print(net_ifs)
new_reservation = self._conn.run_instances(
image_id=self._ami,
addr = "%s.%s.10%d.0/25" % (subnet_addrs_split[0], subnet_addrs_split[1], i)
try:
subnet = vpc_conn.create_subnet(vpc.id, addr)
- except boto.exception.EC2ResponseError, e:
+ except boto.exception.EC2ResponseError as e:
if 'InvalidSubnet.Conflict' == e.error_code:
subnet = vpc_conn.get_all_subnets(filters=[('vpcId', vpc.id), ('cidrBlock', addr)])[0]
else:
"""
Returns cal account
"""
- account = RwcalYang.CloudAccount()
+ account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
account.account_type = "openstack"
account.openstack.key = openstack_info['username']
account.openstack.secret = openstack_info['password']
Creates a VM. The VM name is derived from username
"""
- vm = RwcalYang.VDUInitParams()
+ vm = RwcalYang.YangData_RwProject_Project_VduInitParams()
vm.name = name
vm.flavor_id = self._flavor_id
vm.image_id = self._image_id
def create_network(self, name):
logger.info("Creating network with name: %s" %name)
- network = RwcalYang.NetworkInfoItem()
+ network = RwcalYang.YangData_RwProject_Project_VimResources_NetworkinfoList()
network.network_name = name
network.subnet = openstack_info['subnets'][openstack_info['subnet_index']]
def create_image(self, location):
- img = RwcalYang.ImageInfoItem()
+ img = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList()
img.name = basename(location)
img.location = location
img.disk_format = "qcow2"
"""
Create Flavor suitable for rift_ping_pong VNF
"""
- flavor = RwcalYang.FlavorInfoItem()
+ flavor = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList()
flavor.name = FLAVOR_NAME
flavor.vm_flavor.memory_mb = 16384 # 16GB
flavor.vm_flavor.vcpu_count = 4
--- /dev/null
+#!/usr/bin/python3
+
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import sys
+import rw_peas
+from gi import require_version
+require_version('RwCal', '1.0')
+
+from gi.repository import RwcalYang
+from gi.repository.RwTypes import RwStatus
+import argparse
+import logging
+import rwlogger
+
+persistent_resources = {
+ 'vms' : [],
+ 'networks' : [],
+}
+
+
+RIFT_IMAGE_AMI = 'ami-7070231a'
+
+logging.basicConfig(level=logging.ERROR)
+logger = logging.getLogger('rift.cal.awsresources')
+logger.setLevel(logging.INFO)
+
+def get_cal_plugin():
+ """
+ Load AWS cal plugin
+ """
+ plugin = rw_peas.PeasPlugin('rwcal_aws', 'RwCal-1.0')
+ engine, info, extension = plugin()
+ cal = plugin.get_interface("Cloud")
+ rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+ try:
+ rc = cal.init(rwloggerctx)
+ assert rc == RwStatus.SUCCESS
+ except Exception as e:
+ logger.error("ERROR:Cal plugin instantiation failed with exception %s", repr(e))
+ else:
+ logger.info("AWS Cal plugin successfully instantiated")
+ return cal
+
+def get_cal_account(**kwargs):
+ """
+ Returns AWS cal account
+ """
+ account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
+ account.account_type = "aws"
+ account.aws.key = kwargs['key']
+ account.aws.secret = kwargs['secret']
+ account.aws.region = kwargs['region']
+ if 'ssh_key' in kwargs and kwargs['ssh_key'] is not None:
+ account.aws.ssh_key = kwargs['ssh_key']
+ account.aws.availability_zone = kwargs['availability_zone']
+ if 'vpcid' in kwargs and kwargs['vpcid'] is not None:
+ account.aws.vpcid = kwargs['vpcid']
+ if 'default_subnet_id' in kwargs and kwargs['default_subnet_id'] is not None:
+ account.aws.default_subnet_id = kwargs['default_subnet_id']
+ return account
+
+class AWSResources(object):
+ """
+ Class with methods to manage AWS resources
+ """
+ def __init__(self, **kwargs):
+ self._cal = get_cal_plugin()
+ self._acct = get_cal_account(**kwargs)
+
+ def get_image_list(self):
+ """
+ Get Image list
+ """
+ logger.info("Initiating Get image list")
+ rc, rsp = self._cal.get_image_list(self._acct)
+
+ print("Return resp: rsp ", rsp)
+
+ logger.info("Get image list complete")
+
+
+ def get_image(self, image_ami_id):
+ """
+ Get Image from AMI id
+ """
+ logger.info("Initiating Get image")
+ rc, rsp = self._cal.get_image(self._acct, image_ami_id)
+
+ print("Return code: rc ", rc)
+ print("Return resp: rsp ", rsp)
+
+ logger.info("Get image complete")
+
+
+
+def main():
+ """
+ Main routine
+
+ New AWS credentials were created as follows:
+ User: aws_riftio
+ Access Key ID: AKIAJQ4D3X5WO3P6JXKA
+ Secret Access key: 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
+ Pasword: RhN*q2ze*fpY
+
+ The following AWS cloud account config can be used on LP CLI:
+ cloud account AWS account-type aws aws key AKIAJQ4D3X5WO3P6JXKA secret 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 region us-east-1 vpcid vpc-cb1cd2af ssh-key rift-awskey availability-zone us-east-1c default-subnet-id subnet-73796d04 plugin-name rwcal_aws dynamic-flavor-support true`
+ """
+ parser = argparse.ArgumentParser(description='Script to manage AWS resources')
+
+ parser.add_argument('--aws-key',
+ action = 'store',
+ dest = 'aws_key',
+ type = str,
+ help='AWS key')
+
+ parser.add_argument('--aws-secret',
+ action = 'store',
+ dest = 'aws_secret',
+ type = str,
+ help='AWS secret')
+
+ parser.add_argument('--aws-region',
+ action = 'store',
+ dest = 'aws_region',
+ type = str,
+ help='AWS region')
+
+ parser.add_argument('--aws-az',
+ action = 'store',
+ dest = 'aws_az',
+ type = str,
+ help='AWS Availability zone')
+
+ parser.add_argument('--aws-sshkey',
+ action = 'store',
+ dest = 'aws_sshkey',
+ type = str,
+ help='AWS SSH Key to login to instance')
+
+ parser.add_argument('--aws-vpcid',
+ action = 'store',
+ dest = 'aws_vpcid',
+ type = str,
+ help='AWS VPC ID to use to indicate non default VPC')
+
+ parser.add_argument('--aws-default-subnet',
+ action = 'store',
+ dest = 'aws_default_subnet',
+ type = str,
+ help='AWS Default subnet id in VPC to be used for mgmt network')
+
+ argument = parser.parse_args()
+
+ '''
+ User: aws_riftio
+ Access Key ID: AKIAJQ4D3X5WO3P6JXKA
+ Secret Access key: 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
+ Pasword: RhN*q2ze*fpY
+
+ cloud account AWS account-type aws aws key AKIAJQ4D3X5WO3P6JXKA secret 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 region us-east-1 vpcid vpc-cb1cd2af ssh-key rift-awskey availability-zone us-east-1c default-subnet-id subnet-73796d04 plugin-name rwcal_aws dynamic-flavor-support true
+ '''
+
+ argument.aws_key = "AKIAJQ4D3X5WO3P6JXKA"
+ argument.aws_secret = "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"
+ argument.aws_region = "us-east-1"
+ argument.aws_az = "us-east-1c"
+ argument.aws_sshkey = "rift-awskey"
+ argument.aws_vpcid = "vpc-cb1cd2af"
+ argument.aws_default_subnet = "subnet-73796d04"
+
+ if (argument.aws_key is None or argument.aws_secret is None or argument.aws_region is None or
+ argument.aws_az is None):
+ logger.error("Missing mandatory params. AWS Key, Secret, Region, AZ and SSH key are mandatory params")
+ sys.exit(-1)
+
+
+ ### Start processing
+ logger.info("Instantiating cloud-abstraction-layer")
+ drv = AWSResources(key=argument.aws_key, secret=argument.aws_secret, region=argument.aws_region, availability_zone = argument.aws_az,
+ ssh_key = argument.aws_sshkey, vpcid = argument.aws_vpcid, default_subnet_id = argument.aws_default_subnet)
+ logger.info("Instantiating cloud-abstraction-layer.......[Done]")
+
+ logger.info("Testing image list APIs")
+ drv.get_image_list()
+ logger.info("Finished testing image list APIs")
+
+ logger.info("Testing get image APIs for rift ping image - Present in Owner account")
+ drv.get_image('ami-eb0a5f81')
+ logger.info("Finished testing get image APIs")
+
+ logger.info("Testing get image APIs for public vyos image")
+ drv.get_image('ami-9ea315f6')
+ logger.info("Finished testing get image APIs")
+
+ logger.info("Testing get image APIs for public PalotAlto FW image")
+ drv.get_image('ami-34ca984f')
+ logger.info("Finished testing get image APIs")
+
+
+if __name__ == '__main__':
+ main()
# The account object is not currently used, but it is required by the CAL
# interface, so we create an empty object here to represent it.
- account = RwcalYang.CloudAccount()
+ account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
account.account_type = "lxc"
# Make sure that any containers that were previously created have been
logger.info(args.rootfs)
# Create an image that can be used to create VMs
- image = RwcalYang.ImageInfoItem()
+ image = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList()
image.name = 'rift-master'
image.lxc.size = '2.5G'
image.lxc.template_path = template
# Create a VM
vms = []
for index in range(args.num_vms):
- vm = RwcalYang.VMInfoItem()
+ vm = RwcalYang.YangData_RwProject_Project_VimResources_VminfoList()
vm.vm_name = 'rift-s{}'.format(index + 1)
vm.image_id = image.id
vms.append(vm)
# Create the default and data networks
- network = RwcalYang.NetworkInfoItem(network_name='virbr0')
+ network = RwcalYang.YangData_RwProject_Project_VimResources_NetworkinfoList(network_name='virbr0')
cal.create_network(account, network)
os.system('/usr/sbin/brctl show')
# Create pairs of ports to connect the networks
for index, vm in enumerate(vms):
- port = RwcalYang.PortInfoItem()
+ port = RwcalYang.YangData_RwProject_Project_VimResources_PortinfoList()
port.port_name = "eth0"
port.network_id = network.network_id
port.vm_id = vm.vm_id
def _get_cal_account(self):
"""
- Creates an object for class RwcalYang.CloudAccount()
+ Creates an object for class RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
"""
- account = RwcalYang.CloudAccount()
+ account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
account.account_type = "openstack"
account.openstack.key = "{}_user".format(self.test_prefix)
account.openstack.secret = "mypasswd"
# Important information about openstack installation. This needs to be manually verified
#
openstack_info = {
- 'username' : 'xxxx',
- 'password' : 'xxxxxx',
- 'auth_url' : 'http://10.66.4.19:5000/v2.0/',
+ 'username' : 'xxxxxx',
+ 'password' : 'xxxxx',
+ 'auth_url' : 'http://10.66.4.102:5000/v2.0/',
'project_name' : 'xxxxx',
'mgmt_network' : 'private',
'reserved_flavor' : 'm1.medium',
def get_cal_account():
"""
- Creates an object for class RwcalYang.CloudAccount()
+ Creates an object for class RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
"""
- account = RwcalYang.CloudAccount()
+ account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
account.name = "Gruntxx"
account.account_type = "openstack"
account.openstack.key = openstack_info['username']
def _get_image_info_request(self):
"""
- Returns request object of type RwcalYang.ImageInfoItem()
+ Returns request object of type RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList()
"""
- img = RwcalYang.ImageInfoItem()
+ img = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList()
img.name = "rift.cal.unittest.image"
img.location = '/net/sharedfiles/home1/common/vm/rift-root-latest.qcow2'
img.disk_format = "qcow2"
def _get_flavor_info_request(self):
"""
- Returns request object of type RwcalYang.FlavorInfoItem()
+ Returns request object of type RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList()
"""
- flavor = RwcalYang.FlavorInfoItem()
+ flavor = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList()
flavor.name = 'rift.cal.unittest.flavor'
flavor.vm_flavor.memory_mb = 16384 # 16GB
flavor.vm_flavor.vcpu_count = 4
"""
Returns request object of type RwcalYang.VMInfoItem
"""
- vm = RwcalYang.VMInfoItem()
+ vm = RwcalYang.YangData_RwProject_Project_VimResources_VminfoList()
vm.vm_name = 'rift.cal.unittest.vm'
vm.flavor_id = flavor_id
vm.image_id = image_id
def _get_network_info_request(self):
"""
- Returns request object of type RwcalYang.NetworkInfoItem
+ Returns request object of type RwcalYang.YangData_RwProject_Project_VimResources_NetworkinfoList
"""
- network = RwcalYang.NetworkInfoItem()
+ network = RwcalYang.YangData_RwProject_Project_VimResources_NetworkinfoList()
network.network_name = 'rift.cal.unittest.network'
network.subnet = '192.168.16.0/24'
if openstack_info['physical_network']:
def _get_port_info_request(self, network_id, vm_id):
"""
- Returns an object of type RwcalYang.PortInfoItem
+ Returns an object of type RwcalYang.YangData_RwProject_Project_VimResources_PortinfoList
"""
- port = RwcalYang.PortInfoItem()
+ port = RwcalYang.YangData_RwProject_Project_VimResources_PortinfoList()
port.port_name = 'rift.cal.unittest.port'
port.network_id = network_id
if vm_id != None:
"""
logger.info("Openstack-CAL-Test: Test Get VDU List APIs")
rc, rsp = self.cal.get_vdu_list(self._acct)
- self.assertEqual(rc, RwStatus.SUCCESS)
+ self.assertEqual(rc.status, RwStatus.SUCCESS)
logger.info("Openstack-CAL-Test: Received %d VDUs" %(len(rsp.vdu_info_list)))
for vdu in rsp.vdu_info_list:
- rc, vdu2 = self.cal.get_vdu(self._acct, vdu.vdu_id)
+ rc, vdu2 = self.cal.get_vdu(self._acct, vdu.vdu_id, "")
+ self.assertEqual(rc.status, RwStatus.SUCCESS)
+ # Make changes for the third argument (currently None for mgmt_network).
+ # This is the mgmt_network published in the vdur (vdu.mgmt_network).
+ # Pass accordingly as per the use case of the test.
self.assertEqual(vdu2.vdu_id, vdu.vdu_id)
def _get_virtual_link_request_info(self):
"""
- Returns object of type RwcalYang.VirtualLinkReqParams
+ Returns object of type RwcalYang.YangData_RwProject_Project_VirtualLinkReqParams
"""
- vlink = RwcalYang.VirtualLinkReqParams()
+ vlink = RwcalYang.YangData_RwProject_Project_VirtualLinkReqParams()
vlink.name = 'rift.cal.virtual_link'
vlink.subnet = '192.168.1.0/24'
if openstack_info['physical_network']:
def _get_vdu_request_info(self, virtual_link_id):
"""
- Returns object of type RwcalYang.VDUInitParams
+ Returns object of type RwcalYang.YangData_RwProject_Project_VduInitParams
"""
- vdu = RwcalYang.VDUInitParams()
+ vdu = RwcalYang.YangData_RwProject_Project_VduInitParams()
vdu.name = "cal.vdu"
vdu.node_id = OpenStackTest.NodeID
vdu.image_id = self._image.id
def _get_vdu_modify_request_info(self, vdu_id, virtual_link_id):
"""
- Returns object of type RwcalYang.VDUModifyParams
+ Returns object of type RwcalYang.YangData_RwProject_Project_VduModifyParams
"""
- vdu = RwcalYang.VDUModifyParams()
+ vdu = RwcalYang.YangData_RwProject_Project_VduModifyParams()
vdu.vdu_id = vdu_id
c1 = vdu.connection_points_add.add()
c1.name = "c_modify1"
def _get_rbsh_vdu_request_info(self, vlink_list):
"""
- Returns object of type RwcalYang.VDUInitParams
+ Returns object of type RwcalYang.YangData_RwProject_Project_VduInitParams
"""
- vdu = RwcalYang.VDUInitParams()
+ vdu = RwcalYang.YangData_RwProject_Project_VduInitParams()
vdu.name = "cal_rbsh_vdu"
vdu.vm_flavor.memory_mb = 2048
vdu.vm_flavor.vcpu_count = 1
logger.info("Openstack-CAL-Test: Test Create Virtual Link API")
vlink_list = []
for ctr in range(3):
- vlink = RwcalYang.VirtualLinkReqParams()
+ vlink = RwcalYang.YangData_RwProject_Project_VirtualLinkReqParams()
vlink.name = 'rift.cal.virtual_link' + str(ctr)
vlink.subnet = '11.0.{}.0/24'.format(str(1 + ctr))
test_vdu_id = rsp
## Check if VDU get is successful
- rc, rsp = self.cal.get_vdu(self._acct, test_vdu_id)
+ rc, rsp = self.cal.get_vdu(self._acct, test_vdu_id, "")
+ # Make changes for the third argument (currently None for mgmt_network).
+ # This is the mgmt_network published in the vdur (vdu.mgmt_network).
+ # Pass accordingly as per the use case of the test.
+ self.assertEqual(rc.status, RwStatus.SUCCESS)
logger.debug("Get VDU response %s", rsp)
self.assertEqual(rsp.vdu_id, test_vdu_id)
vdu_state = 'inactive'
cp_state = 'inactive'
for i in range(15):
- rc, rsp = self.cal.get_vdu(self._acct, test_vdu_id)
- self.assertEqual(rc, RwStatus.SUCCESS)
+ rc, rsp = self.cal.get_vdu(self._acct, test_vdu_id, "")
+ # Make changes for the third argument (currently None for mgmt_network).
+ # This is the mgmt_network published in the vdur (vdu.mgmt_network).
+ # Pass accordingly as per the use case of the test.
+ self.assertEqual(rc.status, RwStatus.SUCCESS)
logger.info("Openstack-CAL-Test: Iter %d VDU with id : %s. Reached State : %s, mgmt ip %s" %(i, test_vdu_id, rsp.state, rsp.management_ip))
if (rsp.state == 'active') and ('management_ip' in rsp) and ('public_ip' in rsp):
vdu_state = 'active'
### Check vdu list as well
rc, rsp = self.cal.get_vdu_list(self._acct)
- self.assertEqual(rc, RwStatus.SUCCESS)
+ self.assertEqual(rc.status, RwStatus.SUCCESS)
found = False
logger.debug("Get VDU response %s", rsp)
for vdu in rsp.vdu_info_list:
self.assertEqual(found, True)
logger.info("Openstack-CAL-Test: Passed VDU list" )
- @unittest.skip("Skipping test_create_delete_virtual_link_and_vdu")
+ #@unittest.skip("Skipping test_create_delete_virtual_link_and_vdu")
def test_create_delete_virtual_link_and_vdu(self):
"""
Test to create VDU
vdu_id = rsp
## Check if VDU create is successful
- rc, rsp = self.cal.get_vdu(self._acct, rsp)
+ rc, rsp = self.cal.get_vdu(self._acct, rsp, "")
+ # Make changes for the third argument (currently None for mgmt_network).
+ # This is the mgmt_network published in the vdur (vdu.mgmt_network).
+ # Pass accordingly as per the use case of the test.
+
+ self.assertEqual(rc.status, RwStatus.SUCCESS)
self.assertEqual(rsp.vdu_id, vdu_id)
### Wait until vdu_state is active
for i in range(50):
- rc, rs = self.cal.get_vdu(self._acct, vdu_id)
- self.assertEqual(rc, RwStatus.SUCCESS)
+ rc, rs = self.cal.get_vdu(self._acct, vdu_id, "")
+ self.assertEqual(rc.status, RwStatus.SUCCESS)
+ # Make changes for the third argument (currently None for mgmt_network).
+ # This is the mgmt_network published in the vdur (vdu.mgmt_network).
+ # Pass accordingly as per the use case of the test.
+ self.assertEqual(rc.status, RwStatus.SUCCESS)
logger.info("Openstack-CAL-Test: VDU with id : %s. Reached State : %s" %(vdu_id, rs.state))
if rs.state == 'active':
break
- rc, rs = self.cal.get_vdu(self._acct, vdu_id)
- self.assertEqual(rc, RwStatus.SUCCESS)
+ rc, rs = self.cal.get_vdu(self._acct, vdu_id, "")
+ # Make changes for the third argument (currently None for mgmt_network).
+ # This is the mgmt_network published in the vdur (vdu.mgmt_network).
+ # Pass accordingly as per the use case of the test.
+ self.assertEqual(rc.status, RwStatus.SUCCESS)
self.assertEqual(rs.state, 'active')
logger.info("Openstack-CAL-Test: VDU with id : %s reached expected state : %s" %(vdu_id, rs.state))
logger.info("Openstack-CAL-Test: VDUInfo: %s" %(rs))
time.sleep(5)
### Verify that VDU and virtual link are successfully deleted
rc, rsp = self.cal.get_vdu_list(self._acct)
- self.assertEqual(rc, RwStatus.SUCCESS)
+ self.assertEqual(rc.status, RwStatus.SUCCESS)
for vdu in rsp.vdu_info_list:
self.assertNotEqual(vdu.vdu_id, vdu_id)
def _get_vol_vdu_request_info(self, vlink_list):
"""
- Returns object of type RwcalYang.VDUInitParams
+ Returns object of type RwcalYang.YangData_RwProject_Project_VduInitParams
"""
- vdu = RwcalYang.VDUInitParams()
+ vdu = RwcalYang.YangData_RwProject_Project_VduInitParams()
vdu.name = "cal_vdu"
vdu.vm_flavor.memory_mb = 512
vdu.vm_flavor.vcpu_count = 1
"""
logger.info("Openstack-CAL-Test: Test Create Virtual Link API")
vlink_list = []
- vlink = RwcalYang.VirtualLinkReqParams()
+ vlink = RwcalYang.YangData_RwProject_Project_VirtualLinkReqParams()
vlink.name = 'rift.cal.virtual_link'
vlink.subnet = '11.0.1.0/24'
test_vdu_id = rsp
## Check if VDU get is successful
- rc, rsp = self.cal.get_vdu(self._acct, test_vdu_id)
+ rc, rsp = self.cal.get_vdu(self._acct, test_vdu_id, "")
+ self.assertEqual(rc.status, RwStatus.SUCCESS)
+ # Make changes for the third argument (currently None for mgmt_network).
+ # This is the mgmt_network published in the vdur (vdu.mgmt_network).
+ # Pass accordingly as per the use case of the test.
+
logger.debug("Get VDU response %s", rsp)
self.assertEqual(rsp.vdu_id, test_vdu_id)
vdu_state = 'inactive'
cp_state = 'inactive'
for i in range(5):
- rc, rsp = self.cal.get_vdu(self._acct, test_vdu_id)
- self.assertEqual(rc, RwStatus.SUCCESS)
+ rc, rsp = self.cal.get_vdu(self._acct, test_vdu_id, "")
+ # Make changes for the third argument (currently None for mgmt_network).
+ # This is the mgmt_network published in the vdur (vdu.mgmt_network).
+ # Pass accordingly as per the use case of the test.
+ self.assertEqual(rc.status, RwStatus.SUCCESS)
logger.info("Openstack-CAL-Test: VDU with id : %s. Reached State : %s, mgmt ip %s" %(test_vdu_id, rsp.state, rsp.management_ip))
if (rsp.state == 'active') and ('management_ip' in rsp) and ('public_ip' in rsp):
vdu_state = 'active'
### Check vdu list as well
rc, rsp = self.cal.get_vdu_list(self._acct)
- self.assertEqual(rc, RwStatus.SUCCESS)
+ self.assertEqual(rc.status, RwStatus.SUCCESS)
found = False
logger.debug("Get VDU response %s", rsp)
for vdu in rsp.vdu_info_list:
cmake_minimum_required(VERSION 2.8)
-set(PKG_NAME rwcm)
-set(PKG_VERSION 1.0)
-set(PKG_RELEASE 1)
-set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
-
set(subdirs
plugins
test
include(rift_plugin)
set(TASKLET_NAME rwconmantasklet)
-set(CONMAN_INSTALL "etc/conman")
-
-##
-# Install translation script in demos
-##
-install(
- FILES
- rift/tasklets/${TASKLET_NAME}/xlate_cfg.py
- rift/tasklets/${TASKLET_NAME}/xlate_tags.yml
- DESTINATION ${CONMAN_INSTALL}
- COMPONENT ${PKG_LONG_NAME})
-
##
# This function creates an install target for the plugin artifacts
##
-rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+rift_install_gobject_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py COMPONENT ${INSTALL_COMPONENT})
# Workaround RIFT-6485 - rpmbuild defaults to python2 for
# anything not in a site-packages directory so we have to
rift/tasklets/${TASKLET_NAME}/__init__.py
rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
rift/tasklets/${TASKLET_NAME}/rwconman_config.py
- rift/tasklets/${TASKLET_NAME}/rwconman_events.py
rift/tasklets/${TASKLET_NAME}/jujuconf.py
rift/tasklets/${TASKLET_NAME}/RiftCA.py
rift/tasklets/${TASKLET_NAME}/riftcm_config_plugin.py
rift/tasklets/${TASKLET_NAME}/RiftCM_rpc.py
rift/tasklets/${TASKLET_NAME}/rwconman_conagent.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY)
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
import asyncio
import concurrent.futures
+import os
import re
+import shlex
import tempfile
import yaml
-import os
from gi.repository import (
RwDts as rwdts,
)
from . import riftcm_config_plugin
-from . import rwconman_events as Events
class RiftCAConfigPlugin(riftcm_config_plugin.RiftCMConfigPluginBase):
"""
Implementation of the riftcm_config_plugin.RiftCMConfigPluginBase
"""
- def __init__(self, dts, log, loop, account):
- riftcm_config_plugin.RiftCMConfigPluginBase.__init__(self, dts, log, loop, account)
+ def __init__(self, dts, log, loop, project, account):
+ riftcm_config_plugin.RiftCMConfigPluginBase.__init__(self, dts, log,
+ loop, project, account)
self._name = account.name
self._type = riftcm_config_plugin.DEFAULT_CAP_TYPE
self._rift_install_dir = os.environ['RIFT_INSTALL']
- self._rift_artif_dir = os.environ['RIFT_ARTIFACTS']
+ self._rift_var_root_dir = os.environ['RIFT_VAR_ROOT']
self._rift_vnfs = {}
self._tasks = {}
- # Instantiate events that will handle RiftCA configuration requests
- self._events = Events.ConfigManagerEvents(dts, log, loop, self)
-
@property
def name(self):
return self._name
def riftca_log(self, name, level, log_str, *args):
getattr(self._log, level)('RiftCA:({}) {}'.format(name, log_str), *args)
-
+
@asyncio.coroutine
def notify_create_vnfr(self, agent_nsr, agent_vnfr):
"""
pass
@asyncio.coroutine
- def vnf_config_primitive(self, agent_nsr, agent_vnfr, primitive, output):
+ def _vnf_config_primitive(self, nsr_id, vnfr_id, primitive,
+ vnf_config=None, vnfd_descriptor=None):
+ '''
+ Pass vnf_config to avoid querying DTS each time
+ '''
+ self._log.debug("VNF config primitive {} for nsr {}, vnfr {}".
+ format(primitive.name, nsr_id, vnfr_id))
+
+ if vnf_config is None or vnfd_descriptor is None:
+ vnfr_msg = yield from self.get_vnfr(vnfr_id)
+ if vnfr_msg is None:
+ msg = "Unable to get VNFR {} through DTS".format(vnfr_id)
+ self._log.error(msg)
+ return 3, msg
+
+ vnf_config = vnfr_msg.vnf_configuration
+ vnfd_descriptor = vnfr_msg.vnfd
+ self._log.debug("VNF config= %s", vnf_config.as_dict())
+ self._log.debug("VNFD descriptor= %s", vnfd_descriptor.as_dict())
+
+ data = {}
+ script = None
+ found = False
+
+ configs = vnf_config.config_primitive
+ for config in configs:
+ if config.name == primitive.name:
+ found = True
+ self._log.debug("RiftCA: Found the config primitive %s",
+ config.name)
+
+ spt = config.user_defined_script
+ if spt is None:
+ self._log.error("RiftCA: VNFR {}, Did not find "
+ "script defined in config {}".
+ format(vnfr['name'], config.as_dict()))
+ return 1, "Did not find user defined script for " \
+ "config primitive {}".format(primitive.name)
+
+ spt = shlex.quote(spt.strip())
+ if spt[0] == '/':
+ script = spt
+ else:
+ script = os.path.join(self._rift_var_root_dir,
+ 'launchpad/packages/vnfd',
+ self._project.name,
+ vnfd_descriptor.id,
+ 'scripts',
+ spt)
+ self._log.debug("Rift config agent: Checking for script "
+ "in %s", script)
+ if not os.path.exists(script):
+ self._log.debug("Rift config agent: Did not find "
+ "script %s", script)
+ return 1, "Did not find user defined " \
+ "script {}".format(spt)
+
+ params = {}
+ for param in config.parameter:
+ val = None
+ for p in primitive.parameter:
+ if p.name == param.name:
+ val = p.value
+ break
+
+ if val is None:
+ val = param.default_value
+
+ if val is None:
+ # Check if mandatory parameter
+ if param.mandatory:
+ msg = "VNFR {}: Primitive {} called " \
+ "without mandatory parameter {}". \
+ format(vnfr.name, config.name,
+ param.name)
+ self._log.error(msg)
+ return 1, msg
+
+ if val:
+ val = self.convert_value(val, param.data_type)
+ params.update({param.name: val})
+
+ data['parameters'] = params
+ break
+
+ if not found:
+ msg = "Did not find the primitive {} in VNFR {}". \
+ format(primitive.name, vnfr.name)
+ self._log.error(msg)
+ return 1, msg
+
+ rc, script_err = yield from self.exec_script(script, data)
+ return rc, script_err
+
+ @asyncio.coroutine
+ def vnf_config_primitive(self, nsr_id, vnfr_id, primitive, output):
'''
primitives support by RiftCA
+
+ Pass vnf_config to avoid querying DTS each time
'''
- pass
-
+ try:
+ vnfr = self._rift_vnfs[vnfr_id].vnfr
+ except KeyError:
+ msg = "Did not find VNFR {} in Rift plugin".format(vnfr_id)
+ self._log.debug(msg)
+ return
+
+ output.execution_status = "failed"
+ output.execution_id = ''
+ output.execution_error_details = ''
+
+ rc, err = yield from self._vnf_config_primitive(nsr_id,
+ vnfr_id,
+ primitive)
+ self._log.debug("VNFR {} primitive {} exec status: {}".
+ format(vnfr_id, primitive.name, rc))
+
+ if rc == 0:
+ output.execution_status = "completed"
+ else:
+ self._rift_vnfs[vnfr_id].error = True
+
+ output.execution_error_details = '{}'.format(err)
+
@asyncio.coroutine
def apply_config(self, config, nsr, vnfr, rpc_ip):
""" Notification on configuration of an NSR """
vnfr_data_dict['mgmt_interface'] = vnfr.vnfr['mgmt_interface']
vnfr_data_dict['connection_point'] = []
+ vnfr_data_dict['name'] = vnfr.vnfr['name']
+ vnfr_data_dict['datacenter'] = vnfr.vnfr['datacenter']
if 'connection_point' in vnfr.vnfr:
for cp in vnfr.vnfr['connection_point']:
cp_dict = dict()
cp_dict['name'] = cp['name']
cp_dict['ip_address'] = cp['ip_address']
+ cp_dict['connection_point_id'] = cp['connection_point_id']
+ if 'virtual_cps' in cp:
+ cp_dict['virtual_cps'] = [ {k:v for k,v in vcp.items()
+ if k in ['ip_address', 'mac_address']}
+ for vcp in cp['virtual_cps'] ]
vnfr_data_dict['connection_point'].append(cp_dict)
vnfr_data_dict['vdur'] = []
data["init_config"] = init_data
data["vnfr_index_map"] = vnfr_index_map
data["vnfr_data_map"] = vnfr_data_map
-
+
tmp_file = None
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
tmp_file.write(yaml.dump(data, default_flow_style=True)
# The script has full path, use as is
script = rpc_ip.user_defined_script
else:
- script = os.path.join(self._rift_artif_dir, 'launchpad/libs', agent_nsr.nsd_id, 'scripts',
+ script = os.path.join(self._rift_var_root_dir,
+ 'launchpad/packages/nsd',
+ self._project.name,
+ agent_nsr.nsd_id, 'scripts',
rpc_ip.user_defined_script)
self._log.debug("Rift config agent: Checking for script in %s", script)
if not os.path.exists(script):
- self._log.debug("Rift config agent: Did not find scipt %s", script)
- script = os.path.join(self._rift_install_dir, 'usr/bin', rpc_ip.user_defined_script)
+ self._log.error("Rift config agent: Did not find script %s", script)
cmd = "{} {}".format(script, tmp_file.name)
self._log.debug("Rift config agent: Running the CMD: {}".format(cmd))
return task, err
+ @asyncio.coroutine
+ def apply_initial_config_new(self, agent_nsr, agent_vnfr):
+ self._log.debug("RiftCA: VNF initial config primitive for nsr {}, vnfr {}".
+ format(agent_nsr.name, agent_vnfr.name))
+
+ try:
+ vnfr = self._rift_vnfs[agent_vnfr.id].vnfr
+ except KeyError:
+ self._log.error("RiftCA: Did not find VNFR %s in RiftCA plugin",
+ agent_vnfr.name)
+ return False
+
+ class Primitive:
+ def __init__(self, name):
+ self.name = name
+ self.value = None
+ self.parameter = []
+
+ vnfr = yield from self.get_vnfr(agent_vnfr.id)
+ if vnfr is None:
+ msg = "Unable to get VNFR {} ({}) through DTS". \
+ format(agent_vnfr.id, agent_vnfr.name)
+ self._log.error(msg)
+ raise RuntimeError(msg)
+
+ vnf_config = vnfr.vnf_configuration
+ self._log.debug("VNFR %s config: %s", vnfr.name,
+ vnf_config.as_dict())
+
+ vnfd_descriptor = vnfr.vnfd
+ self._log.debug("VNFR %s vnfd descriptor: %s", vnfr.name,
+ vnfd_descriptor.as_dict())
+
+
+ # Sort the primitive based on the sequence number
+ primitives = sorted(vnf_config.initial_config_primitive,
+ key=lambda k: k.seq)
+ if not primitives:
+ self._log.debug("VNFR {}: No initial-config-primitive specified".
+ format(vnfr.name))
+ return True
+
+ for primitive in primitives:
+ if primitive.config_primitive_ref:
+ # Reference to a primitive in config primitive
+ prim = Primitive(primitive.config_primitive_ref)
+ rc, err = yield from self._vnf_config_primitive(agent_nsr.id,
+ agent_vnfr.id,
+ prim,
+ vnf_config, vnfd_descriptor)
+ if rc != 0:
+ msg = "Error executing initial config primitive" \
+ " {} in VNFR {}: rc={}, stderr={}". \
+ format(prim.name, vnfr.name, rc, err)
+ self._log.error(msg)
+ return False
+
+ elif primitive.name:
+ if not primitive.user_defined_script:
+ msg = "Primitive {} definition in initial config " \
+ "primitive for VNFR {} not supported yet". \
+ format(primitive.name, vnfr.name)
+ self._log.error(msg)
+ raise NotImplementedError(msg)
+
+ return True
+
@asyncio.coroutine
def apply_initial_config(self, agent_nsr, agent_vnfr):
"""
Apply the initial configuration
"""
- rc = False
self._log.debug("Rift config agent: Apply initial config to VNF:%s/%s",
agent_nsr.name, agent_vnfr.name)
+ rc = False
+
try:
if agent_vnfr.id in self._rift_vnfs.keys():
- # Check if VNF instance is configurable (TBD - future)
- ### Remove this once is_vnf_configurable() is implemented
- agent_vnfr.set_to_configurable()
- if agent_vnfr.is_configurable:
- # apply initial config for the vnfr
- rc = yield from self._events.apply_vnf_config(agent_vnfr.vnf_cfg)
- else:
- self._log.info("Rift config agent: VNF:%s/%s is not configurable yet!",
- agent_nsr.name, agent_vnfr.name)
+ rc = yield from self.apply_initial_config_new(agent_nsr, agent_vnfr)
+ if not rc:
+ agent_vnfr._error = True
+
+ else:
+ rc = True
except Exception as e:
self._log.error("Rift config agent: Error on initial configuration to VNF:{}/{}, e {}"
.format(agent_nsr.name, agent_vnfr.name, str(e)))
-
+
self._log.exception(e)
- return rc
+ agent_vnfr.error = True
+ return False
return rc
@asyncio.coroutine
def get_config_status(self, agent_nsr, agent_vnfr):
if agent_vnfr.id in self._rift_vnfs.keys():
+ if agent_vnfr.error:
+ return 'error'
return 'configured'
return 'unknown'
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
GET_NS_CONF_XPATH = "I,/nsr:get-ns-service-primitive-values"
GET_NS_CONF_O_XPATH = "O,/nsr:get-ns-service-primitive-values"
- def __init__(self, dts, log, loop, nsm):
+ def __init__(self, dts, log, loop, project, nsm):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._nsm = nsm
self._ns_regh = None
self._vnf_regh = None
self._get_ns_conf_regh = None
- self.job_manager = rift.mano.config_agent.ConfigAgentJobManager(dts, log, loop, nsm)
+ self.job_manager = rift.mano.config_agent.ConfigAgentJobManager(dts, log, loop,
+ project, nsm)
self._rift_install_dir = os.environ['RIFT_INSTALL']
- self._rift_artif_dir = os.environ['RIFT_ARTIFACTS']
+ self._rift_var_root_dir = os.environ['RIFT_VAR_ROOT']
@property
def reghs(self):
""" Return the NS manager instance """
return self._nsm
+ def deregister(self):
+ self._log.debug("De-register conman rpc handlers for project {}".
+ format(self._project))
+ for reg in self.reghs:
+ if reg:
+ reg.deregister()
+ reg = None
+
+ self.job_manager.deregister()
+ self.job_manager = None
+
def prepare_meta(self, rpc_ip):
try:
if vnf:
self._log.debug("nsr/vnf {}/{}, vnf_configuration: %s",
vnf.vnf_configuration)
- for primitive in vnf.vnf_configuration.service_primitive:
+ for primitive in vnf.vnf_configuration.config_primitive:
if primitive.name == primitive_name:
return primitive
if 'mgmt_interface' in vnfr.vnfr:
vnfr_data_dict['mgmt_interface'] = vnfr.vnfr['mgmt_interface']
+ vnfr_data_dict['name'] = vnfr.vnfr['name']
vnfr_data_dict['connection_point'] = []
if 'connection_point' in vnfr.vnfr:
for cp in vnfr.vnfr['connection_point']:
- cp_dict = dict()
- cp_dict['name'] = cp['name']
- cp_dict['ip_address'] = cp['ip_address']
+ cp_dict = dict(name = cp['name'],
+ ip_address = cp['ip_address'],
+ connection_point_id = cp['connection_point_id'])
+ if 'virtual_cps' in cp:
+ cp_info['virtual_cps'] = [ {k:v for k,v in vcp.items()
+ if k in ['ip_address', 'mac_address']}
+ for vcp in cp['virtual_cps'] ]
+
vnfr_data_dict['connection_point'].append(cp_dict)
try:
# The script has full path, use as is
script = rpc_ip.user_defined_script
else:
- script = os.path.join(self._rift_artif_dir, 'launchpad/packages/nsd',
- agent_nsr.id, 'scripts',
+ script = os.path.join(self._rift_var_root_dir,
+ 'launchpad/packages/nsd',
+ self._project.name,
+ agent_nsr.nsd_id, 'scripts',
rpc_ip.user_defined_script)
self._log.debug("CA-RPC: Checking for script in %s", script)
- if not os.path.exists(script):
- script = os.path.join(self._rift_install_dir, 'usr/bin', rpc_ip.user_defined_script)
cmd = "{} {}".format(script, tmp_file.name)
self._log.debug("CA-RPC: Running the CMD: {}".format(cmd))
- process = asyncio.create_subprocess_shell(cmd, loop=self._loop,
- stderr=asyncio.subprocess.PIPE)
+ process = yield from asyncio.create_subprocess_shell(
+ cmd)
return process
def on_ns_config_prepare(xact_info, action, ks_path, msg):
""" prepare callback from dts exec-ns-service-primitive"""
assert action == rwdts.QueryAction.RPC
+
+ if not self._project.rpc_check(msg, xact_info):
+ return
+
rpc_ip = msg
rpc_op = NsrYang.YangOutput_Nsr_ExecNsServicePrimitive.from_dict({
"triggered_by": rpc_ip.triggered_by,
idx += 1
op_primitive.name = primitive.name
op_primitive.execution_id = ''
- op_primitive.execution_status = 'completed'
+ op_primitive.execution_status = 'pending'
op_primitive.execution_error_details = ''
# Copy over the VNF pimitive's input parameters
nsr_param_pool.add_used_value(param.value)
for config_plugin in self.nsm.config_agent_plugins:
+ # TODO: Execute these in separate threads to prevent blocking
yield from config_plugin.vnf_config_primitive(nsr_id,
vnfr_id,
primitive,
@asyncio.coroutine
def on_get_ns_config_values_prepare(xact_info, action, ks_path, msg):
assert action == rwdts.QueryAction.RPC
+
+ if not self._project.rpc_check(msg, xact_info):
+ return
+
nsr_id = msg.nsr_id_ref
cfg_prim_name = msg.name
try:
handler=hdl_ns_get,
flags=rwdts.Flag.PUBLISHER,
)
-
-
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# Charm service name accepts only a to z and -.
-def get_vnf_unique_name(nsr_name, vnfr_short_name, member_vnf_index):
- name = "{}-{}-{}".format(nsr_name, vnfr_short_name, member_vnf_index)
+def get_vnf_unique_name(nsr_name, vnfr_name, member_vnf_index):
+ name = "{}-{}-{}".format(nsr_name, vnfr_name, member_vnf_index)
new_name = ''
for c in name:
if c.isdigit():
"""
Juju implementation of the riftcm_config_plugin.RiftCMConfigPluginBase
"""
- def __init__(self, dts, log, loop, account):
- riftcm_config_plugin.RiftCMConfigPluginBase.__init__(self, dts, log, loop, account)
+ def __init__(self, dts, log, loop, project, account):
+ riftcm_config_plugin.RiftCMConfigPluginBase.__init__(self, dts, log, loop,
+ project, account)
self._name = account.name
self._type = 'juju'
self._ip_address = account.juju.ip_address
self._user = account.juju.user
self._secret = account.juju.secret
self._rift_install_dir = os.environ['RIFT_INSTALL']
- self._rift_artif_dir = os.environ['RIFT_ARTIFACTS']
+ self._rift_var_root_dir = os.environ['RIFT_VAR_ROOT']
############################################################
# This is wrongfully overloaded with 'juju' private data. #
# Find the charm directory
try:
- path = os.path.join(self._rift_artif_dir,
- 'launchpad/libs',
+ path = os.path.join(self._rift_var_root_dir,
+ 'launchpad/packages/vnfd',
+ self._project.name,
agent_vnfr.vnfr_msg.vnfd.id,
'charms/trusty',
charm)
return True
@asyncio.coroutine
- def vnf_config_primitive(self, nsr_id, vnfr_id, primitive, output):
- self._log.debug("jujuCA: VNF config primititve {} for nsr {}, vnfr_id {}".
+ def _vnf_config_primitive(self, nsr_id, vnfr_id, primitive,
+ vnf_config=None, wait=False):
+ self._log.debug("jujuCA: VNF config primitive {} for nsr {}, "
+ "vnfr_id {}".
format(primitive, nsr_id, vnfr_id))
- try:
- vnfr = self._juju_vnfs[vnfr_id].vnfr
- except KeyError:
- self._log.error("jujuCA: Did not find VNFR %s in juju plugin",
- vnfr_id)
- return
- output.execution_status = "failed"
- output.execution_id = ''
- output.execution_error_details = ''
+ if vnf_config is None:
+ vnfr_msg = yield from self.get_vnfr(vnfr_id)
+ if vnfr_msg is None:
+ msg = "Unable to get VNFR {} through DTS".format(vnfr_id)
+ self._log.error(msg)
+ return 3, msg
+
+ vnf_config = vnfr_msg.vnf_configuration
+ self._log.debug("VNF config= %s", vnf_config.as_dict())
try:
service = vnfr['vnf_juju_name']
- vnf_config = vnfr['config']
self._log.debug("VNF config %s", vnf_config)
- configs = vnf_config.service_primitive
+ configs = vnf_config.config_primitive
for config in configs:
if config.name == primitive.name:
self._log.debug("jujuCA: Found the config primitive %s",
config.name)
params = {}
- for parameter in primitive.parameter:
- if parameter.value:
- val = self.xlate(parameter.value, vnfr['tags'])
- # TBD do validation of the parameters
- data_type = 'STRING'
- found = False
- for ca_param in config.parameter:
- if ca_param.name == parameter.name:
- data_type = ca_param.data_type
- found = True
- break
- try:
- if data_type == 'INTEGER':
- tmp = int(val)
- val = tmp
- except Exception as e:
- pass
-
- if not found:
- self._log.warn("jujuCA: Did not find parameter {} for {}".
- format(parameter, config.name))
+ for parameter in config.parameter:
+ val = None
+ for p in primitive.parameter:
+ if p.name == parameter.name:
+ if p.value:
+ val = self.xlate(p.value, vnfr['tags'])
+ break
+
+ if val is None:
+ val = parameter.default_value
+
+ if val is None:
+ # Check if mandatory parameter
+ if parameter.mandatory:
+ msg = "VNFR {}: Primitive {} called " \
+ "without mandatory parameter {}". \
+ format(vnfr_msg.name, config.name,
+ parameter.name)
+ self._log.error(msg)
+ return 'failed', '', msg
+
+ if val:
+ val = self.convert_value(val, parameter.data_type)
params.update({parameter.name: val})
+ rc = ''
+ exec_id = ''
+ details = ''
if config.name == 'config':
- output.execution_id = 'config'
+ exec_id = 'config'
if len(params):
- self._log.debug("jujuCA: applying config with params {} for service {}".
+ self._log.debug("jujuCA: applying config with "
+ "params {} for service {}".
format(params, service))
- rc = yield from self.api.apply_config(params, service=service, wait=False)
+ rc = yield from self.api.apply_config(
+ params,
+ service=service,
+ wait=True)
if rc:
- # Mark as pending and check later for the status
- output.execution_status = "pending"
- self._log.debug("jujuCA: applied config {} on {}".
- format(params, service))
+ rc = "completed"
+ self._log.debug("jujuCA: applied config {} "
+ "on {}".format(params, service))
else:
- output.execution_status = 'failed'
- output.execution_error_details = \
+ rc = 'failed'
+ details = \
'Failed to apply config: {}'.format(params)
- self._log.error("jujuCA: Error applying config {} on service {}".
+ self._log.error("jujuCA: Error applying "
+ "config {} on service {}".
format(params, service))
else:
- self._log.warn("jujuCA: Did not find valid parameters for config : {}".
+ self._log.warn("jujuCA: Did not find valid "
+ "parameters for config : {}".
format(primitive.parameter))
- output.execution_status = "completed"
+ rc = "completed"
else:
- self._log.debug("jujuCA: Execute action {} on service {} with params {}".
+ self._log.debug("jujuCA: Execute action {} on "
+ "service {} with params {}".
format(config.name, service, params))
- resp = yield from self.api.execute_action(config.name,
- params,
- service=service)
+ resp = yield from self.api.execute_action(
+ config.name,
+ params,
+ service=service)
if resp:
if 'error' in resp:
- output.execution_error_details = resp['error']['Message']
+ details = resp['error']['message']
else:
- output.execution_id = resp['action']['tag']
- output.execution_status = resp['status']
- if output.execution_status == 'failed':
- output.execution_error_details = resp['message']
- self._log.debug("jujuCA: execute action {} on service {} returned {}".
- format(config.name, service, output.execution_status))
+ exec_id = resp['action']['tag']
+ rc = resp['status']
+ if rc == 'failed':
+ details = resp['message']
+
+ self._log.debug("jujuCA: execute action {} on "
+ "service {} returned {}".
+ format(config.name, service, rc))
else:
- self._log.error("jujuCA: error executing action {} for {} with {}".
- format(config.name, service, params))
- output.execution_id = ''
- output.execution_status = 'failed'
- output.execution_error_details = "Failed to queue the action"
+ self._log.error("jujuCA: error executing action "
+ "{} for {} with {}".
+ format(config.name, service,
+ params))
+ exec_id = ''
+ rc = 'failed'
+ details = "Failed to queue the action"
break
except KeyError as e:
- self._log.info("VNF %s does not have config primititves, e=%s", vnfr_id, e)
+ msg = "VNF %s does not have config primitives, e=%s", \
+ vnfr_id, e
+ self._log.exception(msg)
+ raise ValueError(msg)
+
+ while wait and (rc in ['pending', 'running']):
+ self._log.debug("JujuCA: action {}, rc {}".
+ format(exec_id, rc))
+ yield from asyncio.sleep(0.2, loop=self._loop)
+ status = yield from self.api.get_action_status(exec_id)
+ rc = status['status']
+
+ return rc, exec_id, details
+
+ @asyncio.coroutine
+ def vnf_config_primitive(self, nsr_id, vnfr_id, primitive, output):
+ try:
+ vnfr = self._juju_vnfs[vnfr_id].vnfr
+ except KeyError:
+ msg = "Did not find VNFR {} in Juju plugin".format(vnfr_id)
+ self._log.debug(msg)
+ return
+
+ output.execution_status = "failed"
+ output.execution_id = ''
+ output.execution_error_details = ''
+
+ rc, exec_id, err = yield from self._vnf_config_primitive(
+ nsr_id,
+ vnfr_id,
+ primitive)
+
+ self._log.debug("VNFR {} primitive {} exec status: {}".
+ format(vnfr.name, primitive.name, rc))
+ output.execution_status = rc
+ output.execution_id = exec_id
+ output.execution_error_details = err
@asyncio.coroutine
def apply_config(self, agent_nsr, agent_vnfr, config, rpc_ip):
# The script has full path, use as is
script = rpc_ip.user_defined_script
else:
- script = os.path.join(self._rift_artif_dir, 'launchpad/libs', agent_nsr.id, 'scripts',
+ script = os.path.join(self._rift_var_root_dir, 'launchpad/nsd',
+ self._project.name,
+ agent_nsr.id, 'scripts',
rpc_ip.user_defined_script)
self.log.debug("jujuCA: Checking for script in %s", script)
if not os.path.exists(script):
Actions in initial config may not work based on charm design
"""
- vnfr = agent_vnfr.vnfr
- service = vnfr['vnf_juju_name']
+ try:
+ vnfr = self._juju_vnfs[agent_vnfr.id].vnfr
+ except KeyError:
+ self._log.debug("Did not find VNFR %s in Juju plugin",
+ agent_vnfr.name)
+ return False
+
+ vnfr_msg = yield from self.get_vnfr(agent_vnfr.id)
+ if vnfr_msg is None:
+ msg = "Unable to get VNFR {} ({}) through DTS". \
+ format(agent_vnfr.id, agent_vnfr.name)
+ self._log.error(msg)
+ raise RuntimeError(msg)
+
+ vnf_config = vnfr_msg.vnf_configuration
+ self._log.debug("VNFR %s config: %s", vnfr_msg.name,
+ vnf_config.as_dict())
+
+ # Sort the primitive based on the sequence number
+ primitives = sorted(vnf_config.initial_config_primitive,
+ key=lambda k: k.seq)
+ if not primitives:
+ self._log.debug("VNFR {}: No initial-config-primitive specified".
+ format(vnfr_msg.name))
+ return True
+ service = vnfr['vnf_juju_name']
rc = yield from self.api.is_service_up(service=service)
if not rc:
return False
action_ids = []
try:
- vnf_cat = agent_vnfr.vnfr_msg
- if vnf_cat and vnf_cat.mgmt_interface.ip_address:
- vnfr['tags'].update({'rw_mgmt_ip': vnf_cat.mgmt_interface.ip_address})
+ if vnfr_msg.mgmt_interface.ip_address:
+ vnfr['tags'].update({'rw_mgmt_ip': vnfr_msg.mgmt_interface.ip_address})
self._log.debug("jujuCA:(%s) tags: %s", vnfr['vnf_juju_name'], vnfr['tags'])
- config = {}
- try:
- for primitive in vnfr['config'].initial_config_primitive:
- self._log.debug("jujuCA:(%s) Initial config primitive %s", vnfr['vnf_juju_name'], primitive)
+ for primitive in primitives:
+ self._log.debug("(%s) Initial config primitive %s",
+ vnfr['vnf_juju_name'], primitive.as_dict())
+ if primitive.config_primitive_ref:
+ # Reference to a primitive in config primitive
+ class Primitive:
+ def __init__(self, name):
+ self.name = name
+ self.value = None
+ self.parameter = []
+
+ prim = Primitive(primitive.config_primitive_ref)
+ rc, eid, err = yield from self._vnf_config_primitive(
+ agent_nsr.id,
+ agent_vnfr.id,
+ prim,
+ vnf_config,
+ wait=True)
+
+ if rc == "failed":
+ msg = "Error executing initial config primitive" \
+ " {} in VNFR {}: rc={}, stderr={}". \
+ format(prim.name, vnfr_msg.name, rc, err)
+ self._log.error(msg)
+ return False
+
+ elif rc == "pending":
+ action_ids.append(eid)
+
+ elif primitive.name:
+ config = {}
if primitive.name == 'config':
for param in primitive.parameter:
if vnfr['tags']:
- val = self.xlate(param.value, vnfr['tags'])
+ val = self.xlate(param.value,
+ vnfr['tags'])
config.update({param.name: val})
- except KeyError as e:
- self._log.exception("jujuCA:(%s) Initial config error(%s): config=%s",
- vnfr['vnf_juju_name'], str(e), config)
- config = None
- return False
-
- if config:
- self.juju_log('info', vnfr['vnf_juju_name'],
- "Applying Initial config:%s",
- config)
-
- rc = yield from self.api.apply_config(config, service=service)
- if rc is False:
- self.log.error("Service {} is in error state".format(service))
- return False
+ if config:
+ self.juju_log('info', vnfr['vnf_juju_name'],
+ "Applying Initial config:%s",
+ config)
- # Apply any actions specified as part of initial config
- for primitive in vnfr['config'].initial_config_primitive:
- if primitive.name != 'config':
- self._log.debug("jujuCA:(%s) Initial config action primitive %s",
- vnfr['vnf_juju_name'], primitive)
- action = primitive.name
- params = {}
- for param in primitive.parameter:
- val = self.xlate(param.value, vnfr['tags'])
- params.update({param.name: val})
-
- self._log.info("jujuCA:(%s) Action %s with params %s",
- vnfr['vnf_juju_name'], action, params)
-
- resp = yield from self.api.execute_action(action, params,
- service=service)
- if 'error' in resp:
- self._log.error("Applying initial config on {} failed for {} with {}: {}".
- format(vnfr['vnf_juju_name'], action, params, resp))
- return False
+ rc = yield from self.api.apply_config(
+ config,
+ service=service)
+ if rc is False:
+ self.log.error("Service {} is in error state".
+ format(service))
+ return False
- action_ids.append(resp['action']['tag'])
+ # Apply any actions specified as part of initial config
+ else:
+ self._log.debug("(%s) Initial config action "
+ "primitive %s",
+ vnfr['vnf_juju_name'], primitive)
+ action = primitive.name
+ params = {}
+ for param in primitive.parameter:
+ val = self.xlate(param.value, vnfr['tags'])
+ params.update({param.name: val})
+
+ self._log.info("(%s) Action %s with params %s",
+ vnfr['vnf_juju_name'], action,
+ params)
+
+ resp = yield from self.api.execute_action(
+ action,
+ params,
+ service=service)
+ if 'error' in resp:
+ self._log.error("Applying initial config on {}"
+ " failed for {} with {}: {}".
+ format(vnfr['vnf_juju_name'],
+ action, params, resp))
+ return False
+
+ action_ids.append(resp['action']['tag'])
- except KeyError as e:
- self._log.info("Juju config agent(%s): VNFR %s not managed by Juju",
- vnfr['vnf_juju_name'], agent_vnfr.id)
- return False
except Exception as e:
- self._log.exception("jujuCA:(%s) Exception juju apply_initial_config for VNFR {}: {}".
- format(vnfr['vnf_juju_name'], agent_vnfr.id, e))
+ self._log.exception("jujuCA:(%s) Exception juju "
+ "apply_initial_config for VNFR {}: {}".
+ format(vnfr['vnf_juju_name'],
+ agent_vnfr.id, e))
return False
# Check if all actions completed
for act in action_ids:
resp = yield from self.api.get_action_status(act)
if 'error' in resp:
- self._log.error("Initial config failed: {}".format(resp))
+ self._log.error("Initial config failed for action {}: {}".
+ format(act, resp))
return False
if resp['status'] == 'failed':
- self._log.error("Initial config action failed: {}".format(resp))
+ self._log.error("Initial config action failed for "
+ "action {}: {}".format(act, resp))
return False
if resp['status'] == 'pending':
# limitations under the License.
#
-import asyncio
import abc
+import asyncio
+import gi
+import os
+import os
+import stat
+import tempfile
+import yaml
+
+from urllib.parse import urlparse
+
+gi.require_version('RwDts', '1.0')
+from gi.repository import (
+ RwDts as rwdts,
+)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
# Default config agent plugin type
DEFAULT_CAP_TYPE = "riftca"
+
+class XPaths(object):
+ @staticmethod
+ def nsr_opdata(k=None):
+ return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
+ ("[nsr:ns-instance-config-ref={}]".format(quoted_key(k)) if k is not None else ""))
+
+ @staticmethod
+ def nsd_msg(k=None):
+ return ("C,/nsd:nsd-catalog/nsd:nsd" +
+ "[nsd:id={}]".format(quoted_key(k)) if k is not None else "")
+
+ @staticmethod
+ def vnfr_opdata(k=None):
+ return ("D,/vnfr:vnfr-catalog/vnfr:vnfr" +
+ ("[vnfr:id={}]".format(quoted_key(k)) if k is not None else ""))
+
+ @staticmethod
+ def nsr_config(k=None):
+ return ("C,/nsr:ns-instance-config/nsr:nsr[nsr:id={}]".format(quoted_key(k)) if k is not None else "")
+
+
class RiftCMnsr(object):
'''
Agent class for NSR
created for Agents to use objects from NSR
'''
- def __init__(self, nsr_dict, cfg):
+ def __init__(self, nsr_dict, cfg, project):
self._nsr = nsr_dict
self._cfg = cfg
+ self._project = project
self._vnfrs = []
self._vnfrs_msg = []
self._vnfr_ids = {}
def nsr_cfg_msg(self):
return self._cfg
+ @property
+ def nsd(self):
+ return self._cfg.nsd
+
@property
def job_id(self):
''' Get a new job id for config primitive'''
if vnfr['id'] in self._vnfr_ids.keys():
agent_vnfr = self._vnfr_ids[vnfr['id']]
else:
- agent_vnfr = RiftCMvnfr(self.name, vnfr, vnfr_msg)
+ agent_vnfr = RiftCMvnfr(self.name, vnfr, vnfr_msg, self._project)
self._vnfrs.append(agent_vnfr)
self._vnfrs_msg.append(vnfr_msg)
self._vnfr_ids[agent_vnfr.id] = agent_vnfr
def vnfr_ids(self):
return self._vnfr_ids
+ def get_member_vnfr(self, member_index):
+ for vnfr in self._vnfrs:
+ if vnfr.member_vnf_index == member_index:
+ return vnfr
+
+
class RiftCMvnfr(object):
'''
Agent base class for VNFR processing
'''
- def __init__(self, nsr_name, vnfr_dict, vnfr_msg):
+ def __init__(self, nsr_name, vnfr_dict, vnfr_msg, project):
self._vnfr = vnfr_dict
self._vnfr_msg = vnfr_msg
+ self._vnfd_msg = vnfr_msg.vnfd
self._nsr_name = nsr_name
self._configurable = False
+ self._project = project
+ self._error = False
@property
def nsr_name(self):
def vnfr_msg(self):
return self._vnfr_msg
+ @property
+ def vnfd(self):
+ return self._vnfd_msg
+
@property
def name(self):
- return self._vnfr['short_name']
+ return self._vnfr['name']
@property
def tags(self):
@property
def xpath(self):
""" VNFR xpath """
- return "D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id = '{}']".format(self.id)
+ return self._project.add_project("D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id={}]".
+ format(quoted_key(self.id)))
def set_to_configurable(self):
self._configurable = True
def vnf_cfg(self):
return self._vnfr['vnf_cfg']
+ @property
+ def error(self):
+ return self._error
+
+ @error.setter
+ def error(self, value):
+ self._error = value
+
+
class RiftCMConfigPluginBase(object):
"""
Abstract base class for the NSM Configuration agent plugin.
There will be single instance of this plugin for each plugin type.
"""
- def __init__(self, dts, log, loop, config_agent):
+ def __init__(self, dts, log, loop, project, config_agent):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._config_agent = config_agent
@property
"""Get the status of the service"""
return None
+ # Helper functions
+
+ def convert_value(self, value, type_='STRING'):
+ if type_ == 'STRING':
+ if value.startswith('file://'):
+ p = urlparse(value)
+ with open(p[2], 'r') as f:
+ val = f.read()
+ return(val)
+ return str(value)
+
+ if type_ == 'INTEGER':
+ return int(value)
+
+ if type_ == 'BOOLEAN':
+ return (value == 1) or (value.lower() == 'true')
+
+ return value
+
+ @asyncio.coroutine
+ def _read_dts(self, path, do_trace=False):
+ xpath = self._project.add_project(path)
+ self._log.debug("_read_dts path = %s", xpath)
+ flags = rwdts.XactFlag.MERGE
+ res_iter = yield from self._dts.query_read(
+ xpath, flags=flags
+ )
+
+ results = []
+ try:
+ for i in res_iter:
+ result = yield from i
+ if result is not None:
+ results.append(result.result)
+ except:
+ pass
+
+ return results
+
+
+ @asyncio.coroutine
+ def get_xpath(self, xpath):
+ self._log.debug("Attempting to get xpath: {}".format(xpath))
+ resp = yield from self._read_dts(xpath, False)
+ if len(resp) > 0:
+ self._log.debug("Got DTS resp: {}".format(resp[0]))
+ return resp[0]
+ return None
+
+ @asyncio.coroutine
+ def get_nsr(self, id):
+ self._log.debug("Attempting to get NSR: %s", id)
+ nsrl = yield from self._read_dts(XPaths.nsr_opdata(id), False)
+ nsr = None
+ if len(nsrl) > 0:
+ nsr = nsrl[0].as_dict()
+ return nsr
+
+ @asyncio.coroutine
+ def get_nsr_config(self, id):
+ self._log.debug("Attempting to get config NSR: %s", id)
+ nsrl = yield from self._read_dts(XPaths.nsr_config(id), False)
+ nsr = None
+ if len(nsrl) > 0:
+ nsr = nsrl[0]
+ return nsr
+
+ @asyncio.coroutine
+ def get_vnfr(self, id):
+ self._log.debug("Attempting to get VNFR: %s", id)
+ vnfrl = yield from self._read_dts(XPaths.vnfr_opdata(id), do_trace=False)
+ vnfr_msg = None
+ if len(vnfrl) > 0:
+ vnfr_msg = vnfrl[0]
+ return vnfr_msg
+
+ @asyncio.coroutine
+ def exec_script(self, script, data):
+ """Execute a shell script with the data as yaml input file"""
+ self._log.debug("Execute script {} with data {}".
+ format(script, data))
+
+ #Make the script executable if it is not.
+ perm = os.stat(script).st_mode
+ if not (perm & stat.S_IXUSR):
+ self._log.warning("script {} without execute permission: {}".
+ format(script, perm))
+ os.chmod(script, perm | stat.S_IXUSR)
+
+ tmp_file = None
+ with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
+ tmp_file.write(yaml.dump(data, default_flow_style=True)
+ .encode("UTF-8"))
+
+ cmd = "{} {}".format(script, tmp_file.name)
+ self._log.debug("Running the CMD: {}".format(cmd))
+
+ try:
+ proc = yield from asyncio.create_subprocess_shell(
+ cmd,
+ stdout=asyncio.subprocess.PIPE,
+ stderr=asyncio.subprocess.PIPE)
+ rc = yield from proc.wait()
+ script_out, script_err = yield from proc.communicate()
+
+ except Exception as e:
+ msg = "Script {} caused exception: {}". \
+ format(script, e)
+ self._log.exception(msg)
+ rc = 1
+ script_err = msg
+ script_out = ''
+
+ finally:
+ # Remove the tempfile created
+ try:
+ if rc == 0:
+ os.remove(tmp_file.name)
+ except OSError as e:
+ self._log.info("Error removing tempfile {}: {}".
+ format(tmp_file.name, e))
+
+ if rc != 0:
+ if not os.path.exists(script) :
+ self._log.error("Script {} not found: ".format(script))
+ else:
+ self._log.error("Script {}: rc={}\nStdOut:{}\nStdErr:{} \nPermissions on script: {}".
+ format(script, rc, script_out, script_err, stat.filemode(os.stat(script).st_mode)))
+
+ return rc, script_err
+
@asyncio.coroutine
def invoke(self, method, *args):
try:
self._log.error("Unknown method %s invoked on config agent plugin",
method)
except Exception as e:
- self._log.error("Caught exception while invoking method: %s, Exception: %s", method, str(e))
- raise
+ self._log.exception("Caught exception while invoking method: %s, "
+ "Exception: %s", method, str(e))
+ raise e
+
return rc
class ConfigAccountHandler(object):
- def __init__(self, dts, log, loop, on_add_config_agent, on_delete_config_agent):
+ def __init__(self, dts, log, loop, project, on_add_config_agent, on_delete_config_agent):
self._log = log
self._dts = dts
self._loop = loop
+ self._project = project
self._on_add_config_agent = on_add_config_agent
self._on_delete_config_agent = on_delete_config_agent
self._log.debug("creating config account handler")
self.cloud_cfg_handler = rift.mano.config_agent.ConfigAgentSubscriber(
- self._dts, self._log,
+ self._dts, self._log, self._project,
rift.mano.config_agent.ConfigAgentCallbacks(
on_add_apply=self.on_config_account_added,
on_delete_apply=self.on_config_account_deleted,
def register(self):
self.cloud_cfg_handler.register()
+ def deregister(self):
+ self.cloud_cfg_handler.deregister()
+
+
class RiftCMConfigPlugins(object):
""" NSM Config Agent Plugins """
def __init__(self):
self._config_plugins = RiftCMConfigPlugins()
self._config_handler = ConfigAccountHandler(
- self._dts, self._log, self._loop, self._on_config_agent, self._on_config_agent_delete)
+ self._dts, self._log, self._loop, parent._project,
+ self._on_config_agent, self._on_config_agent_delete)
self._plugin_instances = {}
self._default_account_added = False
@asyncio.coroutine
def invoke_config_agent_plugins(self, method, nsr, vnfr, *args):
# Invoke the methods on all config agent plugins registered
- rc = False
+ rc = True
+
for agent in self._plugin_instances.values():
if not agent.is_vnfr_managed(vnfr.id):
continue
rc = yield from agent.invoke(method, nsr, vnfr, *args)
break
except Exception as e:
- self._log.error("Error invoking {} on {} : {}".
- format(method, agent.name, e))
- raise
+ self._log.exception("Error invoking {} on {} : {}".
+ format(method, agent.name, e))
+ raise e
self._log.info("vnfr({}), method={}, return rc={}"
.format(vnfr.name, method, rc))
return rc
def get_vnfr_config_agent(self, vnfr):
- # if (not vnfr.has_field('netconf') and
- # not vnfr.has_field('juju') and
- # not vnfr.has_field('script')):
- # return False
-
for agent in self._plugin_instances.values():
try:
if agent.is_vnfr_managed(vnfr.id):
else:
# Otherwise, instantiate a new plugin using the config agent account
self._log.debug("Instantiting new config agent using class: %s", cap_inst)
- new_instance = cap_inst(self._dts, self._log, self._loop, config_agent)
+ new_instance = cap_inst(self._dts, self._log, self._loop,
+ self._ConfigManagerConfig._project, config_agent)
self._plugin_instances[cap_name] = new_instance
# TODO (pjoseph): See why this was added, as this deletes the
def _on_config_agent_delete(self, config_agent):
self._log.debug("Got nsm plugin config agent delete, account: %s, type: %s",
config_agent.name, config_agent.account_type)
- cap_name = config_agent.account_type
+ cap_name = config_agent.name
if cap_name in self._plugin_instances:
self._log.debug("Config agent nsm plugin exists, deleting it.")
del self._plugin_instances[cap_name]
def register(self):
self._log.debug("Registering for config agent nsm plugin manager")
yield from self._config_handler.register()
-
- account = rwcfg_agent.ConfigAgentAccount()
+
+ account = rwcfg_agent.YangData_RwProject_Project_ConfigAgent_Account()
account.account_type = DEFAULT_CAP_TYPE
account.name = "RiftCA"
self._on_config_agent(account)
for account in config_agents:
self._on_config_agent(account)
+ def deregister(self):
+ self._log.debug("De-registering config agent nsm plugin manager".
+ format(self._ConfigManagerConfig._project))
+ self._config_handler.deregister()
+
def set_config_agent(self, nsr, vnfr, method):
if method == 'juju':
agent_type = 'juju'
- elif method in ['netconf', 'script']:
+ elif method in ['script']:
agent_type = DEFAULT_CAP_TYPE
else:
msg = "Unsupported configuration method ({}) for VNF:{}/{}". \
for agent in self._plugin_instances:
if self._plugin_instances[agent].agent_type == agent_type:
self._plugin_instances[agent].add_vnfr_managed(vnfr)
- self._log.debug("Added vnfr {} as config plugin {} managed".
+ self._log.debug("Added vnfr from {} from default CAs as config plugin {} managed".
format(vnfr.name, agent))
return
#
import asyncio
+import gi
import os
import stat
import subprocess
RwConmanYang as conmanY,
ProtobufC,
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
import rift.tasklets
+import rift.package.script
+import rift.package.store
from . import rwconman_conagent as conagent
from . import RiftCM_rpc
from . import riftcm_config_plugin
+
if sys.version_info < (3, 4, 4):
asyncio.ensure_future = asyncio.async
-def get_vnf_unique_name(nsr_name, vnfr_short_name, member_vnf_index):
- return "{}.{}.{}".format(nsr_name, vnfr_short_name, member_vnf_index)
+def get_vnf_unique_name(nsr_name, vnfr_name, member_vnf_index):
+ return "{}.{}.{}".format(nsr_name, vnfr_name, member_vnf_index)
+
class ConmanConfigError(Exception):
pass
pass
+class ScriptNotFoundError(InitialConfigError):
+ pass
+
+
def log_this_vnf(vnf_cfg):
log_vnf = ""
used_item_list = ['nsr_name', 'vnfr_name', 'member_vnf_index', 'mgmt_ip_address']
self._log = log
self._loop = loop
self._parent = parent
+ self._project = parent._project
+
self._nsr_dict = {}
self.pending_cfg = {}
self.terminate_cfg = {}
self.pending_tasks = [] # User for NSRid get retry
# (mainly excercised at restart case)
- self._config_xpath = "C,/cm-config"
- self._opdata_xpath = "D,/rw-conman:cm-state"
- self.cm_config = conmanY.SoConfig()
- # RO specific configuration
- self.ro_config = {}
- for key in self.cm_config.ro_endpoint.fields:
- self.ro_config[key] = None
+ self._opdata_xpath = self._project.add_project("D,/rw-conman:cm-state")
# Initialize cm-state
self.cm_state = {}
self.cm_state['states'] = "Initialized"
# Initialize objects to register
- self.cmdts_obj = ConfigManagerDTS(self._log, self._loop, self, self._dts)
+ self.cmdts_obj = ConfigManagerDTS(self._log, self._loop, self, self._dts, self._project)
self._config_agent_mgr = conagent.RiftCMConfigAgent(
self._dts,
self._log,
self._loop,
self,
)
+
+ self.riftcm_rpc_handler = RiftCM_rpc.RiftCMRPCHandler(self._dts, self._log, self._loop, self._project,
+ PretendNsm(
+ self._dts, self._log, self._loop, self))
+
self.reg_handles = [
self.cmdts_obj,
self._config_agent_mgr,
- RiftCM_rpc.RiftCMRPCHandler(self._dts, self._log, self._loop,
- PretendNsm(
- self._dts, self._log, self._loop, self)),
+ self.riftcm_rpc_handler
]
+ self._op_reg = None
def is_nsr_valid(self, nsr_id):
if nsr_id in self._nsr_dict:
def add_to_pending_tasks(self, task):
if self.pending_tasks:
for p_task in self.pending_tasks:
- if p_task['nsrid'] == task['nsrid']:
+ if (p_task['nsrid'] == task['nsrid']) and \
+ (p_task['event'] == task['event']):
# Already queued
return
try:
self.pending_tasks.append(task)
self._log.debug("add_to_pending_tasks (nsrid:%s)",
task['nsrid'])
- if len(self.pending_tasks) == 1:
+ if len(self.pending_tasks) >= 1:
self._loop.create_task(self.ConfigManagerConfig_pending_loop())
# TBD - change to info level
self._log.debug("Started pending_loop!")
+
except Exception as e:
self._log.error("Failed adding to pending tasks (%s)", str(e))
"""
if self.pending_tasks:
self._log.debug("self.pending_tasks len=%s", len(self.pending_tasks))
- task = self.pending_tasks[0]
+ task = self.pending_tasks.pop(0)
done = False
if 'nsrid' in task:
nsrid = task['nsrid']
- self._log.debug("Will execute pending task for NSR id(%s)", nsrid)
+ self._log.debug("Will execute pending task for NSR id: %s", nsrid)
try:
# Try to configure this NSR
task['retries'] -= 1
- done = yield from self.config_NSR(nsrid)
+ done = yield from self.config_NSR(nsrid, task['event'])
self._log.info("self.config_NSR status=%s", done)
except Exception as e:
- self._log.error("Failed(%s) configuring NSR(%s)," \
+ self._log.error("Failed(%s) configuring NSR(%s) for task %s," \
"retries remained:%d!",
- str(e), nsrid, task['retries'])
- finally:
- self.pending_tasks.remove(task)
+ str(e), nsrid, task['event'] , task['retries'])
+ self._log.exception(e)
+ if task['event'] == 'terminate':
+ # Ignore failure
+ done = True
if done:
- self._log.debug("Finished pending task NSR id(%s):", nsrid)
+ self._log.debug("Finished pending task NSR id: %s", nsrid)
else:
self._log.error("Failed configuring NSR(%s), retries remained:%d!",
nsrid, task['retries'])
# Initialize all handles that needs to be registered
for reg in self.reg_handles:
yield from reg.register()
-
+
+ def deregister(self):
+ # De-register all reg handles
+ self._log.debug("De-register ConfigManagerConfig for project {}".
+ format(self._project))
+
+ for reg in self.reg_handles:
+ reg.deregister()
+ reg = None
+
+ self._op_reg.delete_element(self._opdata_xpath)
+ self._op_reg.deregister()
+
@asyncio.coroutine
def register_cm_state_opdata(self):
conmanY.RecordState.CFG_PROCESS : "cfg_process",
conmanY.RecordState.CFG_PROCESS_FAILED : "cfg_process_failed",
conmanY.RecordState.CFG_SCHED : "cfg_sched",
- conmanY.RecordState.CFG_DELAY : "cfg_delay",
conmanY.RecordState.CONNECTING : "connecting",
conmanY.RecordState.FAILED_CONNECTION : "failed_connection",
- conmanY.RecordState.NETCONF_CONNECTED : "netconf_connected",
- conmanY.RecordState.NETCONF_SSH_CONNECTED : "netconf_ssh_connected",
- conmanY.RecordState.RESTCONF_CONNECTED : "restconf_connected",
conmanY.RecordState.CFG_SEND : "cfg_send",
conmanY.RecordState.CFG_FAILED : "cfg_failed",
conmanY.RecordState.READY_NO_CFG : "ready_no_cfg",
conmanY.RecordState.READY : "ready",
+ conmanY.RecordState.TERMINATE : "terminate",
}
return state_dict[state]
self._log.debug("Received cm-state: msg=%s, action=%s", msg, action)
if action == rwdts.QueryAction.READ:
- show_output = conmanY.CmOpdata()
- show_output.from_dict(self.cm_state)
self._log.debug("Responding to SHOW cm-state: %s", self.cm_state)
+ show_output = conmanY.YangData_RwProject_Project_CmState()
+ show_output.from_dict(self.cm_state)
xact_info.respond_xpath(rwdts.XactRspCode.ACK,
xpath=self._opdata_xpath,
msg=show_output)
try:
handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
- yield from self._dts.register(xpath=self._opdata_xpath,
- handler=handler,
- flags=rwdts.Flag.PUBLISHER)
+ self._op_reg = yield from self._dts.register(xpath=self._opdata_xpath,
+ handler=handler,
+ flags=rwdts.Flag.PUBLISHER)
self._log.info("Successfully registered for opdata(%s)", self._opdata_xpath)
except Exception as e:
self._log.error("Failed to register for opdata as (%s)", e)
+
+ def get_config_method(self, vnf_config):
+ cfg_types = ['juju', 'script']
+ for method in cfg_types:
+ if method in vnf_config:
+ return method
+ return None
@asyncio.coroutine
def process_nsd_vnf_configuration(self, nsr_obj, vnfr):
- def get_config_method(vnf_config):
- cfg_types = ['netconf', 'juju', 'script']
- for method in cfg_types:
- if method in vnf_config:
- return method
- return None
-
- def get_cfg_file_extension(method, configuration_options):
- ext_dict = {
- "netconf" : "xml",
- "script" : {
- "bash" : "sh",
- "expect" : "exp",
- },
- "juju" : "yml"
- }
-
- if method == "netconf":
- return ext_dict[method]
- elif method == "script":
- return ext_dict[method][configuration_options['script_type']]
- elif method == "juju":
- return ext_dict[method]
- else:
- return "cfg"
-
- # This is how the YAML file should look like,
- # This routine will be called for each VNF, so keep appending the file.
- # priority order is determined by the number,
- # hence no need to generate the file in that order. A dictionary will be
- # used that will take care of the order by number.
- '''
- 1 : <== This is priority
- name : trafsink_vnfd
- member_vnf_index : 2
- configuration_delay : 120
- configuration_type : netconf
- configuration_options :
- username : admin
- password : admin
- port : 2022
- target : running
- 2 :
- name : trafgen_vnfd
- member_vnf_index : 1
- configuration_delay : 0
- configuration_type : netconf
- configuration_options :
- username : admin
- password : admin
- port : 2022
- target : running
- '''
+ # Get vnf_configuration from vnfr
+ vnf_config = vnfr['vnf_configuration']
# Save some parameters needed as short cuts in flat structure (Also generated)
vnf_cfg = vnfr['vnf_cfg']
# Prepare unique name for this VNF
vnf_cfg['vnf_unique_name'] = get_vnf_unique_name(
- vnf_cfg['nsr_name'], vnfr['short_name'], vnfr['member_vnf_index_ref'])
-
- nsr_obj.cfg_path_prefix = '{}/{}_{}'.format(
- nsr_obj.this_nsr_dir, vnfr['short_name'], vnfr['member_vnf_index_ref'])
- nsr_vnfr = '{}/{}_{}'.format(
- vnf_cfg['nsr_name'], vnfr['short_name'], vnfr['member_vnf_index_ref'])
-
- # Get vnf_configuration from vnfr
- vnf_config = vnfr['vnf_configuration']
+ vnf_cfg['nsr_name'], vnfr['name'], vnfr['member_vnf_index_ref'])
self._log.debug("vnf_configuration = %s", vnf_config)
- # Create priority dictionary
- cfg_priority_order = 0
- if ('config_attributes' in vnf_config and
- 'config_priority' in vnf_config['config_attributes']):
- cfg_priority_order = vnf_config['config_attributes']['config_priority']
+ method = self.get_config_method(vnf_config)
- if cfg_priority_order not in nsr_obj.nsr_cfg_config_attributes_dict:
- # No VNFR with this priority yet, initialize the list
- nsr_obj.nsr_cfg_config_attributes_dict[cfg_priority_order] = []
-
- method = get_config_method(vnf_config)
if method is not None:
- # Create all sub dictionaries first
- config_priority = {
- 'id' : vnfr['id'],
- 'name' : vnfr['short_name'],
- 'member_vnf_index' : vnfr['member_vnf_index_ref'],
- }
-
- if 'config_delay' in vnf_config['config_attributes']:
- config_priority['configuration_delay'] = vnf_config['config_attributes']['config_delay']
- vnf_cfg['config_delay'] = config_priority['configuration_delay']
-
- configuration_options = {}
self._log.debug("config method=%s", method)
- config_priority['configuration_type'] = method
vnf_cfg['config_method'] = method
# Set config agent based on method
self._config_agent_mgr.set_config_agent(
- nsr_obj.agent_nsr, vnf_cfg['agent_vnfr'], method)
-
- cfg_opt_list = [
- 'port', 'target', 'script_type', 'ip_address', 'user', 'secret',
- ]
- for cfg_opt in cfg_opt_list:
- if cfg_opt in vnf_config[method]:
- configuration_options[cfg_opt] = vnf_config[method][cfg_opt]
- vnf_cfg[cfg_opt] = configuration_options[cfg_opt]
-
- cfg_opt_list = ['mgmt_ip_address', 'username', 'password']
- for cfg_opt in cfg_opt_list:
- if cfg_opt in vnf_config['config_access']:
- configuration_options[cfg_opt] = vnf_config['config_access'][cfg_opt]
- vnf_cfg[cfg_opt] = configuration_options[cfg_opt]
-
- # Add to the cp_dict
- vnf_cp_dict = nsr_obj._cp_dict[vnfr['member_vnf_index_ref']]
- vnf_cp_dict['rw_mgmt_ip'] = vnf_cfg['mgmt_ip_address']
- vnf_cp_dict['rw_username'] = vnf_cfg['username']
- vnf_cp_dict['rw_password'] = vnf_cfg['password']
-
+ nsr_obj.agent_nsr, vnf_cfg['agent_vnfr'], method)
+ else:
+ self._log.info("VNF:(%s) is not to be configured by Configuration Manager!",
+ log_this_vnf(vnfr['vnf_cfg']))
+ yield from nsr_obj.update_vnf_cm_state(vnfr, conmanY.RecordState.READY_NO_CFG)
- # TBD - see if we can neatly include the config in "config_attributes" file, no need though
- #config_priority['config_template'] = vnf_config['config_template']
- # Create config file
- vnf_cfg['juju_script'] = os.path.join(self._parent.cfg_dir, 'juju_if.py')
+ # Update the cm-state
+ nsr_obj.populate_cm_state_from_vnf_cfg()
- if 'config_template' in vnf_config:
- vnf_cfg['cfg_template'] = '{}_{}_template.cfg'.format(nsr_obj.cfg_path_prefix, config_priority['configuration_type'])
- vnf_cfg['cfg_file'] = '{}.{}'.format(nsr_obj.cfg_path_prefix, get_cfg_file_extension(method, configuration_options))
- vnf_cfg['xlate_script'] = os.path.join(self._parent.cfg_dir, 'xlate_cfg.py')
- try:
- # Now write this template into file
- with open(vnf_cfg['cfg_template'], "w") as cf:
- cf.write(vnf_config['config_template'])
- except Exception as e:
- self._log.error("Processing NSD, failed to generate configuration template : %s (Error : %s)",
- vnf_config['config_template'], str(e))
- raise
+ @asyncio.coroutine
+ def update_config_primitives(self, nsr_obj):
+
+ # Process all config-primitives in the member VNFs
+ for vnfr in nsr_obj.vnfrs:
+ vnfd = vnfr['vnf_cfg']['agent_vnfr'].vnfd
- self._log.debug("VNF endpoint so far: %s", vnf_cfg)
+ try:
+ prims = vnfd.vnf_configuration.config_primitive
+ if not prims:
+ self._log.debug("VNFR {} with VNFD {} has no config primitives defined".
+ format(vnfr['name'], vnfd.name))
+ return
+ except AttributeError as e:
+ self._log.error("No config primitives found on VNFR {} ({})".
+ format(vnfr['name'], vnfd.name))
+ continue
+
+ cm_state = nsr_obj.find_vnfr_cm_state(vnfr['id'])
+ srcs = cm_state['config_parameter']['config_parameter_source']
+ reqs = cm_state['config_parameter']['config_parameter_request']
+
+ vnf_configuration = vnfd.vnf_configuration.as_dict()
+ vnf_configuration['config_primitive'] = []
+
+ for prim in prims:
+ confp = prim.as_dict()
+ if 'parameter' not in confp:
+ continue
- # Populate filled up dictionary
- config_priority['configuration_options'] = configuration_options
- nsr_obj.nsr_cfg_config_attributes_dict[cfg_priority_order].append(config_priority)
- nsr_obj.num_vnfs_to_cfg += 1
- nsr_obj._vnfr_dict[vnf_cfg['vnf_unique_name']] = vnfr
- nsr_obj._vnfr_dict[vnfr['id']] = vnfr
+ for param in confp['parameter']:
+ # First check the param in capabilities
+ found = False
+ for src in srcs:
+ for p in src['parameter']:
+ if (p['config_primitive_ref'] == confp['name']) \
+ and (p['parameter_ref'] == param['name']):
+ param['default_value'] = src['value']
+ found = True
+ break
+ if found:
+ break
+
+ if not found:
+ for req in reqs:
+ for p in req['parameter']:
+ if (p['config_primitive_ref'] == confp['name']) \
+ and (p['parameter_ref'] == param['name']):
+ param['default_value'] = req['value']
+ found = True
+ break
+ if found:
+ break
+
+ self._log.debug("Config primitive: {}".format(confp))
+ vnf_configuration['config_primitive'].append(confp)
+
+ cm_state['vnf_configuration'] = vnf_configuration
- self._log.debug("VNF:(%s) config_attributes = %s",
- log_this_vnf(vnfr['vnf_cfg']),
- nsr_obj.nsr_cfg_config_attributes_dict)
+ @asyncio.coroutine
+ def get_resolved_xpath(self, xpath, name, vnf_name, xpath_prefix):
+ # For now, use DTS to resolve the path
+ # TODO (pjoseph): Add better xpath support
+
+ dts_path = xpath
+ if xpath.startswith('../'):
+ prefix = xpath_prefix
+ xp = xpath
+ while xp.startswith('../'):
+ idx = prefix.rfind('/')
+ if idx == -1:
+ raise ValueError("VNF {}, Did not find the xpath specified: {}".
+ format(vnf_name, xpath))
+ prefix = prefix[:idx]
+ xp = xp[3:]
+
+ dts_path = prefix + '/' + xp
+
+ elif xpath.startswith('/'):
+ dts_path = 'C,' + xpath
+ elif xpath.startswith('C,/') or xpath.startswith('D,/'):
+ dts_path = xpath
else:
- self._log.info("VNF:(%s) is not to be configured by Configuration Manager!",
- log_this_vnf(vnfr['vnf_cfg']))
- yield from nsr_obj.update_vnf_cm_state(vnfr, conmanY.RecordState.READY_NO_CFG)
+ self._log.error("Invalid xpath {} for source {} in VNF {}".
+ format(xpath, name, vnf_name))
+ raise ValueError("Descriptor xpath {} in source {} for VNF {} "
+ "is invalid".
+ format(xpath, name, vnf_name))
- # Update the cm-state
- nsr_obj.populate_vm_state_from_vnf_cfg()
+ dts_path = self._project.add_project(dts_path)
+ return dts_path
@asyncio.coroutine
- def config_NSR(self, id):
+ def resolve_xpath(self, xpath, name, vnfd):
+ xpath_prefix = "C,/project-vnfd:vnfd-catalog/vnfd[id={}]/config-parameter" \
+ "/config-parameter-source[name={}]" \
+ "/descriptor".format(quoted_key(vnfd.id), quoted_key(name))
+
+ dts_path = yield from self.get_resolved_xpath(xpath, name,
+ vnfd.name, xpath_prefix)
+ idx = dts_path.rfind('/')
+ if idx == -1:
+ raise ValueError("VNFD {}, descriptor xpath {} should point to " \
+ "an attribute".format(vnfd.name, xpath))
+
+ attr = dts_path[idx+1:]
+ dts_path = dts_path[:idx]
+ self._log.debug("DTS path: {}, attribute: {}".format(dts_path, attr))
+
+ resp = yield from self.cmdts_obj.get_xpath(dts_path)
+ if resp is None:
+ raise ValueError("Xpath {} in capability {} for VNFD {} is not found".
+ format(xpath, name, vnfd.name))
+ self._log.debug("DTS response: {}".format(resp.as_dict()))
+
+ try:
+ val = getattr(resp, attr)
+ except AttributeError as e:
+ self._log.error("Did not find attribute : {}".format(attr))
+ try:
+ val = getattr(resp, attr.replace('-', '_'))
+ except AttributeError as e:
+ raise ValueError("Did not find attribute {} in XPath {} "
+ "for capability {} in VNF {}".
+ format(attr, dts_path, vnfd.name))
- def my_yaml_dump(config_attributes_dict, yf):
+ self._log.debug("XPath {}: {}".format(xpath, val))
+ return val
- yaml_dict = dict(sorted(config_attributes_dict.items()))
- yf.write(yaml.dump(yaml_dict, default_flow_style=False))
-
- nsr_dict = self._nsr_dict
- self._log.info("Configure NSR, id = %s", id)
+ @asyncio.coroutine
+ def resolve_attribute(self, attribute, name, vnfd, vnfr):
+ idx = attribute.rfind(',')
+ if idx == -1:
+ raise ValueError ("Invalid attribute {} for capability {} in "
+ "VNFD specified".
+ format(attribute, name, vnfd.name))
+ xpath = attribute[:idx].strip()
+ attr = attribute[idx+1:].strip()
+ self._log.debug("Attribute {}, {}".format(xpath, attr))
+ if xpath.startswith('C,/'):
+ raise ValueError("Attribute {} for capability {} in VNFD cannot "
+ "be a config".
+ format(attribute, name, vnfd.name))
+
+ xpath_prefix = "D,/vnfr:vnfr-catalog/vnfr[id={}]/config_parameter" \
+ "/config-parameter-source[name={}]" \
+ "/attribute".format(quoted_key(vnfr['id']), quoted_key(name))
+ dts_path = yield from self.get_resolved_xpath(xpath, name,
+ vnfr['name'],
+ xpath_prefix)
+ self._log.debug("DTS query: {}".format(dts_path))
+
+ resp = yield from self.cmdts_obj.get_xpath(dts_path)
+ if resp is None:
+ raise ValueError("Attribute {} in request {} for VNFD {} is " \
+ "not found".
+ format(xpath, name, vnfd.name))
+ self._log.debug("DTS response: {}".format(resp.as_dict()))
- #####################TBD###########################
- # yield from self._config_agent_mgr.invoke_config_agent_plugins('notify_create_nsr', self.id, self._nsd)
- # yield from self._config_agent_mgr.invoke_config_agent_plugins('notify_nsr_active', self.id, self._vnfrs)
-
try:
- if id not in nsr_dict:
- nsr_obj = ConfigManagerNSR(self._log, self._loop, self, id)
- nsr_dict[id] = nsr_obj
- else:
- self._log.info("NSR(%s) is already initialized!", id)
- nsr_obj = nsr_dict[id]
- except Exception as e:
- self._log.error("Failed creating NSR object for (%s) as (%s)", id, str(e))
- raise
+ val = getattr(resp, attr)
+ except AttributeError as e:
+ self._log.debug("Did not find attribute {}".format(attr))
+ try:
+ val = getattr(resp, attr.replace('-', '_'))
+ except AttributeError as e:
+ raise ValueError("Did not find attribute {} in XPath {} "
+ "for source {} in VNF {}".
+ format(attr, dts_path, vnfd.name))
- # Try to configure this NSR only if not already processed
- if nsr_obj.cm_nsr['state'] != nsr_obj.state_to_string(conmanY.RecordState.INIT):
- self._log.debug("NSR(%s) is already processed, state=%s",
- nsr_obj.nsr_name, nsr_obj.cm_nsr['state'])
- yield from nsr_obj.publish_cm_state()
- return True
+ self._log.debug("Attribute {}: {}".format(attribute, val))
+ return val
- cmdts_obj = self.cmdts_obj
+ @asyncio.coroutine
+ def process_vnf_config_parameter(self, nsr_obj):
+ nsd = nsr_obj.agent_nsr.nsd
+
+ # Process all capabilities in all the member VNFs
+ for vnfr in nsr_obj.vnfrs:
+ vnfd = vnfr['vnf_cfg']['agent_vnfr'].vnfd
+
+ try:
+ cparam = vnfd.config_parameter
+ except AttributeError as e:
+ self._log.debug("VNFR {} does not have VNF config parameter".
+ format(vnfr.name))
+ continue
+
+ srcs = []
+ try:
+ srcs = cparam.config_parameter_source
+ except AttributeError as e:
+ self._log.debug("VNFR {} has no source defined".
+ format(vnfr.name))
+
+ # Get the cm state dict for this vnfr
+ cm_state = nsr_obj.find_vnfr_cm_state(vnfr['id'])
+
+ cm_srcs = []
+ for src in srcs:
+ self._log.debug("VNFR {}: source {}".
+ format(vnfr['name'], src.as_dict()))
+
+ param_refs = []
+ for p in src.parameter:
+ param_refs.append({
+ 'config_primitive_ref': p.config_primitive_name_ref,
+ 'parameter_ref': p.config_primitive_parameter_ref
+ })
+
+ try:
+ val = src.value
+ self._log.debug("Got value {}".format(val))
+ if val:
+ cm_srcs.append({'name': src.name,
+ 'value': str(val),
+ 'parameter': param_refs})
+ continue
+ except AttributeError as e:
+ pass
+
+ try:
+ xpath = src.descriptor
+ # resolve xpath
+ if xpath:
+ val = yield from self.resolve_xpath(xpath, src.name, vnfd)
+ self._log.debug("Got xpath value: {}".format(val))
+ cm_srcs.append({'name': src.name,
+ 'value': str(val),
+ 'parameter': param_refs})
+ continue
+ except AttributeError as e:
+ pass
+
+ try:
+ attribute = src.attribute
+ # resolve attribute
+ if attribute:
+ val = yield from self.resolve_attribute(attribute,
+ src.name,
+ vnfd, vnfr)
+ self._log.debug("Got attribute value: {}".format(val))
+ cm_srcs.append({'name': src.name,
+ 'value': str(val),
+ 'parameter': param_refs})
+ continue
+ except AttributeError as e:
+ pass
+
+ try:
+ prim = src.primitive_ref
+ if prim:
+ raise NotImplementedError("{}: VNF config parameter {}"
+ "source support for config"
+ "primitive not yet supported".
+ format(vnfr.name, prim))
+ except AttributeError as e:
+ pass
+
+ self._log.debug("VNF config parameter sources: {}".format(cm_srcs))
+ cm_state['config_parameter']['config_parameter_source'] = cm_srcs
+
+ try:
+ reqs = cparam.config_parameter_request
+ except AttributeError as e:
+ self._log.debug("VNFR {} has no requests defined".
+ format(vnfr.name))
+ continue
+
+ cm_reqs = []
+ for req in reqs:
+ self._log.debug("VNFR{}: request {}".
+ format(vnfr['name'], req.as_dict()))
+ param_refs = []
+ for p in req.parameter:
+ param_refs.append({
+ 'config_primitive_ref': p.config_primitive_name_ref,
+ 'parameter_ref': p.config_primitive_parameter_ref
+ })
+ cm_reqs.append({'name': req.name,
+ 'parameter': param_refs})
+
+ self._log.debug("VNF requests: {}".format(cm_reqs))
+ cm_state['config_parameter']['config_parameter_request'] = cm_reqs
+
+ # Publish all config parameter for the VNFRs
+ # yield from nsr_obj.publish_cm_state()
+
+ cparam_map = []
try:
- # Fetch NSR
- nsr = yield from cmdts_obj.get_nsr(id)
- self._log.debug("Full NSR : %s", nsr)
- if nsr['operational_status'] != "running":
- self._log.info("NSR(%s) is not ready yet!", nsr['nsd_name_ref'])
- return False
- self._nsr = nsr
-
- # Create Agent NSR class
- nsr_config = yield from cmdts_obj.get_nsr_config(id)
- self._log.debug("NSR {} config: {}".format(id, nsr_config))
- nsr_obj.agent_nsr = riftcm_config_plugin.RiftCMnsr(nsr, nsr_config)
+ cparam_map = nsd.config_parameter_map
+ except AttributeError as e:
+ self._log.warning("No config parameter map specified for nsr: {}".
+ format(nsr_obj.nsr_name))
+
+ for cp in cparam_map:
+ src_vnfr = nsr_obj.agent_nsr.get_member_vnfr(
+ cp.config_parameter_source.member_vnf_index_ref)
+ cm_state = nsr_obj.find_vnfr_cm_state(src_vnfr.id)
+ if cm_state is None:
+ raise ValueError("Config parameter sources are not defined "
+ "for VNF member {} ({})".
+ format(cp.config_parameter_source.member_vnf_index_ref,
+ src_vnfr.name))
+ srcs = cm_state['config_parameter']['config_parameter_source']
+
+ src_attr = cp.config_parameter_source.config_parameter_source_ref
+ val = None
+ for src in srcs:
+ if src['name'] == src_attr:
+ val = src['value']
+ break
+
+ req_vnfr = nsr_obj.agent_nsr.get_member_vnfr(
+ cp.config_parameter_request.member_vnf_index_ref)
+ req_attr = cp.config_parameter_request.config_parameter_request_ref
+ cm_state = nsr_obj.find_vnfr_cm_state(req_vnfr.id)
+ try:
+ cm_reqs = cm_state['config_parameter']['config_parameter_request']
+ except KeyError as e:
+ raise ValueError("VNFR index {} ({}) has no requests defined".
+ format(cp.config_parameter_reequest.member_vnf_index_ref,
+ req_vnfr['name']))
+
+ for i, item in enumerate(cm_reqs):
+ if item['name'] == req_attr:
+ item['value'] = str(val)
+ cm_reqs[i] = item
+ self._log.debug("Request in VNFR {}: {}".
+ format(req_vnfr.name, item))
+ break
+
+ yield from self.update_config_primitives(nsr_obj)
+ # TODO: Confd crashing with the config-parameter publish
+ # So removing config-parameter and publishing cm-state
+ for vnfr in nsr_obj.vnfrs:
+ # Get the cm state dict for this vnfr
+ cm_state = nsr_obj.find_vnfr_cm_state(vnfr['id'])
+ del cm_state['config_parameter']['config_parameter_source']
+ del cm_state['config_parameter']['config_parameter_request']
+
+ # Publish resolved dependencies for the VNFRs
+ yield from nsr_obj.publish_cm_state()
+
+ @asyncio.coroutine
+ def config_NSR(self, id, event):
+
+ cmdts_obj = self.cmdts_obj
+ if event == 'running':
+ self._log.info("Configure NSR running, id = %s", id)
try:
- yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.RECEIVED)
+ nsr_obj = None
+ try:
+ if id not in self._nsr_dict:
+ nsr_obj = ConfigManagerNSR(self._log, self._loop, self, self._project, id)
+ self._nsr_dict[id] = nsr_obj
+ else:
+ self._log.info("NSR(%s) is already initialized!", id)
+ nsr_obj = self._nsr_dict[id]
+
+ except Exception as e:
+ self._log.error("Failed creating NSR object for (%s) as (%s)", id, str(e))
+ raise e
+
+ # Try to configure this NSR only if not already processed
+ if nsr_obj.cm_nsr['state'] != nsr_obj.state_to_string(conmanY.RecordState.INIT):
+ self._log.debug("NSR(%s) is already processed, state=%s",
+ nsr_obj.nsr_name, nsr_obj.cm_nsr['state'])
+ # Publish again in case NSM restarted
+ yield from nsr_obj.publish_cm_state()
+ return True
+
+ # Fetch NSR
+ nsr = yield from cmdts_obj.get_nsr(id)
+ self._log.debug("Full NSR : %s", nsr)
+ if nsr['operational_status'] != "running":
+ self._log.info("NSR(%s) is not ready yet!", nsr['nsd_name_ref'])
+ return False
+ self._nsr = nsr
+
+ # Create Agent NSR class
+ nsr_config = yield from cmdts_obj.get_nsr_config(id)
+ self._log.debug("NSR {} config: {}".format(id, nsr_config))
+
+ if nsr_config is None:
+ # The NST Terminate has been initiated before the configuration. Hence
+ # not proceeding with config.
+ self._log.warning("NSR - %s is deleted before Configuration. Not proceeding with configuration.", id)
+ return True
+
+ nsr_obj.agent_nsr = riftcm_config_plugin.RiftCMnsr(nsr, nsr_config,
+ self._project)
+
+ unique_cfg_vnfr_list = list()
+ unique_agent_vnfr_list = list()
+ try:
+ yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.RECEIVED)
- # Parse NSR
- if nsr is not None:
nsr_obj.set_nsr_name(nsr['name_ref'])
- nsr_dir = os.path.join(self._parent.cfg_dir, nsr_obj.nsr_name)
- self._log.info("Checking NS config directory: %s", nsr_dir)
- if not os.path.isdir(nsr_dir):
- os.makedirs(nsr_dir)
- # self._log.critical("NS %s is not to be configured by Service Orchestrator!", nsr_obj.nsr_name)
- # yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.READY_NO_CFG)
- # return
-
- nsr_obj.set_config_dir(self)
-
for const_vnfr in nsr['constituent_vnfr_ref']:
self._log.debug("Fetching VNFR (%s)", const_vnfr['vnfr_id'])
vnfr_msg = yield from cmdts_obj.get_vnfr(const_vnfr['vnfr_id'])
if vnfr_msg:
vnfr = vnfr_msg.as_dict()
- self._log.info("create VNF:{}/{}".format(nsr_obj.nsr_name, vnfr['short_name']))
+ self._log.info("create VNF:{}/{} operational status {}".format(nsr_obj.nsr_name, vnfr['name'], vnfr['operational_status']))
agent_vnfr = yield from nsr_obj.add_vnfr(vnfr, vnfr_msg)
+ method = self.get_config_method(vnfr['vnf_configuration'])
+ if method is not None:
+ unique_cfg_vnfr_list.append(vnfr)
+ unique_agent_vnfr_list.append(agent_vnfr)
- # Preserve order, self.process_nsd_vnf_configuration()
- # sets up the config agent based on the method
+ # Process VNF Cfg
+ # Set up the config agent based on the method
yield from self.process_nsd_vnf_configuration(nsr_obj, vnfr)
- yield from self._config_agent_mgr.invoke_config_agent_plugins(
- 'notify_create_vnfr',
- nsr_obj.agent_nsr,
- agent_vnfr)
-
- #####################TBD###########################
- # self._log.debug("VNF active. Apply initial config for vnfr {}".format(vnfr.name))
- # yield from self._config_agent_mgr.invoke_config_agent_plugins('apply_initial_config',
- # vnfr.id, vnfr)
- # yield from self._config_agent_mgr.invoke_config_agent_plugins('notify_terminate_vnf', self.id, vnfr)
+ else:
+ self._log.warning("NSR %s, VNFR not found yet (%s)", nsr_obj.nsr_name, const_vnfr['vnfr_id'])
- except Exception as e:
- self._log.error("Failed processing NSR (%s) as (%s)", nsr_obj.nsr_name, str(e))
- self._log.exception(e)
- yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_PROCESS_FAILED)
- raise
-
- try:
- # Generate config_config_attributes.yaml (For debug reference)
- with open(nsr_obj.config_attributes_file, "w") as yf:
- my_yaml_dump(nsr_obj.nsr_cfg_config_attributes_dict, yf)
- except Exception as e:
- self._log.error("NS:(%s) failed to write config attributes file as (%s)", nsr_obj.nsr_name, str(e))
+ # Process VNF config parameter
+ yield from self.process_vnf_config_parameter(nsr_obj)
- try:
- # Generate nsr_xlate_dict.yaml (For debug reference)
- with open(nsr_obj.xlate_dict_file, "w") as yf:
- yf.write(yaml.dump(nsr_obj._cp_dict, default_flow_style=False))
- except Exception as e:
- self._log.error("NS:(%s) failed to write nsr xlate tags file as (%s)", nsr_obj.nsr_name, str(e))
+ # Invoke the config agent plugin
+ for agent_vnfr in unique_agent_vnfr_list:
+ yield from self._config_agent_mgr.invoke_config_agent_plugins(
+ 'notify_create_vnfr',
+ nsr_obj.agent_nsr,
+ agent_vnfr)
- self._log.debug("Starting to configure each VNF")
+ except Exception as e:
+ self._log.error("Failed processing NSR (%s) as (%s)", nsr_obj.nsr_name, str(e))
+ self._log.exception(e)
+ yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_PROCESS_FAILED)
+ raise e
- # Check if this NS has input parametrs
- self._log.info("Checking NS configuration order: %s", nsr_obj.config_attributes_file)
+ self._log.debug("Starting to configure each VNF")
- if os.path.exists(nsr_obj.config_attributes_file):
- # Apply configuration is specified order
try:
- # Go in loop to configure by specified order
- self._log.info("Using Dynamic configuration input parametrs for NS: %s", nsr_obj.nsr_name)
-
- # cfg_delay = nsr_obj.nsr_cfg_config_attributes_dict['configuration_delay']
- # if cfg_delay:
- # self._log.info("Applying configuration delay for NS (%s) ; %d seconds",
- # nsr_obj.nsr_name, cfg_delay)
- # yield from asyncio.sleep(cfg_delay, loop=self._loop)
-
- for config_attributes_dict in nsr_obj.nsr_cfg_config_attributes_dict.values():
- # Iterate through each priority level
- for vnf_config_attributes_dict in config_attributes_dict:
- # Iterate through each vnfr at this priority level
-
- # Make up vnf_unique_name with vnfd name and member index
- #vnfr_name = "{}.{}".format(nsr_obj.nsr_name, vnf_config_attributes_dict['name'])
- vnf_unique_name = get_vnf_unique_name(
- nsr_obj.nsr_name,
- vnf_config_attributes_dict['name'],
- str(vnf_config_attributes_dict['member_vnf_index']),
- )
- self._log.info("NS (%s) : VNF (%s) - Processing configuration attributes",
- nsr_obj.nsr_name, vnf_unique_name)
+ for cfg_vnfr in unique_cfg_vnfr_list:
+ # Apply configuration
+ vnf_unique_name = get_vnf_unique_name(
+ nsr_obj.nsr_name,
+ cfg_vnfr['name'],
+ str(cfg_vnfr['member_vnf_index_ref']),
+ )
- # Find vnfr for this vnf_unique_name
- if vnf_unique_name not in nsr_obj._vnfr_dict:
- self._log.error("NS (%s) - Can not find VNF to be configured: %s", nsr_obj.nsr_name, vnf_unique_name)
- else:
- # Save this unique VNF's config input parameters
- nsr_obj.vnf_config_attributes_dict[vnf_unique_name] = vnf_config_attributes_dict
- nsr_obj.ConfigVNF(nsr_obj._vnfr_dict[vnf_unique_name])
+ # Find vnfr for this vnf_unique_name
+ if vnf_unique_name not in nsr_obj._vnfr_dict:
+ self._log.error("NS (%s) - Can not find VNF to be configured: %s", nsr_obj.nsr_name, vnf_unique_name)
+ else:
+ # Save this unique VNF's config input parameters
+ nsr_obj.ConfigVNF(nsr_obj._vnfr_dict[vnf_unique_name])
# Now add the entire NS to the pending config list.
- self._log.info("Scheduling NSR:{} configuration".format(nsr_obj.nsr_name))
- self._parent.add_to_pending(nsr_obj)
+ self._log.info("Scheduling NSR:{} configuration ".format(nsr_obj.nsr_name))
+ self._parent.add_to_pending(nsr_obj, unique_cfg_vnfr_list)
self._parent.add_nsr_obj(nsr_obj)
except Exception as e:
self._log.error("Failed processing input parameters for NS (%s) as %s", nsr_obj.nsr_name, str(e))
+ self._log.exception(e)
raise
- else:
- self._log.error("No configuration input parameters for NSR (%s)", nsr_obj.nsr_name)
- except Exception as e:
- self._log.error("Failed to configure NS (%s) as (%s)", nsr_obj.nsr_name, str(e))
- yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_PROCESS_FAILED)
- raise
+ except Exception as e:
+ self._log.exception(e)
+ if nsr_obj:
+ self._log.error("Failed to configure NS (%s) as (%s)", nsr_obj.nsr_name, str(e))
+ yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_PROCESS_FAILED)
+ raise e
+
+ elif event == 'terminate':
+ self._log.info("Configure NSR terminate, id = %s", id)
+ nsr_obj = self._parent.get_nsr_obj(id)
+ if nsr_obj is None:
+ # Can be none if the terminate is called again due to DTS query
+ return True
+
+ try:
+ yield from self.process_ns_terminate_config(nsr_obj, self._project.name)
+ except Exception as e:
+ self._log.warn("Terminate config failed for NSR {}: {}".
+ format(id, e))
+ self._log.exception(e)
+
+ try:
+ yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.TERMINATE)
+ yield from self.terminate_NSR(id)
+ except Exception as e:
+ self._log.error("Terminate failed for NSR {}: {}".
+ format(id, e))
+ self._log.exception(e)
return True
@asyncio.coroutine
def terminate_NSR(self, id):
- nsr_dict = self._nsr_dict
- if id not in nsr_dict:
+ if id not in self._nsr_dict:
self._log.error("NSR(%s) does not exist!", id)
return
else:
+ nsr_obj = self._nsr_dict[id]
+
# Remove this NSR if we have it on pending task list
for task in self.pending_tasks:
if task['nsrid'] == id:
self.del_from_pending_tasks(task)
- # Remove this object from global list
- nsr_obj = nsr_dict.pop(id, None)
-
- # Remove this NS cm-state from global status list
- self.cm_state['cm_nsr'].remove(nsr_obj.cm_nsr)
-
- # Also remove any scheduled configuration event
+ # Remove any scheduled configuration event
for nsr_obj_p in self._parent.pending_cfg:
if nsr_obj_p == nsr_obj:
assert id == nsr_obj_p._nsr_id
- #self._parent.pending_cfg.remove(nsr_obj_p)
- # Mark this as being deleted so we do not try to configure it if we are in cfg_delay (will wake up and continue to process otherwise)
+ # Mark this as being deleted so we do not try to reconfigure it
+ # if we are in cfg_delay (will wake up and continue to process otherwise)
nsr_obj_p.being_deleted = True
self._log.info("Removed scheduled configuration for NSR(%s)", nsr_obj.nsr_name)
- self._parent.remove_nsr_obj(id)
-
# Call Config Agent to clean up for each VNF
for agent_vnfr in nsr_obj.agent_nsr.vnfrs:
yield from self._config_agent_mgr.invoke_config_agent_plugins(
nsr_obj.agent_nsr,
agent_vnfr)
- # publish delete cm-state (cm-nsr)
- yield from nsr_obj.delete_cm_nsr()
+ self._log.info("NSR(%s/%s) is terminated", nsr_obj.nsr_name, id)
+
+ @asyncio.coroutine
+ def delete_NSR(self, id):
+ if id not in self._nsr_dict:
+ self._log.debug("NSR(%s) does not exist!", id)
+ return
+ else:
+ # Remove this NSR if we have it on pending task list
+ for task in self.pending_tasks:
+ if task['nsrid'] == id:
+ self.del_from_pending_tasks(task)
+
+ # Remove this object from global list
+ nsr_obj = self._nsr_dict.pop(id, None)
+
+ # Remove this NS cm-state from global status list
+ self.cm_state['cm_nsr'].remove(nsr_obj.cm_nsr)
+
+ self._parent.remove_nsr_obj(id)
- #####################TBD###########################
- # yield from self._config_agent_mgr.invoke_config_agent_plugins('notify_terminate_ns', self.id)
+ # publish delete cm-state (cm-nsr)
+ yield from nsr_obj.delete_cm_nsr()
+
+ # Deleting any config jobs for NSR.
+ job_manager = self.riftcm_rpc_handler.job_manager.handler
+ job_manager._terminate_nsr(id)
+
+ #####################TBD###########################
+ # yield from self._config_agent_mgr.invoke_config_agent_plugins('notify_terminate_ns', self.id)
- self._log.info("NSR(%s/%s) is deleted", nsr_obj.nsr_name, id)
+ self._log.info("NSR(%s/%s) is deleted", nsr_obj.nsr_name, id)
@asyncio.coroutine
def process_initial_config(self, nsr_obj, conf, script, vnfr_name=None):
v['name'] = vnfr['name']
v['mgmt_ip_address'] = vnfr['vnf_cfg']['mgmt_ip_address']
v['mgmt_port'] = vnfr['vnf_cfg']['port']
+ v['datacenter'] = vnfr['datacenter']
if 'dashboard_url' in vnfr:
v['dashboard_url'] = vnfr['dashboard_url']
if 'connection_point' in vnfr:
v['connection_point'] = []
for cp in vnfr['connection_point']:
- v['connection_point'].append(
- {
- 'name': cp['name'],
- 'ip_address': cp['ip_address'],
- }
- )
+ cp_info = dict(name=cp['name'],
+ ip_address=cp['ip_address'],
+ mac_address=cp.get('mac_address', None),
+ connection_point_id=cp.get('connection_point_id',None))
+
+ if 'virtual_cps' in cp:
+ cp_info['virtual_cps'] = [ {k:v for k,v in vcp.items()
+ if k in ['ip_address', 'mac_address']}
+ for vcp in cp['virtual_cps'] ]
+ v['connection_point'].append(cp_info)
- v['vdur'] = []
- vdu_data = []
- for vdu in vnfr['vdur']:
- d = {}
- for k in ['name','management_ip', 'vm_management_ip', 'id', 'vdu_id_ref']:
- if k in vdu:
- d[k] = vdu[k]
- vdu_data.append(d)
- v['vdur'] = vdu_data
+
+ if 'vdur' in vnfr:
+ vdu_data = [(vdu.get('name',None), vdu.get('management_ip',None), vdu.get('vm_management_ip',None), vdu.get('id',None))
+ for vdu in vnfr['vdur']]
+
+ v['vdur'] = [ dict(zip(['name', 'management_ip', 'vm_management_ip', 'id', 'vdu_id_ref'] , data)) for data in vdu_data ]
inp['vnfr'][vnfr['member_vnf_index_ref']] = v
try:
os.remove(inp_file)
except Exception as e:
- self._log.debug("Error removing input file {}: {}".
+ self._log.error("Error removing input file {}: {}".
format(inp_file, e))
- def get_script_file(self, script_name, d_name, d_id, d_type):
- # Get the full path to the script
- script = ''
- # If script name starts with /, assume it is full path
- if script_name[0] == '/':
- # The script has full path, use as is
- script = script_name
- else:
- script = os.path.join(os.environ['RIFT_ARTIFACTS'],
- 'launchpad/packages',
- d_type,
- d_id,
- d_name,
- 'scripts',
- script_name)
- self._log.debug("Checking for script at %s", script)
- if not os.path.exists(script):
- self._log.warning("Did not find script %s", script)
- script = os.path.join(os.environ['RIFT_INSTALL'],
- 'usr/bin',
- script_name)
-
- # Seen cases in jenkins, where the script execution fails
- # with permission denied. Setting the permission on script
- # to make sure it has execute permission
- perm = os.stat(script).st_mode
- if not (perm & stat.S_IXUSR):
- self._log.warning("NSR/VNFR {} initial config script {} " \
+ def get_script_file(self, script_name, d_name, d_id, d_type, project=None):
+ # Get the full path to the script
+ script = os.path.join(os.getenv('RIFT_VAR_ROOT'),
+ 'launchpad/packages',
+ d_type,
+ project if project else "",
+ d_id,
+ 'scripts',
+ script_name)
+
+ self._log.debug("Checking for script at %s", script)
+ if not os.path.exists(script):
+ err_msg = ("{} {}: Did not find script {} for config".
+ format(d_type, d_name, script))
+ self._log.error(err_msg)
+ raise ScriptNotFoundError(err_msg)
+
+ # Seen cases in jenkins, where the script execution fails
+ # with permission denied. Setting the permission on script
+ # to make sure it has execute permission
+ perm = os.stat(script).st_mode
+ if not (perm & stat.S_IXUSR):
+ self._log.warning("NSR/VNFR {} script {} " \
"without execute permission: {}".
format(d_name, script, perm))
- os.chmod(script, perm | stat.S_IXUSR)
- return script
+ os.chmod(script, perm | stat.S_IXUSR)
+ return script
@asyncio.coroutine
- def process_ns_initial_config(self, nsr_obj):
- '''Apply the initial-config-primitives specified in NSD'''
-
+ def process_ns_initial_config(self, nsr_obj, project=None):
+ '''Apply the initial-service-primitives specified in NSD'''
nsr = yield from self.cmdts_obj.get_nsr(nsr_obj.nsr_id)
- if 'initial_config_primitive' not in nsr:
+ self._log.debug("NS initial config: {}".format(nsr))
+ if 'initial_service_primitive' not in nsr:
return
-
if nsr is not None:
nsd = yield from self.cmdts_obj.get_nsd(nsr_obj.nsr_id)
- for conf in nsr['initial_config_primitive']:
+ for conf in nsr['initial_service_primitive']:
self._log.debug("NSR {} initial config: {}".
format(nsr_obj.nsr_name, conf))
script = self.get_script_file(conf['user_defined_script'],
nsd.name,
nsd.id,
- 'nsd')
+ 'nsd',
+ project
+ )
yield from self.process_initial_config(nsr_obj, conf, script)
@asyncio.coroutine
- def process_vnf_initial_config(self, nsr_obj, vnfr):
+ def process_vnf_initial_config(self, nsr_obj, vnfr, project=None):
'''Apply the initial-config-primitives specified in VNFD'''
-
vnfr_name = vnfr.name
vnfd = vnfr.vnfd
vnf_cfg = vnfd.vnf_configuration
for conf in vnf_cfg.initial_config_primitive:
- self._log.debug("VNFR {} initial config: {}".
- format(vnfr_name, conf))
+ self._log.debug("VNFR {} initial config: {} for vnfd id {}".
+ format(vnfr_name, conf, vnfd.id))
if not conf.user_defined_script:
- self._log.debug("VNFR {} did not fine user defined script: {}".
+ self._log.debug("VNFR {} did not find user defined script: {}".
format(vnfr_name, conf))
continue
script = self.get_script_file(conf.user_defined_script,
vnfd.name,
vnfd.id,
- 'vnfd')
+ 'vnfd',
+ project
+ )
yield from self.process_initial_config(nsr_obj,
conf.as_dict(),
script,
vnfr_name=vnfr_name)
+ @asyncio.coroutine
+ def process_ns_terminate_config(self, nsr_obj, project=None):
+ '''Apply the terminate-service-primitives specified in NSD'''
+
+ nsr = self._nsr
+ if 'terminate_service_primitive' not in nsr:
+ return
+
+ if nsr is not None:
+ nsd = nsr_obj.agent_nsr.nsd
+ for conf in nsr['terminate_service_primitive']:
+ self._log.debug("NSR {} terminate service: {}".
+ format(nsr_obj.nsr_name, conf))
+ script = self.get_script_file(conf['user_defined_script'],
+ nsd.name,
+ nsd.id,
+ 'nsd',
+ project)
+
+ try:
+ yield from self.process_initial_config(nsr_obj, conf, script)
+
+ except Exception as e:
+ # Ignore any failures on terminate
+ self._log.warning("NSR {} terminate config script {} failed: {}".
+ format(nsr_obj.nsr_name, script, e))
+ break
+
class ConfigManagerNSR(object):
- def __init__(self, log, loop, parent, id):
+ def __init__(self, log, loop, parent, project, id):
self._log = log
self._loop = loop
self._rwcal = None
self._cp_dict = {}
self._nsr_id = id
self._parent = parent
+ self._project = project
self._log.info("Instantiated NSR entry for id=%s", id)
self.nsr_cfg_config_attributes_dict = {}
self.vnf_config_attributes_dict = {}
@property
def nsr_opdata_xpath(self):
''' Returns full xpath for this NSR cm-state opdata '''
- return(
- "D,/rw-conman:cm-state" +
- "/rw-conman:cm-nsr[rw-conman:id='{}']"
- ).format(self._nsr_id)
+ return self._project.add_project((
+ "D,/rw-conman:cm-state/rw-conman:cm-nsr[rw-conman:id={}]"
+ ).format(quoted_key(self._nsr_id)))
@property
def vnfrs(self):
def publish_cm_state(self):
''' This function publishes cm_state for this NSR '''
- cm_state = conmanY.CmOpdata()
+ cm_state = conmanY.YangData_RwProject_Project_CmState()
cm_state_nsr = cm_state.cm_nsr.add()
cm_state_nsr.from_dict(self.cm_nsr)
#with self._dts.transaction() as xact:
self.nsr_name = name
self.cm_nsr['name'] = name
- def set_config_dir(self, caller):
- self.this_nsr_dir = os.path.join(
- caller._parent.cfg_dir, self.nsr_name, caller._nsr['name_ref'])
- if not os.path.exists(self.this_nsr_dir):
- os.makedirs(self.this_nsr_dir)
- self._log.debug("NSR:(%s), Created configuration directory(%s)",
- caller._nsr['name_ref'], self.this_nsr_dir)
- self.config_attributes_file = os.path.join(self.this_nsr_dir, "configuration_config_attributes.yml")
- self.xlate_dict_file = os.path.join(self.this_nsr_dir, "nsr_xlate_dict.yml")
-
- def xlate_conf(self, vnfr, vnf_cfg):
-
- # If configuration type is not already set, try to read from attributes
- if vnf_cfg['interface_type'] is None:
- # Prepare unique name for this VNF
- vnf_unique_name = get_vnf_unique_name(
- vnf_cfg['nsr_name'],
- vnfr['short_name'],
- vnfr['member_vnf_index_ref'],
- )
-
- # Find this particular (unique) VNF's config attributes
- if (vnf_unique_name in self.vnf_config_attributes_dict):
- vnf_cfg_config_attributes_dict = self.vnf_config_attributes_dict[vnf_unique_name]
- vnf_cfg['interface_type'] = vnf_cfg_config_attributes_dict['configuration_type']
- if 'configuration_options' in vnf_cfg_config_attributes_dict:
- cfg_opts = vnf_cfg_config_attributes_dict['configuration_options']
- for key, value in cfg_opts.items():
- vnf_cfg[key] = value
-
- cfg_path_prefix = '{}/{}/{}_{}'.format(
- self._parent._parent.cfg_dir,
- vnf_cfg['nsr_name'],
- vnfr['short_name'],
- vnfr['member_vnf_index_ref'],
- )
-
- vnf_cfg['cfg_template'] = '{}_{}_template.cfg'.format(cfg_path_prefix, vnf_cfg['interface_type'])
- vnf_cfg['cfg_file'] = '{}.cfg'.format(cfg_path_prefix)
- vnf_cfg['xlate_script'] = self._parent._parent.cfg_dir + '/xlate_cfg.py'
-
- self._log.debug("VNF endpoint so far: %s", vnf_cfg)
-
- self._log.info("Checking cfg_template %s", vnf_cfg['cfg_template'])
- if os.path.exists(vnf_cfg['cfg_template']):
- return True
- return False
-
def ConfigVNF(self, vnfr):
vnf_cfg = vnfr['vnf_cfg']
if (vnf_cm_state['state'] == self.state_to_string(conmanY.RecordState.READY_NO_CFG)
or
vnf_cm_state['state'] == self.state_to_string(conmanY.RecordState.READY)):
- self._log.warning("NS/VNF (%s/%s) is already configured! Skipped.", self.nsr_name, vnfr['short_name'])
+ self._log.warning("NS/VNF (%s/%s) is already configured! Skipped.", self.nsr_name, vnfr['name'])
return
#UPdate VNF state
self._cp_dict['rw_mgmt_ip'] = vnf_cfg['mgmt_ip_address']
self._cp_dict['rw_username'] = vnf_cfg['username']
self._cp_dict['rw_password'] = vnf_cfg['password']
- ############################################################
- # TBD - Need to lookup above 3 for a given VNF, not global #
- # Once we do that no need to dump below file again before #
- # each VNF configuration translation. #
- # This will require all existing config templates to be #
- # changed for above three tags to include member index #
- ############################################################
- try:
- nsr_obj = vnf_cfg['nsr_obj']
- # Generate config_config_attributes.yaml (For debug reference)
- with open(nsr_obj.xlate_dict_file, "w") as yf:
- yf.write(yaml.dump(nsr_obj._cp_dict, default_flow_style=False))
- except Exception as e:
- self._log.error("NS:(%s) failed to write nsr xlate tags file as (%s)", nsr_obj.nsr_name, str(e))
-
- if 'cfg_template' in vnf_cfg:
- script_cmd = 'python3 {} -i {} -o {} -x "{}"'.format(vnf_cfg['xlate_script'], vnf_cfg['cfg_template'], vnf_cfg['cfg_file'], self.xlate_dict_file)
- self._log.debug("xlate script command (%s)", script_cmd)
- #xlate_msg = subprocess.check_output(script_cmd).decode('utf-8')
- xlate_msg = subprocess.check_output(script_cmd, shell=True).decode('utf-8')
- self._log.info("xlate script output (%s)", xlate_msg)
except Exception as e:
vnf_cm_state['state'] = self.state_to_string(conmanY.RecordState.CFG_PROCESS_FAILED)
- self._log.error("Failed to execute translation script for VNF: %s with (%s)", log_this_vnf(vnf_cfg), str(e))
+ self._log.error("Failed to set tags for VNF: %s with (%s)", log_this_vnf(vnf_cfg), str(e))
return
self._log.info("Applying config to VNF: %s = %s!", log_this_vnf(vnf_cfg), vnf_cfg)
try:
- #self.vnf_cfg_list.append(vnf_cfg)
self._log.debug("Scheduled configuration!")
vnf_cm_state['state'] = self.state_to_string(conmanY.RecordState.CFG_SCHED)
except Exception as e:
{
'cm_vnfr': [
{
- 'cfg_location': 'location1',
- 'cfg_type': 'script',
'connection_point': [
{'ip_address': '1.1.1.1', 'name': 'vnf1cp1'},
{'ip_address': '1.1.1.2', 'name': 'vnf1cp2'}
'state': 'init'
},
{
- 'cfg_location': 'location2',
- 'cfg_type': 'netconf',
'connection_point': [{'ip_address': '2.1.1.1', 'name': 'vnf2cp1'},
{'ip_address': '2.1.1.2', 'name': 'vnf2cp2'}],
'id': 'vnfrid2',
'states': 'Initialized, '
})
- def populate_vm_state_from_vnf_cfg(self):
+ def populate_cm_state_from_vnf_cfg(self):
# Fill in each VNFR from this nsr object
vnfr_list = self._vnfr_list
for vnfr in vnfr_list:
# Fill in VNF configuration details
vnf_cm_state['cfg_type'] = vnf_cfg['config_method']
- vnf_cm_state['cfg_location'] = vnf_cfg['cfg_file']
# Fill in each connection-point for this VNF
if "connection_point" in vnfr:
cp_list = vnfr['connection_point']
for cp_item_dict in cp_list:
- vnf_cm_state['connection_point'].append(
- {
- 'name' : cp_item_dict['name'],
- 'ip_address' : cp_item_dict['ip_address'],
- }
- )
+ try:
+ vnf_cm_state['connection_point'].append(
+ {
+ 'name' : cp_item_dict['name'],
+ 'ip_address' : cp_item_dict['ip_address'],
+ 'connection_point_id' : cp_item_dict['connection_point_id'],
+ }
+ )
+ except Exception:
+ # Added to make mano_ut work
+ pass
def state_to_string(self, state):
state_dict = {
conmanY.RecordState.CFG_PROCESS : "cfg_process",
conmanY.RecordState.CFG_PROCESS_FAILED : "cfg_process_failed",
conmanY.RecordState.CFG_SCHED : "cfg_sched",
- conmanY.RecordState.CFG_DELAY : "cfg_delay",
conmanY.RecordState.CONNECTING : "connecting",
conmanY.RecordState.FAILED_CONNECTION : "failed_connection",
- conmanY.RecordState.NETCONF_CONNECTED : "netconf_connected",
- conmanY.RecordState.NETCONF_SSH_CONNECTED : "netconf_ssh_connected",
- conmanY.RecordState.RESTCONF_CONNECTED : "restconf_connected",
conmanY.RecordState.CFG_SEND : "cfg_send",
conmanY.RecordState.CFG_FAILED : "cfg_failed",
conmanY.RecordState.READY_NO_CFG : "ready_no_cfg",
conmanY.RecordState.READY : "ready",
+ conmanY.RecordState.TERMINATE : "terminate",
}
return state_dict[state]
# Not found, Create and Initialize this VNF cm-state
vnf_cm_state = {
'id' : vnfr['id'],
- 'name' : vnfr['short_name'],
+ 'name' : vnfr['name'],
'state' : self.state_to_string(conmanY.RecordState.RECEIVED),
'mgmt_interface' :
{
'ip_address' : vnf_cfg['mgmt_ip_address'],
'port' : vnf_cfg['port'],
},
- 'cfg_type' : vnf_cfg['config_method'],
- 'cfg_location' : vnf_cfg['cfg_file'],
'connection_point' : [],
+ 'config_parameter' :
+ {
+ 'config_parameter_source' : [],
+ 'config_parameter_request' : [],
+ },
}
self.cm_nsr['cm_vnfr'].append(vnf_cm_state)
vnf_cm_state = self.find_vnfr_cm_state(vnfr['id'])
if vnf_cm_state is None:
self._log.error("No opdata found for NS/VNF:%s/%s!",
- self.nsr_name, vnfr['short_name'])
+ self.nsr_name, vnfr['name'])
return
if vnf_cm_state['state'] != self.state_to_string(state):
yield from self.publish_cm_state()
self._log.info("VNF ({}/{}/{}) state change: {} -> {}"
.format(self.nsr_name,
- vnfr['short_name'],
+ vnfr['name'],
vnfr['member_vnf_index_ref'],
old_state,
vnf_cm_state['state']))
self._cp_dict[vnfr['member_vnf_index_ref']].update(subnet)
self._cp_dict.update(subnet)
self._log.debug("VNF:(%s) Updated assigned subnet = %s",
- vnfr['short_name'], subnet)
+ vnfr['name'], subnet)
except Exception as e:
self._log.error("VNF:(%s) VLR Error = %s",
- vnfr['short_name'], e)
-
+ vnfr['name'], e)
+
if vnfr['id'] not in self._vnfr_dict:
- self._log.info("NSR(%s) : Adding VNF Record for name=%s, id=%s", self._nsr_id, vnfr['short_name'], vnfr['id'])
+ self._log.info("NSR(%s) : Adding VNF Record for name=%s, id=%s", self._nsr_id, vnfr['name'], vnfr['id'])
# Add this vnfr to the list for show, or single traversal
self._vnfr_list.append(vnfr)
else:
- self._log.warning("NSR(%s) : VNF Record for name=%s, id=%s already exists, overwriting", self._nsr_id, vnfr['short_name'], vnfr['id'])
+ self._log.warning("NSR(%s) : VNF Record for name=%s, id=%s already exists, overwriting",
+ self._nsr_id, vnfr['name'], vnfr['id'])
# Make vnfr available by id as well as by name
- unique_name = get_vnf_unique_name(self.nsr_name, vnfr['short_name'], vnfr['member_vnf_index_ref'])
+ unique_name = get_vnf_unique_name(self.nsr_name, vnfr['name'], vnfr['member_vnf_index_ref'])
self._vnfr_dict[unique_name] = vnfr
self._vnfr_dict[vnfr['id']] = vnfr
'agent_vnfr' : self.agent_nsr.add_vnfr(vnfr, vnfr_msg),
'nsr_name' : self.nsr_name,
'nsr_id' : self._nsr_id,
- 'vnfr_name' : vnfr['short_name'],
+ 'vnfr_name' : vnfr['name'],
'member_vnf_index' : vnfr['member_vnf_index_ref'],
'port' : 0,
- 'username' : 'admin',
- 'password' : 'admin',
+ 'username' : '@rift',
+ 'password' : 'rift',
'config_method' : 'None',
'protocol' : 'None',
'mgmt_ip_address' : '0.0.0.0',
'script_type' : 'bash',
}
+ ##########################
# Update the mgmt ip address
# In case the config method is none, this is not
# updated later
except Exception as e:
self._log.warn(
"VNFR {}({}), unable to retrieve mgmt ip address: {}".
- format(vnfr['short_name'], vnfr['id'], e))
+ format(vnfr['name'], vnfr['id'], e))
vnfr['vnf_cfg'] = vnf_cfg
self.find_or_create_vnfr_cm_state(vnf_cfg)
cp_list += vdur['internal_connection_point']
for cp_item_dict in cp_list:
+ if 'ip_address' not in cp_item_dict:
+ self._log.error("connection point {} doesnot have an ip address assigned ".
+ format(cp_item_dict['name']))
+ continue
# Populate global dictionary
self._cp_dict[
cp_item_dict['name']
if 'internal_vlr' in vnfr:
for ivlr in vnfr['internal_vlr']:
yield from populate_subnets_from_vlr(ivlr['vlr_ref'])
-
+
# Update vnfr
vnf_cfg['agent_vnfr']._vnfr = vnfr
return vnf_cfg['agent_vnfr']
@staticmethod
def nsr_opdata(k=None):
return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
- ("[nsr:ns-instance-config-ref='{}']".format(k) if k is not None else ""))
+ ("[nsr:ns-instance-config-ref={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def nsd_msg(k=None):
- return ("C,/nsd:nsd-catalog/nsd:nsd" +
- "[nsd:id = '{}']".format(k) if k is not None else "")
+ return ("C,/project-nsd:nsd-catalog/project-nsd:nsd" +
+ "[project-nsd:id={}]".format(quoted_key(k)) if k is not None else "")
@staticmethod
def vnfr_opdata(k=None):
return ("D,/vnfr:vnfr-catalog/vnfr:vnfr" +
- ("[vnfr:id='{}']".format(k) if k is not None else ""))
+ ("[vnfr:id={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
- def vnfd(k=None):
+ def vnfd_path(k=None):
return ("C,/vnfd:vnfd-catalog/vnfd:vnfd" +
- ("[vnfd:id='{}']".format(k) if k is not None else ""))
+ ("[vnfd:id={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def config_agent(k=None):
return ("D,/rw-config-agent:config-agent/rw-config-agent:account" +
- ("[rw-config-agent:name='{}']".format(k) if k is not None else ""))
+ ("[rw-config-agent:name={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def nsr_config(k=None):
- return ("C,/nsr:ns-instance-config/nsr:nsr[nsr:id='{}']".format(k) if k is not None else "")
+ return ("C,/nsr:ns-instance-config/nsr:nsr" +
+ ("[nsr:id={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def vlr(k=None):
- return ("D,/vlr:vlr-catalog/vlr:vlr[vlr:id='{}']".format(k) if k is not None else "")
+ return ("D,/vlr:vlr-catalog/vlr:vlr" +
+ ("[vlr:id={}]".format(quoted_key(k)) if k is not None else ""))
class ConfigManagerDTS(object):
''' This class either reads from DTS or publishes to DTS '''
- def __init__(self, log, loop, parent, dts):
+ def __init__(self, log, loop, parent, dts, project):
self._log = log
self._loop = loop
self._parent = parent
self._dts = dts
+ self._project = project
@asyncio.coroutine
- def _read_dts(self, xpath, do_trace=False):
+ def _read_dts(self, path, do_trace=False):
+ xpath = self._project.add_project(path)
self._log.debug("_read_dts path = %s", xpath)
flags = rwdts.XactFlag.MERGE
+ flags += rwdts.XactFlag.TRACE if do_trace else 0
res_iter = yield from self._dts.query_read(
xpath, flags=flags
)
return results
+ @asyncio.coroutine
+ def get_xpath(self, xpath):
+ self._log.debug("Attempting to get xpath: {}".format(xpath))
+ resp = yield from self._read_dts(xpath, False)
+ if len(resp) > 0:
+ self._log.debug("Got DTS resp: {}".format(resp[0]))
+ return resp[0]
+ return None
+
@asyncio.coroutine
def get_nsr(self, id):
self._log.debug("Attempting to get NSR: %s", id)
return vnfr_msg
@asyncio.coroutine
- def get_vnfd(self, vnfd_id):
- self._log.debug("Attempting to get VNFD: %s", vnfd_id)
- vnfdl = yield from self._read_dts(XPaths.vnfd(vnfd_id), do_trace=False)
+ def get_vnfd(self, id):
+ self._log.debug("Attempting to get VNFD: %s", XPaths.vnfd_path(id))
+ vnfdl = yield from self._read_dts(XPaths.vnfd_path(id), do_trace=False)
vnfd_msg = None
if len(vnfdl) > 0:
vnfd_msg = vnfdl[0]
@asyncio.coroutine
def get_vlr(self, id):
self._log.debug("Attempting to get VLR subnet: %s", id)
- vlrl = yield from self._read_dts(XPaths.vlr(id), do_trace=True)
+ vlrl = yield from self._read_dts(XPaths.vlr(id), do_trace=False)
vlr_msg = None
if len(vlrl) > 0:
vlr_msg = vlrl[0]
return cfgagentl
@asyncio.coroutine
- def update(self, path, msg, flags=rwdts.XactFlag.REPLACE):
+ def update(self, xpath, msg, flags=rwdts.XactFlag.REPLACE):
"""
Update a cm-state (cm-nsr) record in DTS with the path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Updating cm-state %s:%s dts_pub_hdl = %s", path, msg, self.dts_pub_hdl)
self.dts_pub_hdl.update_element(path, msg, flags)
self._log.debug("Updated cm-state, %s:%s", path, msg)
@asyncio.coroutine
- def delete(self, path):
+ def delete(self, xpath):
"""
Delete cm-nsr record in DTS with the path only
"""
+ path = self._project.add_project(xpath)
self._log.debug("Deleting cm-nsr %s dts_pub_hdl = %s", path, self.dts_pub_hdl)
self.dts_pub_hdl.delete_element(path)
self._log.debug("Deleted cm-nsr, %s", path)
def register(self):
yield from self.register_to_publish()
yield from self.register_for_nsr()
-
+
+ def deregister(self):
+ self._log.debug("De-registering conman config for project {}".
+ format(self._project.name))
+ if self.dts_reg_hdl:
+ self.dts_reg_hdl.deregister()
+ self.dts_reg_hdl = None
+
+ if self.dts_pub_hdl:
+ self.dts_pub_hdl.deregister()
+ self.dts_pub_hdl = None
+
@asyncio.coroutine
def register_to_publish(self):
''' Register to DTS for publishing cm-state opdata '''
- xpath = "D,/rw-conman:cm-state/rw-conman:cm-nsr"
+ xpath = self._project.add_project("D,/rw-conman:cm-state/rw-conman:cm-nsr")
self._log.debug("Registering to publish cm-state @ %s", xpath)
hdl = rift.tasklets.DTS.RegistrationHandler()
with self._dts.group_create() as group:
@property
def nsr_xpath(self):
- return "D,/nsr:ns-instance-opdata/nsr:nsr"
+ return self._project.add_project("D,/nsr:ns-instance-opdata/nsr:nsr")
@asyncio.coroutine
def register_for_nsr(self):
if (query_action == rwdts.QueryAction.UPDATE or
query_action == rwdts.QueryAction.CREATE):
- msg_dict = msg.as_dict()
- # Update Each NSR/VNFR state)
- if ('operational_status' in msg_dict and
- msg_dict['operational_status'] == 'running'):
+ # Update Each NSR/VNFR state
+ if msg.operational_status in ['running', 'terminate']:
# Add to the task list
- self._parent.add_to_pending_tasks({'nsrid' : msg_dict['ns_instance_config_ref'], 'retries' : 5})
+ self._parent.add_to_pending_tasks({
+ 'nsrid' : msg.ns_instance_config_ref,
+ 'retries' : 5,
+ 'event' : msg.operational_status,
+ })
+
elif query_action == rwdts.QueryAction.DELETE:
nsr_id = msg.ns_instance_config_ref
- asyncio.ensure_future(self._parent.terminate_NSR(nsr_id), loop=self._loop)
+ self._log.debug("Got terminate for NSR id %s", nsr_id)
+ asyncio.ensure_future(self._parent.delete_NSR(nsr_id), loop=self._loop)
+
else:
raise NotImplementedError(
"%s action on cm-state not supported",
+++ /dev/null
-
-#
-# Copyright 2016 RIFT.IO Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-
-import ncclient
-import ncclient.asyncio_manager
-import tornado.httpclient as tornadoh
-import asyncio.subprocess
-import asyncio
-import time
-import sys
-import os, stat
-
-import gi
-gi.require_version('RwDts', '1.0')
-gi.require_version('RwYang', '1.0')
-gi.require_version('RwConmanYang', '1.0')
-gi.require_version('RwNsrYang', '1.0')
-gi.require_version('RwVnfrYang', '1.0')
-
-from gi.repository import (
- RwDts as rwdts,
- RwYang,
- RwConmanYang as conmanY,
- RwNsrYang as nsrY,
- RwVnfrYang as vnfrY,
-)
-
-import rift.tasklets
-
-if sys.version_info < (3, 4, 4):
- asyncio.ensure_future = asyncio.async
-
-def log_this_vnf(vnf_cfg):
- log_vnf = ""
- used_item_list = ['nsr_name', 'vnfr_name', 'member_vnf_index', 'mgmt_ip_address']
- for item in used_item_list:
- if item in vnf_cfg:
- if item == 'mgmt_ip_address':
- log_vnf += "({})".format(vnf_cfg[item])
- else:
- log_vnf += "{}/".format(vnf_cfg[item])
- return log_vnf
-
-class ConfigManagerROifConnectionError(Exception):
- pass
-class ScriptError(Exception):
- pass
-
-
-class ConfigManagerEvents(object):
- def __init__(self, dts, log, loop, parent):
- self._dts = dts
- self._log = log
- self._loop = loop
- self._parent = parent
- self._nsr_xpath = "/cm-state/cm-nsr"
-
- @asyncio.coroutine
- def register(self):
- pass
-
- @asyncio.coroutine
- def update_vnf_state(self, vnf_cfg, state):
- nsr_obj = vnf_cfg['nsr_obj']
- yield from nsr_obj.update_vnf_cm_state(vnf_cfg['vnfr'], state)
-
- @asyncio.coroutine
- def apply_vnf_config(self, vnf_cfg):
- self._log.debug("apply_vnf_config VNF:{}"
- .format(log_this_vnf(vnf_cfg)))
-
- if vnf_cfg['config_delay']:
- yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.CFG_DELAY)
- yield from asyncio.sleep(vnf_cfg['config_delay'], loop=self._loop)
-
- # See if we are still alive!
- if vnf_cfg['nsr_obj'].being_deleted:
- # Don't do anything, just return
- self._log.info("VNF : %s is being deleted, skipping configuration!",
- log_this_vnf(vnf_cfg))
- return True
-
- yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.CFG_SEND)
- try:
- if vnf_cfg['config_method'] == 'netconf':
- self._log.info("Creating ncc handle for VNF cfg = %s!", vnf_cfg)
- self.ncc = ConfigManagerVNFnetconf(self._log, self._loop, self, vnf_cfg)
- if vnf_cfg['protocol'] == 'ssh':
- yield from self.ncc.connect_ssh()
- else:
- yield from self.ncc.connect()
- yield from self.ncc.apply_edit_cfg()
- elif vnf_cfg['config_method'] == 'rest':
- if self.rcc is None:
- self._log.info("Creating rcc handle for VNF cfg = %s!", vnf_cfg)
- self.rcc = ConfigManagerVNFrestconf(self._log, self._loop, self, vnf_cfg)
- self.ncc.apply_edit_cfg()
- elif vnf_cfg['config_method'] == 'script':
- self._log.info("Executing script for VNF cfg = %s!", vnf_cfg)
- scriptc = ConfigManagerVNFscriptconf(self._log, self._loop, self, vnf_cfg)
- yield from scriptc.apply_edit_cfg()
- elif vnf_cfg['config_method'] == 'juju':
- self._log.info("Executing juju config for VNF cfg = %s!", vnf_cfg)
- jujuc = ConfigManagerVNFjujuconf(self._log, self._loop, self._parent, vnf_cfg)
- yield from jujuc.apply_edit_cfg()
- else:
- self._log.error("Unknown configuration method(%s) received for %s",
- vnf_cfg['config_method'], vnf_cfg['vnf_unique_name'])
- yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.CFG_FAILED)
- return True
-
- #Update VNF state
- yield from self.update_vnf_state(vnf_cfg, conmanY.RecordState.READY)
- self._log.info("Successfully applied configuration to VNF: %s",
- log_this_vnf(vnf_cfg))
- except Exception as e:
- self._log.error("Applying configuration(%s) file(%s) to VNF: %s failed as: %s",
- vnf_cfg['config_method'],
- vnf_cfg['cfg_file'],
- log_this_vnf(vnf_cfg),
- str(e))
- #raise
- return False
-
- return True
-
-class ConfigManagerVNFscriptconf(object):
-
- def __init__(self, log, loop, parent, vnf_cfg):
- self._log = log
- self._loop = loop
- self._parent = parent
- self._manager = None
- self._vnf_cfg = vnf_cfg
-
- #@asyncio.coroutine
- def apply_edit_cfg(self):
- vnf_cfg = self._vnf_cfg
- self._log.debug("Attempting to apply scriptconf to VNF: %s", log_this_vnf(vnf_cfg))
- try:
- st = os.stat(vnf_cfg['cfg_file'])
- os.chmod(vnf_cfg['cfg_file'], st.st_mode | stat.S_IEXEC)
- #script_msg = subprocess.check_output(vnf_cfg['cfg_file'], shell=True).decode('utf-8')
-
- proc = yield from asyncio.create_subprocess_exec(
- vnf_cfg['script_type'], vnf_cfg['cfg_file'],
- stdout=asyncio.subprocess.PIPE)
- script_msg = yield from proc.stdout.read()
- rc = yield from proc.wait()
-
- if rc != 0:
- raise ScriptError(
- "script config returned error code : %s" % rc
- )
-
- self._log.debug("config script output (%s)", script_msg)
- except Exception as e:
- self._log.error("Error (%s) while executing script config for VNF: %s",
- str(e), log_this_vnf(vnf_cfg))
- raise
-
-class ConfigManagerVNFrestconf(object):
-
- def __init__(self, log, loop, parent, vnf_cfg):
- self._log = log
- self._loop = loop
- self._parent = parent
- self._manager = None
- self._vnf_cfg = vnf_cfg
-
- def fetch_handle(self, response):
- if response.error:
- self._log.error("Failed to send HTTP config request - %s", response.error)
- else:
- self._log.debug("Sent HTTP config request - %s", response.body)
-
- @asyncio.coroutine
- def apply_edit_cfg(self):
- vnf_cfg = self._vnf_cfg
- self._log.debug("Attempting to apply restconf to VNF: %s", log_this_vnf(vnf_cfg))
- try:
- http_c = tornadoh.AsyncHTTPClient()
- # TBD
- # Read the config entity from file?
- # Convert connectoin-point?
- http_c.fetch("http://", self.fetch_handle)
- except Exception as e:
- self._log.error("Error (%s) while applying HTTP config", str(e))
-
-class ConfigManagerVNFnetconf(object):
-
- def __init__(self, log, loop, parent, vnf_cfg):
- self._log = log
- self._loop = loop
- self._parent = parent
- self._manager = None
- self._vnf_cfg = vnf_cfg
-
- self._model = RwYang.Model.create_libncx()
- self._model.load_schema_ypbc(conmanY.get_schema())
-
- @asyncio.coroutine
- def connect(self, timeout_secs=120):
- vnf_cfg = self._vnf_cfg
- start_time = time.time()
- self._log.debug("connecting netconf .... %s", vnf_cfg)
- while (time.time() - start_time) < timeout_secs:
-
- try:
- self._log.info("Attemping netconf connection to VNF: %s", log_this_vnf(vnf_cfg))
-
- self._manager = yield from ncclient.asyncio_manager.asyncio_connect(
- loop=self._loop,
- host=vnf_cfg['mgmt_ip_address'],
- port=vnf_cfg['port'],
- username=vnf_cfg['username'],
- password=vnf_cfg['password'],
- allow_agent=False,
- look_for_keys=False,
- hostkey_verify=False,
- )
-
- self._log.info("Netconf connected to VNF: %s", log_this_vnf(vnf_cfg))
- return
-
- except ncclient.transport.errors.SSHError as e:
- yield from self._parent.update_vnf_state(vnf_cfg, conmanY.RecordState.FAILED_CONNECTION)
- self._log.error("Netconf connection to VNF: %s, failed: %s",
- log_this_vnf(vnf_cfg), str(e))
-
- yield from asyncio.sleep(2, loop=self._loop)
-
- raise ConfigManagerROifConnectionError(
- "Failed to connect to VNF: %s within %s seconds" %
- (log_this_vnf(vnf_cfg), timeout_secs)
- )
-
- @asyncio.coroutine
- def connect_ssh(self, timeout_secs=120):
- vnf_cfg = self._vnf_cfg
- start_time = time.time()
-
- if (self._manager != None and self._manager.connected == True):
- self._log.debug("Disconnecting previous session")
- self._manager.close_session
-
- self._log.debug("connecting netconf via SSH .... %s", vnf_cfg)
- while (time.time() - start_time) < timeout_secs:
-
- try:
- yield from self._parent.update_vnf_state(vnf_cfg, conmanY.RecordState.CONNECTING)
- self._log.debug("Attemping netconf connection to VNF: %s", log_this_vnf(vnf_cfg))
-
- self._manager = ncclient.asyncio_manager.manager.connect_ssh(
- host=vnf_cfg['mgmt_ip_address'],
- port=vnf_cfg['port'],
- username=vnf_cfg['username'],
- password=vnf_cfg['password'],
- allow_agent=False,
- look_for_keys=False,
- hostkey_verify=False,
- )
-
- yield from self._parent.update_vnf_state(vnf_cfg, conmanY.RecordState.NETCONF_SSH_CONNECTED)
- self._log.debug("netconf over SSH connected to VNF: %s", log_this_vnf(vnf_cfg))
- return
-
- except ncclient.transport.errors.SSHError as e:
- yield from self._parent.update_vnf_state(vnf_cfg, conmanY.RecordState.FAILED_CONNECTION)
- self._log.error("Netconf connection to VNF: %s, failed: %s",
- log_this_vnf(vnf_cfg), str(e))
-
- yield from asyncio.sleep(2, loop=self._loop)
-
- raise ConfigManagerROifConnectionError(
- "Failed to connect to VNF: %s within %s seconds" %
- (log_this_vnf(vnf_cfg), timeout_secs)
- )
-
- @asyncio.coroutine
- def apply_edit_cfg(self):
- vnf_cfg = self._vnf_cfg
- self._log.debug("Attempting to apply netconf to VNF: %s", log_this_vnf(vnf_cfg))
-
- if self._manager is None:
- self._log.error("Netconf is not connected to VNF: %s, aborting!", log_this_vnf(vnf_cfg))
- return
-
- # Get config file contents
- try:
- with open(vnf_cfg['cfg_file']) as f:
- configuration = f.read()
- except Exception as e:
- self._log.error("Reading contents of the configuration file(%s) failed: %s", vnf_cfg['cfg_file'], str(e))
- return
-
- try:
- self._log.debug("apply_edit_cfg to VNF: %s", log_this_vnf(vnf_cfg))
- xml = '<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">{}</config>'.format(configuration)
- response = yield from self._manager.edit_config(xml, target='running')
- if hasattr(response, 'xml'):
- response_xml = response.xml
- else:
- response_xml = response.data_xml.decode()
-
- self._log.debug("apply_edit_cfg response: %s", response_xml)
- if '<rpc-error>' in response_xml:
- raise ConfigManagerROifConnectionError("apply_edit_cfg response has rpc-error : %s",
- response_xml)
-
- self._log.debug("apply_edit_cfg Successfully applied configuration {%s}", xml)
- except:
- raise
-
-class ConfigManagerVNFjujuconf(object):
-
- def __init__(self, log, loop, parent, vnf_cfg):
- self._log = log
- self._loop = loop
- self._parent = parent
- self._manager = None
- self._vnf_cfg = vnf_cfg
-
- #@asyncio.coroutine
- def apply_edit_cfg(self):
- vnf_cfg = self._vnf_cfg
- self._log.debug("Attempting to apply juju conf to VNF: %s", log_this_vnf(vnf_cfg))
- try:
- args = ['python3',
- vnf_cfg['juju_script'],
- '--server', vnf_cfg['mgmt_ip_address'],
- '--user', vnf_cfg['user'],
- '--password', vnf_cfg['secret'],
- '--port', str(vnf_cfg['port']),
- vnf_cfg['cfg_file']]
- self._log.error("juju script command (%s)", args)
-
- proc = yield from asyncio.create_subprocess_exec(
- *args,
- stdout=asyncio.subprocess.PIPE)
- juju_msg = yield from proc.stdout.read()
- rc = yield from proc.wait()
-
- if rc != 0:
- raise ScriptError(
- "Juju config returned error code : %s" % rc
- )
-
- self._log.debug("Juju config output (%s)", juju_msg)
- except Exception as e:
- self._log.error("Error (%s) while executing juju config", str(e))
- raise
+++ /dev/null
-# This template has all supported TAGs.
-# This template can be used as input to the xlate_cfg.py script as follows:
-
-# python3 ./xlate_cfg.py -i ./rwconman_test_config_template.cfg -o ./rwconman_test_config.cfg -x ./rwconman_test_xlate_dict.yml
-
-
-# This is error
-#0. <rw_connection_point_name test/cp2>
-
-# Following are simple TAGs
-1. This is Management IP: <rw_mgmt_ip>
-2. This is Username: <rw_username>
-3. This is Password: <rw_password>
-4. This is globally unique connection point: <rw_connection_point_name test/cp1>
-
-# Following are colon separated complex TAGs
-5. This is connection point for a given VNF with unique member index: <rw_unique_index:rw_connection_point_name 2:test/cp1>
-6. This is converting connection point IP address into network address: <rw_connection_point:masklen_network test/cp1:24>
-7. This is converting connection point IP address into boadcast address: <rw_connection_point:masklen_broadcast test/cp1:24>
-
-# Following generated tuple with original connectino point name (Global only)
-8. This is not used anywhere: <rw_connection_point_tuple test/cp1>
-
-# Following are multi-colon separated complex TAGs
-9. This is converting connection point IP address into network address VNF with unique member index: <rw_unique_index:rw_connection_point:masklen_network 2:test/cp1:24>
-10. This is converting connection point IP address into network address VNF with unique member index: <rw_unique_index:rw_connection_point:masklen_broadcast 2:test/cp1:24>
-
-# Following test all of the above in single line
-11. All at once: START| rw_mgmt_ip: <rw_mgmt_ip> | rw_username: <rw_username> | rw_password: <rw_password> | global CP: <rw_connection_point_name test/cp1> | 1 CP: <rw_unique_index:rw_connection_point_name 1:test/cp1> | network: <rw_connection_point:masklen_network test/cp1:24> | broadcast: <rw_connection_point:masklen_broadcast test/cp1:24> | tuple: <rw_connection_point_tuple test/cp1> | 2 network: <rw_unique_index:rw_connection_point:masklen_network 2:test/cp1:24> | 2 broadcast: <rw_unique_index:rw_connection_point:masklen_broadcast 2:test/cp1:24> |END
-
-# Need to work on the solution for multiple pattern of same type in single line.
-
+++ /dev/null
-1:
- test/cp1: 11.0.0.1
-2:
- test/cp1: 11.0.0.2
-test/cp1: 11.0.0.3
-rw_mgmt_ip: 1.1.1.1
-rw_username: admin
-rw_password: admin
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
'''
This file - ConfigManagerTasklet()
|
++
+|
+ConfigManagerProject()
+|
+--|--> ConfigurationManager()
|
+--> rwconman_config.py - ConfigManagerConfig()
- | |
- | +--> ConfigManagerNSR()
- |
- +--> rwconman_events.py - ConfigManagerEvents()
- |
- +--> ConfigManagerROif()
+ |
+ +--> ConfigManagerNSR()
'''
)
import rift.tasklets
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ )
from . import rwconman_config as Config
-from . import rwconman_events as Event
def log_this_vnf(vnf_cfg):
log_vnf = ""
return log_vnf
class ConfigurationManager(object):
- def __init__(self, log, loop, dts):
+ def __init__(self, log, loop, dts, project):
self._log = log
self._loop = loop
self._dts = dts
+ self._project = project
+
self.cfg_sleep = True
- self.cfg_dir = os.path.join(os.environ["RIFT_INSTALL"], "etc/conman")
self._config = Config.ConfigManagerConfig(self._dts, self._log, self._loop, self)
- self._event = Event.ConfigManagerEvents(self._dts, self._log, self._loop, self)
self.pending_cfg = []
self.pending_tasks = {}
self._nsr_objs = {}
+ self._task = None # The configuration_handler task
self._handlers = [
- self._config,
- self._event,
+ self._config
]
self._log.info("Updating cm-state for NS(%s) to:%s", nsr_obj.nsr_name, state)
yield from nsr_obj.update_ns_cm_state(state)
- def add_to_pending(self, nsr_obj):
+ def add_to_pending(self, nsr_obj, cfg_vnfr_list):
if (nsr_obj not in self.pending_cfg and
nsr_obj.cm_nsr['state'] == nsr_obj.state_to_string(conmanY.RecordState.RECEIVED)):
self._log.info("Adding NS={} to pending config list"
.format(nsr_obj.nsr_name))
- # Build the list
- nsr_obj.vnf_cfg_list = []
- # Sort all the VNF by their configuration attribute priority
- sorted_dict = dict(sorted(nsr_obj.nsr_cfg_config_attributes_dict.items()))
- for config_attributes_dict in sorted_dict.values():
- # Iterate through each priority level
- for config_priority in config_attributes_dict:
- # Iterate through each vnfr at this priority level
- vnfr = nsr_obj._vnfr_dict[config_priority['id']]
- self._log.debug("Adding VNF:(%s) to pending cfg list", log_this_vnf(vnfr['vnf_cfg']))
- nsr_obj.vnf_cfg_list.append(vnfr['vnf_cfg'])
+ for cfg_vnfr in cfg_vnfr_list:
+ self._log.debug("Adding VNF:(%s) to pending cfg list", log_this_vnf(cfg_vnfr['vnf_cfg']))
+ nsr_obj.vnf_cfg_list.append(cfg_vnfr['vnf_cfg'])
self.pending_cfg.append(nsr_obj)
def add_nsr_obj(self, nsr_obj):
del self._nsr_objs[nsr_id]
def get_nsr_obj(self, nsr_id):
+ if nsr_id not in self._nsr_objs:
+ self._log.info("NSR %s not found", nsr_id)
+ return None
self._log.debug("Returning nsr_obj (%s) from Configuration Manager", self._nsr_objs[nsr_id])
return self._nsr_objs.get(nsr_id)
done))
if done:
- self._log.warn("Apply initial config on VNFR {}".
+ self._log.debug("Apply initial config on VNFR {}".
format(log_this_vnf(vnf_cfg)))
try:
yield from nsr_obj.parent.process_vnf_initial_config(
nsr_obj,
- agent_vnfr.vnfr_msg)
+ agent_vnfr.vnfr_msg,
+ self._project.name)
yield from self.update_vnf_state(vnf_cfg,
conmanY.RecordState.READY)
conmanY.RecordState.CFG_FAILED)
else:
+ self._log.debug("Getting config status {}".format(log_this_vnf(vnf_cfg)))
# Check to see if the VNF configure failed
status = yield from self._config._config_agent_mgr.invoke_config_agent_plugins(
'get_config_status',
nsr_obj.nsr_failed = False
self._log.debug("Apply initial config on NSR {}".format(nsr_obj.nsr_name))
try:
- yield from nsr_obj.parent.process_ns_initial_config(nsr_obj)
+ yield from nsr_obj.parent.process_ns_initial_config(nsr_obj, self._project.name)
except Exception as e:
nsr_obj.nsr_failed = True
self._log.exception(e)
yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_FAILED)
return ret_status
- # Basically, this loop will never end.
- while True:
- # Check the pending tasks are complete
- # Store a list of tasks that are completed and
- # remove from the pending_tasks list outside loop
- ids = []
- for nsr_id, task in self.pending_tasks.items():
- if task.done():
- ids.append(nsr_id)
- e = task.exception()
- if e:
- self._log.error("Exception in configuring nsr {}: {}".
- format(nsr_id, e))
- nsr_obj = self.get_nsr_obj(nsr_id)
- if nsr_obj:
- yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_FAILED, str(e))
-
+ try:
+ # Basically, this loop will never end.
+ while True:
+ # Check the pending tasks are complete
+ # Store a list of tasks that are completed and
+ # remove from the pending_tasks list outside loop
+ ids = []
+ for nsr_id, task in self.pending_tasks.items():
+ if task.done():
+ ids.append(nsr_id)
+ e = task.exception()
+ if e:
+ self._log.error("Exception in configuring nsr {}: {}".
+ format(nsr_id, e))
+ nsr_obj = self.get_nsr_obj(nsr_id)
+ if nsr_obj:
+ yield from nsr_obj.update_ns_cm_state(conmanY.RecordState.CFG_FAILED, str(e))
+
+ else:
+ rc = task.result()
+ self._log.debug("NSR {} configured: {}".format(nsr_id, rc))
else:
- rc = task.result()
- self._log.debug("NSR {} configured: {}".format(nsr_id, rc))
- else:
- self._log.debug("NSR {} still configuring".format(nsr_id))
-
- # Remove the completed tasks
- for nsr_id in ids:
- self.pending_tasks.pop(nsr_id)
-
- # TODO (pjoseph): Fix this
- # Sleep before processing any NS (Why are we getting multiple NSR running DTS updates?)
- # If the sleep is not 10 seconds it does not quite work, NSM is marking it 'running'
- # wrongfully 10 seconds in advance?
- yield from asyncio.sleep(10, loop=self._loop)
-
- if self.pending_cfg:
- # get first NS, pending_cfg is nsr_obj list
- nsr_obj = self.pending_cfg[0]
- nsr_done = False
- if nsr_obj.being_deleted is False:
- # Process this NS, returns back same obj is successfull or exceeded retries
- try:
- self._log.info("Processing NSR:{}".format(nsr_obj.nsr_name))
-
- # Check if we already have a task running for this NSR
- # Case where we are still configuring and terminate is called
- if nsr_obj.nsr_id in self.pending_tasks:
- self._log.error("NSR {} in state {} has a configure task running.".
- format(nsr_obj.nsr_name, nsr_obj.get_ns_cm_state()))
- # Terminate the task for this NSR
- self.pending_tasks[nsr_obj.nsr_id].cancel()
-
- yield from self.update_ns_state(nsr_obj, conmanY.RecordState.CFG_PROCESS)
-
- # Call in a separate thread
- self.pending_tasks[nsr_obj.nsr_id] = \
- self._loop.create_task(
- process_nsr_obj(nsr_obj)
- )
-
- # Remove this nsr_obj
- self.pending_cfg.remove(nsr_obj)
-
- except Exception as e:
- self._log.error("Failed to process NSR as %s", str(e))
- self._log.exception(e)
-
+ self._log.debug("NSR {} still configuring".format(nsr_id))
+
+ # Remove the completed tasks
+ for nsr_id in ids:
+ self.pending_tasks.pop(nsr_id)
+
+ # TODO (pjoseph): Fix this
+ # Sleep before processing any NS (Why are we getting multiple NSR running DTS updates?)
+ # If the sleep is not 10 seconds it does not quite work, NSM is marking it 'running'
+ # wrongfully 10 seconds in advance?
+ yield from asyncio.sleep(10, loop=self._loop)
+
+ if self.pending_cfg:
+ # get first NS, pending_cfg is nsr_obj list
+ nsr_obj = self.pending_cfg[0]
+ nsr_done = False
+ if nsr_obj.being_deleted is False:
+ # Process this NS, returns back same obj is successfull or exceeded retries
+ try:
+ self._log.info("Processing NSR:{}".format(nsr_obj.nsr_name))
+
+ # Check if we already have a task running for this NSR
+ # Case where we are still configuring and terminate is called
+ if nsr_obj.nsr_id in self.pending_tasks:
+ self._log.error("NSR {} in state {} has a configure task running.".
+ format(nsr_obj.nsr_name, nsr_obj.get_ns_cm_state()))
+ # Terminate the task for this NSR
+ self.pending_tasks[nsr_obj.nsr_id].cancel()
+
+ yield from self.update_ns_state(nsr_obj, conmanY.RecordState.CFG_PROCESS)
+
+ # Call in a separate thread
+ self.pending_tasks[nsr_obj.nsr_id] = \
+ self._loop.create_task(
+ process_nsr_obj(nsr_obj)
+ )
+
+ # Remove this nsr_obj
+ self.pending_cfg.remove(nsr_obj)
+
+ except Exception as e:
+ self._log.error("Failed to process NSR as %s", str(e))
+ self._log.exception(e)
+
+ except asyncio.CancelledError as e:
+ self._log.debug("Stopped configuration handler for project {}".format(self._project))
@asyncio.coroutine
def register(self):
for reg in self._handlers:
yield from reg.register()
- asyncio.ensure_future(self.configuration_handler(), loop=self._loop)
+ self._task = asyncio.ensure_future(self.configuration_handler(), loop=self._loop)
+
+ def deregister(self):
+ self._log.debug("De-register conman for project {}".format(self._project.name))
+ self._task.cancel()
+
+ for reg in self._handlers:
+ reg.deregister()
+
+
+class ConfigManagerProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(ConfigManagerProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
+ self._con_man = None
+
+ @asyncio.coroutine
+ def register (self):
+ self._log.info("Initializing the Configuration-Manager tasklet")
+ self._con_man = ConfigurationManager(self.log,
+ self.loop,
+ self._dts,
+ self,)
+ yield from self._con_man.register()
+
+ def deregister(self):
+ self._log.debug("De-register project {}".format(self.name))
+ self._con_man.deregister()
+
class ConfigManagerTasklet(rift.tasklets.Tasklet):
def __init__(self, *args, **kwargs):
self.rwlog.set_category("rw-conman-log")
self._dts = None
- self._con_man = None
+
+ self.project_handler = None
+ self.projects = {}
+
+ @property
+ def dts(self):
+ return self._dts
def start(self):
super(ConfigManagerTasklet, self).start()
@asyncio.coroutine
def init(self):
- self._log.info("Initializing the Configuration-Manager tasklet")
- self._con_man = ConfigurationManager(self.log,
- self.loop,
- self._dts)
- yield from self._con_man.register()
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, ConfigManagerProject)
+ self.project_handler.register()
@asyncio.coroutine
def run(self):
+++ /dev/null
-#!/usr/bin/env python3
-
-#
-# Copyright 2016 RIFT.IO Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-
-'''
-This script will go through the input conffiguration template and convert all the matching "regular expression" and "strings"
-specified in xlate_cp_list & xlate_str_list with matching IP addresses passed in as dictionary to this script.
-
--i Configuration template
--o Output final configuration complete with IP addresses
--x Xlate(Translate dictionary in string format
--t TAGS to be translated
-
-'''
-
-import sys
-import getopt
-import ast
-import re
-import yaml
-import netaddr
-
-from inspect import getsourcefile
-import os.path
-
-xlate_dict = None
-
-def xlate_cp_list(line, cp_list):
- for cp_string in cp_list:
- match = re.search(cp_string, line)
- if match is not None:
- # resolve IP address using Connection Point dictionary
- resolved_ip = xlate_dict[match.group(1)]
- if resolved_ip is None:
- print("No matching CP found: ", match.group(1))
- exit(2)
- else:
- line = line[:match.start()] + resolved_ip + line[match.end():]
- return line
-
-def xlate_multi_colon_list(line, multi_colon_list):
- for ucp_string in multi_colon_list:
- #print("Searching :", ucp_string)
- match = re.search(ucp_string, line)
- if match is not None:
- #print("match :", match.group())
- # resolve IP address using Connection Point dictionary for specified member (unique) index
- ucp_str_list = match.group(1).split(':')
- print("matched = {}, split list = {}".format(match.group(1), ucp_str_list))
- if len(ucp_str_list) != 3:
- print("Invalid TAG in the configuration: ", match.group(1))
- exit(2)
-
- # Traslate given CP address & mask into netaddr
- if ucp_string.startswith('<rw_unique_index:rw_connection_point:masklen'):
- member_vnf_index = int(ucp_str_list[0])
- resolved_ip = xlate_dict[ucp_str_list[1]]
- masklen = ucp_str_list[2]
- if resolved_ip is None:
- print("No matching CP found: ", ucp_str_list[1])
- exit(2)
- if int(masklen) <= 0:
- print("Invalid mask length: ", masklen)
- exit(2)
- else:
- # Generate netaddr
- ip_str = resolved_ip + '/' + masklen
- #print("ip_str:", ip_str)
- ip = netaddr.IPNetwork(ip_str)
- if ucp_string.startswith('<rw_unique_index:rw_connection_point:masklen_broadcast'):
- # Traslate given CP address & mask into broadcast address
- addr = ip.broadcast
- if ucp_string.startswith('<rw_unique_index:rw_connection_point:masklen_network'):
- # Traslate given CP address & mask into network address
- addr = ip.network
- line = line[:match.start()] + str(addr) + line[match.end():]
- return line
-
-
-
-def xlate_colon_list(line, colon_list):
- for ucp_string in colon_list:
- #print("Searching :", ucp_string)
- match = re.search(ucp_string, line)
- if match is not None:
- #print("match :", match.group())
- # resolve IP address using Connection Point dictionary for specified member (unique) index
- ucp_str_list = match.group(1).split(':')
- #print("matched = {}, split list = {}".format(match.group(1), ucp_str_list))
- if len(ucp_str_list) != 2:
- print("Invalid TAG in the configuration: ", match.group(1))
- exit(2)
-
- # Unique Connection Point translation to IP
- if ucp_string.startswith('<rw_unique_index:'):
- member_vnf_index = int(ucp_str_list[0])
- resolved_ip = xlate_dict[member_vnf_index][ucp_str_list[1]]
- #print("member_vnf_index = {}, resolved_ip = {}", member_vnf_index, resolved_ip)
- if resolved_ip is None:
- print("For Unique index ({}), No matching CP found: {}", ucp_str_list[0], ucp_str_list[1])
- exit(2)
- else:
- line = line[:match.start()] + resolved_ip + line[match.end():]
-
- # Traslate given CP address & mask into netaddr
- if ucp_string.startswith('<rw_connection_point:masklen'):
- resolved_ip = xlate_dict[ucp_str_list[0]]
- masklen = ucp_str_list[1]
- if resolved_ip is None:
- print("No matching CP found: ", ucp_str_list[0])
- exit(2)
- if int(masklen) <= 0:
- print("Invalid mask length: ", masklen)
- exit(2)
- else:
- # Generate netaddr
- ip_str = resolved_ip + '/' + masklen
- #print("ip_str:", ip_str)
- ip = netaddr.IPNetwork(ip_str)
-
- if ucp_string.startswith('<rw_connection_point:masklen_broadcast'):
- # Traslate given CP address & mask into broadcast address
- addr = ip.broadcast
- if ucp_string.startswith('<rw_connection_point:masklen_network'):
- # Traslate given CP address & mask into network address
- addr = ip.network
-
- line = line[:match.start()] + str(addr) + line[match.end():]
- return line
-
-def xlate_cp_to_tuple_list(line, cp_to_tuple_list):
- for cp_string in cp_to_tuple_list:
- match = re.search(cp_string, line)
- if match is not None:
- # resolve IP address using Connection Point dictionary
- resolved_ip = xlate_dict[match.group(1)]
- if resolved_ip is None:
- print("No matching CP found: ", match.group(1))
- exit(2)
- else:
- line = line[:match.start()] + match.group(1) + ':' + resolved_ip + line[match.end():]
- return line
-
-def xlate_str_list(line, str_list):
- for replace_tag in str_list:
- replace_string = replace_tag[1:-1]
- line = line.replace(replace_tag, xlate_dict[replace_string])
- return line
-
-
-def main(argv=sys.argv[1:]):
- cfg_template = None
- cfg_file = None
- global xlate_dict
- try:
- opts, args = getopt.getopt(argv,"i:o:x:")
- except getopt.GetoptError:
- print("Check arguments {}".format(argv))
- sys.exit(2)
- for opt, arg in opts:
- if opt == '-i':
- cfg_template = arg
- elif opt in ("-o"):
- cfg_file = arg
- elif opt in ("-x"):
- xlate_arg = arg
-
- # Read TAGS from yaml file
- # Read the translation tags from yaml file
- yml_dir = os.path.dirname(os.path.abspath(getsourcefile(lambda:0)))
- tags_input_file = os.path.join(yml_dir, 'xlate_tags.yml')
- with open(tags_input_file, "r") as ti:
- xlate_tags = yaml.load(ti.read())
-
- # Need to work on the solution for multiple pattern of same type in single line.
- try:
- with open(xlate_arg, "r") as ti:
- xlate_dict = yaml.load(ti.read())
- try:
- with open(cfg_template, 'r') as r:
- try:
- with open(cfg_file, 'w') as w:
- # Traslate
- try:
- # For each line
- for line in r:
- if line.startswith("#"):
- # Skip comment lines
- continue
- #print("1.Line : ", line)
- # For each Connection Point translation to IP
- line = xlate_cp_list(line, xlate_tags['xlate_cp_list'])
- #print("2.Line : ", line)
-
- # For each colon(:) separated tag, i.e. 3 inputs in a tag.
- line = xlate_multi_colon_list(line, xlate_tags['xlate_multi_colon_list'])
- #print("2a.Line : ", line)
-
- # For each colon(:) separated tag, i.e. 2 inputs in a tag.
- line = xlate_colon_list(line, xlate_tags['xlate_colon_list'])
- #print("3.Line : ", line)
-
- # For each connection point to tuple replacement
- line = xlate_cp_to_tuple_list(line, xlate_tags['xlate_cp_to_tuple_list'])
- #print("4.Line : ", line)
-
- # For each direct replacement (currently only management IP address for ping/pong)
- line = xlate_str_list(line, xlate_tags['xlate_str_list'])
- #print("5.Line : ", line)
-
- # Finally write the modified line to the new config file
- w.write(line)
- except Exception as e:
- print("Error ({}) on line: {}".format(str(e), line))
- exit(2)
- except Exception as e:
- print("Failed to open for write: {}, error({})".format(cfg_file, str(e)))
- exit(2)
- except Exception as e:
- print("Failed to open for read: {}, error({})".format(cfg_template, str(e)))
- exit(2)
- print("Wrote configuration file", cfg_file)
- except Exception as e:
- print("Could not translate dictionary, error: ", str(e))
-
-if __name__ == "__main__":
- try:
- main()
- except Exception as e:
- print(str(e))
+++ /dev/null
-# """
-# #
-# Copyright 2016 RIFT.IO Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# @file xlate_tags.yml
-# @author Manish Patel (Manish.Patel@riftio.com)
-# @date 01/14/2016
-# """
-
-# This file contains the tags that needs translation
-# One can add some tags with processing limitations by the translation script.
-
-# Add Regular expressions here (connection-points received dynamically from VNFR)
-
-# Translate connection point names (Connection point name is read using RegEx)
-
-xlate_cp_list :
- - <rw_connection_point_name (.*?)>
-
-# Literal string translations
-xlate_str_list :
- - <rw_mgmt_ip>
- - <rw_username>
- - <rw_password>
-
-# This list contains 2 tags separated by colon (:)
-xlate_colon_list :
- # Fetch CP from the member_index dictionary (I.e. CP of a particular VNF)
- - <rw_unique_index:rw_connection_point_name (.*?)>
- # Generate network address from CP address and mask (mask is expected to be a hard coded number in config)
- - <rw_connection_point:masklen_network (.*?)>
- # Generate broadcast address from CP address and mask (mask is expected to be a hard coded number in config)
- - <rw_connection_point:masklen_broadcast (.*?)>
-
-# This list contains 3 tags separated by colon (:)
-xlate_multi_colon_list :
- # Generate network address from CP of a particular VNF (mask is expected to be a hard coded number in config))
- - <rw_unique_index:rw_connection_point:masklen_network (.*?)>
- # Generate broadcast address from CP of a particular VNF (mask is expected to be a hard coded number in config))
- - <rw_unique_index:rw_connection_point:masklen_broadcast (.*?)>
-
-# This translates connection point name and generates tuple with name:resolved IP
-xlate_cp_to_tuple_list :
- - <rw_connection_point_tuple (.*?)>
-
rift_add_yang_target(
TARGET rw_conman_yang
YANG_FILES rw-conman.yang ${rw_conman_log_file}
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
LIBRARIES
mano_yang_gen
mano-types_yang_gen
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix conman;
}
- tailf:annotate "/conman:cm-state" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf:annotate "/rw-project:project/conman:cm-state" {
tailf:callpoint rw_callpoint;
}
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
namespace "http://riftio.com/ns/riftware-1.0/rw-conman";
prefix "rw-conman";
- import rw-pb-ext {
- prefix "rwpb";
- }
-
import rw-cli-ext {
prefix "rwcli";
}
+ import rw-project-vnfd {
+ prefix "rw-project-vnfd";
+ }
+
+ import rw-project-nsd {
+ prefix "rw-project-nsd";
+ }
+
import nsr {
prefix "nsr";
}
- import vnfr {
- prefix "vnfr";
+ import rw-nsr {
+ prefix "rw-nsr";
+ }
+
+ import rw-vnfr {
+ prefix "rw-vnfr";
}
import rw-vlr {
prefix "rw-config-agent";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-10-27 {
description
"Initial revision.";
leaf ro-username {
description "RO endpoint username";
type string;
- default "admin";
+ default "@rift";
}
leaf ro-password {
description "RO endpoint password";
type string;
- default "admin";
- }
- }
-
- grouping vnf-cfg-items {
- leaf configuration-file {
- description "Location of the confguration file on CM system";
- type string;
- }
- leaf translator-script {
- description "Script that translates the templates in the configuration-file using VNFR information
- Currently, we only use IP address translations.
- configuration will use connection point name instead of IP addresses.";
- type string;
+ default "rift";
}
}
- container cm-config {
- description "Service Orchestrator specific configuration";
- rwpb:msg-new "SoConfig";
- rwcli:new-mode "cm-config";
-
- container ro-endpoint {
- description "Resource Orchestrator endpoint ip address";
- rwpb:msg-new "RoEndpoint";
- uses ro-endpoint;
- }
-
- //uses vnf-cfg-items;
-
- list nsr {
- key "id";
- leaf id {
- description "Indicates NSR bringup complete, now initiate configuration of the NSR";
- type yang:uuid;
- }
- }
- }// cm-config
-
// =================== SHOW ==================
typedef record-state {
type enumeration {
enum init;
enum received;
- enum cfg-delay;
enum cfg-process;
enum cfg-process-failed;
enum cfg-sched;
enum connecting;
enum failed-connection;
- enum netconf-connected;
- enum netconf-ssh-connected;
- enum restconf-connected;
enum cfg-send;
enum cfg-failed;
enum ready-no-cfg;
enum ready;
+ enum terminate;
}
}
- // TBD: Do we need this typedef, currently not used anywhere
- typedef cfg-type {
- type enumeration {
- enum none;
- enum scriptconf;
- enum netconf;
- enum restconf;
- enum jujuconf;
+
+ grouping primitive-parameter {
+ leaf name {
+ description
+ "Name of the parameter.";
+ type string;
}
- }
+ leaf data-type {
+ description
+ "Data type associated with the name.";
+ type manotypes:parameter-data-type;
+ }
- // This is also used by RO (Resource Orchestrator) to indicate NSR is ready
- // It will only fill in IDs
- container cm-state {
- rwpb:msg-new "CmOpdata";
- config false;
- description "CM NS & VNF states";
+ leaf mandatory {
+ description "Is this field mandatory";
+ type boolean;
+ default false;
+ }
- leaf states {
- description "CM various states";
+ leaf default-value {
+ description "The default value for this field";
type string;
}
-
- list cm-nsr {
- description "List of NS Records";
- key "id";
- leaf id {
- type yang:uuid;
+
+ leaf parameter-pool {
+ description "NSD parameter pool name to use for this parameter";
+ type string;
+ }
+
+ leaf read-only {
+ description
+ "The value should be dimmed by the UI.
+ Only applies to parameters with default values.";
+ type boolean;
+ default false;
+ }
+
+ leaf hidden {
+ description
+ "The value should be hidden by the UI.
+ Only applies to parameters with default values.";
+ type boolean;
+ default false;
+ }
+
+ leaf out {
+ description "If this is an output of the primitive execution";
+ type boolean;
+ default false;
+ }
+ }
+
+ grouping vnf-configuration {
+ container vnf-configuration {
+ description
+ "Information about the VNF configuration. Note:
+ If the NS contains multiple instances of the
+ same VNF, each instance could have a different
+ configuration.";
+
+ choice config-method {
+ description
+ "Defines the configuration method for the VNF.";
+ case script {
+ description
+ "Use custom script for configuring the VNF.
+ This script is executed in the context of
+ Orchestrator (The same system and environment
+ as the Launchpad).";
+ container script {
+ leaf script-type {
+ description
+ "Script type - currently supported - Scripts confirming to Rift CA plugin";
+ type enumeration {
+ enum rift;
+ }
+ }
+ }
+ }
+
+ case juju {
+ description
+ "Configure the VNF through Juju.";
+ container juju {
+ leaf charm {
+ description "Juju charm to use with the VNF.";
+ type string;
+ }
+ }
+ }
}
- leaf name {
- description "NSR name.";
- type string;
+
+ list config-primitive {
+ description
+ "List of config primitives supported by the
+ configuration agent for this VNF.";
+ key "name";
+
+ leaf name {
+ description
+ "Name of the config primitive.";
+ type string;
+ }
+
+ list parameter {
+ description
+ "List of parameters to the config primitive.";
+ key "name";
+ uses primitive-parameter;
+ }
+
+ leaf user-defined-script {
+ description
+ "A user defined script. If user defined script is defined,
+ the script will be executed using bash";
+ type string;
+ }
}
- leaf state {
- description "State of NSR";
- type record-state;
+
+ list initial-config-primitive {
+ description
+ "Initial set of configuration primitives.";
+ key "seq";
+ leaf seq {
+ description
+ "Sequence number for the configuration primitive.";
+ type uint64;
+ }
+
+ choice primitive-type {
+ case primitive-definition {
+ leaf name {
+ description
+ "Name of the configuration primitive.";
+ type string;
+ }
+
+ uses manotypes:primitive-parameter-value;
+
+ leaf user-defined-script {
+ description
+ "A user defined script.";
+ type string;
+ }
+ }
+ case primitive-ref {
+ leaf config-primitive-ref {
+ description
+ "Reference to a config primitive name.
+ NOTE: The config primitive referred should have
+ all the input parameters predefined either
+ with default values or dependency references.";
+ type leafref {
+ path "../../config-primitive/name";
+ }
+ }
+ }
+ }
}
- leaf state-details {
- description "Details of the state of NSR, in case of errors";
+ }
+ } // END - grouping vnf-configuration
+
+
+ // This is also used by RO (Resource Orchestrator) to indicate NSR is ready
+ // It will only fill in IDs
+ augment "/rw-project:project" {
+ container cm-state {
+ config false;
+ description "CM NS & VNF states";
+
+ leaf states {
+ description "CM various states";
type string;
}
-
- list cm-vnfr {
- description "List of VNF Records within NS Record";
+
+ list cm-nsr {
+ description "List of NS Records";
key "id";
leaf id {
type yang:uuid;
}
leaf name {
- description "VNFR name.";
+ description "NSR name.";
type string;
}
leaf state {
- description "Last known state of this VNFR";
+ description "State of NSR";
type record-state;
}
- container mgmt-interface {
- leaf ip-address {
- type inet:ip-address;
- }
- leaf port {
- type inet:port-number;
- }
- }
- leaf cfg-type {
+ leaf state-details {
+ description "Details of the state of NSR, in case of errors";
type string;
}
- leaf cfg-location {
- type inet:uri;
- }
- list connection-point {
- key "name";
+
+ list cm-vnfr {
+ description "List of VNF Records within NS Record";
+ key "id";
+ leaf id {
+ type yang:uuid;
+ }
leaf name {
- description "Connection Point name";
+ description "VNFR name.";
type string;
}
- leaf ip-address {
- description "IP address assigned to this connection point";
- type inet:ip-address;
+ leaf state {
+ description "Last known state of this VNFR";
+ type record-state;
}
- }
- } // list VNFR
- } // list NSR
- } // cm-state
-
+
+ container mgmt-interface {
+ leaf ip-address {
+ type inet:ip-address;
+ }
+ leaf port {
+ type inet:port-number;
+ }
+ }
+ leaf cfg-type {
+ type string;
+ }
+ list connection-point {
+ key "name";
+ leaf name {
+ description "Connection Point name";
+ type string;
+ }
+ leaf ip-address {
+ description "IP address assigned to this connection point";
+ type inet:ip-address;
+ }
+ leaf connection-point-id {
+ type string;
+ }
+ }
+
+ // Publish the resolved capabilites and dependecies here
+ container config-parameter {
+ description
+ "The resolved config parameter for a VNF";
+ list config-parameter-source {
+ description "List of resolved sources";
+ key "name";
+
+ leaf name {
+ type string {
+ length 1..128;
+ }
+ description
+ "Name of the source as defined in VNFD";
+ }
+ leaf value {
+ type string;
+ description
+ "Resovled value for the source";
+ }
+ list parameter {
+ key "config-primitive-ref";
+ leaf config-primitive-ref {
+ type string;
+ }
+ leaf parameter-ref {
+ type string;
+ }
+ }
+ }
+
+ list config-parameter-request {
+ description
+ "List of resolved requests for the the VNF";
+ key "name";
+
+ leaf name {
+ type string {
+ length 1..128;
+ }
+ description
+ "Name of the request as defined in VNFD";
+ }
+ leaf value {
+ type string;
+ description
+ "Resovled value for the request";
+ }
+ list parameter {
+ key "config-primitive-ref";
+ leaf config-primitive-ref {
+ type string;
+ }
+ leaf parameter-ref {
+ type string;
+ }
+ }
+ }
+ }
+
+ // Resolve the VNF config-primitives and publish it
+ // here. VNFR will use this and update config-primitives
+ // from here.
+ uses vnf-configuration;
+ } // list VNFR
+ } // list NSR
+ } // cm-state
+ }
} // rw-conman
start_cm_system.py
README.start_cm
DESTINATION ${CONMAN_INSTALL}
- COMPONENT ${PKG_LONG_NAME})
+ COMPONENT ${INSTALL_COMPONENT}
+)
# set(NS_NAME ping_pong_nsd)
# install(
# ${NS_NAME}/ping_vnfd_1_scriptconf_template.cfg
# ${NS_NAME}/pong_vnfd_11_scriptconf_template.cfg
# DESTINATION ${CONMAN_INSTALL}/${NS_NAME}
-# COMPONENT ${PKG_LONG_NAME})
+# COMPONENT ${INSTALL_COMPONENT}
+# )
import asyncio
+import gi
import logging
import os
import sys
import types
import unittest
import uuid
-
import xmlrunner
import gi.repository.CF as cf
import gi.repository.RwManifestYang as rwmanifest
import gi.repository.RwConmanYang as conmanY
import gi.repository.RwLaunchpadYang as launchpadyang
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
import rift.tasklets
return ret
def get_cloud_account_msg(self):
- cloud_account = launchpadyang.CloudAccount()
+ cloud_account = launchpadyang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
cloud_account.name = "cloudy"
cloud_account.account_type = "mock"
cloud_account.mock.username = "rainy"
return cloud_account
def get_compute_pool_msg(self, name, pool_type):
- pool_config = rmgryang.ResourcePools()
+ pool_config = rmgryang.YangData_RwProject_Project_ResourceMgrConfig_ResourcePools()
pool = pool_config.pools.add()
pool.name = name
pool.resource_type = "compute"
return pool_config
def get_network_pool_msg(self, name, pool_type):
- pool_config = rmgryang.ResourcePools()
+ pool_config = rmgryang.YangData_RwProject_Project_ResourceMgrConfig_ResourcePools()
pool = pool_config.pools.add()
pool.name = name
pool.resource_type = "network"
def get_network_reserve_msg(self, xpath):
event_id = str(uuid.uuid4())
- msg = rmgryang.VirtualLinkEventData()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData()
msg.event_id = event_id
msg.request_info.name = "mynet"
msg.request_info.subnet = "1.1.1.0/24"
- return msg, xpath.format(event_id)
+ return msg, xpath.format(quoted_key(event_id))
def get_compute_reserve_msg(self,xpath):
event_id = str(uuid.uuid4())
- msg = rmgryang.VDUEventData()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData()
msg.event_id = event_id
msg.request_info.name = "mynet"
msg.request_info.image_id = "This is a image_id"
c1 = msg.request_info.connection_points.add()
c1.name = "myport1"
c1.virtual_link_id = "This is a network_id"
- return msg, xpath.format(event_id)
+ return msg, xpath.format(quoted_key(event_id))
def test_create_resource_pools(self):
self.log.debug("STARTING - test_create_resource_pools")
tinfo = self.new_tinfo('poolconfig')
dts = rift.tasklets.DTS(tinfo, self.schema, self.loop)
- pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
- pool_records_xpath = "D,/rw-resource-mgr:resource-pool-records"
+ pool_xpath = "C,/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
+ pool_records_xpath = "D,/rw-project:project/rw-resource-mgr:resource-pool-records"
account_xpath = "C,/rw-launchpad:cloud-account"
- compute_xpath = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id='{}']"
- network_xpath = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data[event-id='{}']"
+ compute_xpath = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id={}]"
+ network_xpath = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data[event-id={}]"
@asyncio.coroutine
def configure_cloud_account():
cmake_minimum_required(VERSION 2.8)
-set(PKG_NAME rwlaunchpad)
-set(PKG_VERSION 1.0)
-set(PKG_RELEASE 1)
-set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
-
set(subdirs
mock
plugins
YANG_FILES
lpmocklet.yang
COMPONENT
- ${PKG_LONG_NAME}
+ ${INSTALL_COMPONENT}
LIBRARIES
mano-types_yang_gen
)
namespace "http://riftio.com/ns/riftware-1.0/lpmocklet";
prefix "lpmocklet";
- import rw-pb-ext {
- prefix "rwpb";
- }
-
import ietf-inet-types {
prefix "inet";
}
rpc start-vnfr {
input {
- rwpb:msg-new "StartVnfrInput";
leaf id {
type yang:uuid;
mandatory true;
}
}
output {
- rwpb:msg-new "StartVnfrOutput";
leaf status {
description "status of the start request";
type string;
rpc stop-vnfr {
input {
- rwpb:msg-new "StopVnfr";
leaf id {
type yang:uuid;
mandatory true;
install(
FILES
cli_launchpad_schema_listing.txt
+ cli_launchpad_rift_specific_schema_listing.txt
DESTINATION usr/data/manifest
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
)
--- /dev/null
+rw-project-nsd
+rw-project-vnfd
+rw-nsr
+rw-vnfr
\ No newline at end of file
-ietf-inet-types
ietf-l2-topology
-ietf-netconf-notifications
ietf-network
ietf-network-topology
-ietf-restconf-monitoring
-ietf-yang-types
-mano-types
-nsd
+nsd-base
nsr
-rw-base
-rwcal
-rw-cli-ext
+project-nsd
+project-vnfd
rw-cloud
+rw-ro-account
rw-config-agent
rw-conman
-rw-debug
-rw-dts
-rw-dtsperf
-rw-dtsperfmgr
rw-launchpad
rw-image-mgmt
rw-pkg-mgmt
rw-staging-mgmt
-rw-log
-rwlog-mgmt
-rw-manifest
-rw-memlog
-rw-mgmtagt
-rw-mgmt-schema
-rwmsg-data
-rw-netconf
-rw-restconf
-rw-notify-ext
-rw-nsd
rw-nsm
-rw-nsr
-rw-pb-ext
+rw-project-mano
rw-resource-mgr
-rw-restportforward
rwsdnal
rw-sdn
-rwshell-mgmt
-rw-sorch
rw-topology
-rw-vcs
-rwvcs-types
rw-vld
rw-vlr
-rw-vnfd
-rw-vnfr
+rw-vnfd-base
rw-yang-types
+rw-ha
vld
vlr
-vnfd
+vnfd-base
vnffgd
vnfr
##
# This function creates an install target for the plugin artifacts
##
-rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+rift_install_gobject_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py COMPONENT ${INSTALL_COMPONENT})
# Workaround RIFT-6485 - rpmbuild defaults to python2 for
# anything not in a site-packages directory so we have to
rift/tasklets/${TASKLET_NAME}/engine.py
rift/tasklets/${TASKLET_NAME}/scaling_operation.py
rift/tasklets/${TASKLET_NAME}/subscribers.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY)
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# 0 -> contains a list of all timestamps
# 1 -> contains a list of all values.
- self._series = numpy.empty(shape=(2, 1), dtype='int64')
+ # self._series = numpy.empty(shape=(2, 1), dtype='int64')
+ self._series = numpy.array([[],[]], dtype='int64')
self.threshold_time = threshold_time
def add_value(self, timestamp, value):
def is_window_full(self):
"""Verify if there is sufficient data for the current window.
"""
- if len(self._series[0]) <= 2:
+ if len(self._series[0]) < 2:
return False
start_time = self._series[0][0]
log,
dts,
loop,
+ project,
nsr_id,
monp_id,
scaling_criteria,
self.log,
self.dts,
self.loop,
+ project,
self.nsr_id,
self.monp_id,
callback=self.add_value)
"""
if self._timeseries.average() >= self.scale_out:
- # Enable the scale in limit, only when a scale-out has happened.
- self._scl_in_limit_enabled = True
+ self.log.info("Triggering a scaling-out request for the criteria {}".format(
+ self.name))
self.delegate.threshold_out_breached(self.name, avg)
- elif self._timeseries.average() < self.scale_in and self._scl_in_limit_enabled:
- self._scl_in_limit_enabled = False
+ elif self._timeseries.average() < self.scale_in :
+ self.log.info("Triggering a scaling-in request for the criteria {}".format(
+ self.name))
self.delegate.threshold_in_breached(self.name, avg)
if not self._timeseries.is_window_full():
return
+ self.log.debug("Sufficient sampling data obtained for criteria {}."
+ "Checking the scaling condition for the criteria".format(
+ self.name))
+
if not self.delegate:
return
class ScalingPolicy(ScalingCriteria.Delegate):
class Delegate:
@abc.abstractmethod
- def scale_in(self, scaling_group_name, nsr_id):
+ def scale_in(self, scaling_group_name, nsr_id, instance_id):
"""Delegate called when all the criteria for scaling-in are met.
Args:
log,
dts,
loop,
+ project,
nsr_id,
nsd_id,
scaling_group_name,
self.loop = loop
self.log = log
self.dts = dts
+ self.project = project
self.nsd_id = nsd_id
self.nsr_id = nsr_id
self.scaling_group_name = scaling_group_name
self.log,
self.dts,
self.loop,
+ self.project,
self.nsr_id,
callback=self.handle_nsr_monp)
+ self.nsr_scale_sub = monp_subscriber.NsrScalingGroupRecordSubscriber(
+ self.log,
+ self.dts,
+ self.loop,
+ self.project,
+ self.nsr_id,
+ self.scaling_group_name)
+
self.criteria_store = {}
# Timestamp at which the scale-in/scale-out request was generated.
self._last_triggered_time = None
self.scale_in_status = {cri.name: False for cri in self.scaling_criteria}
self.scale_out_status = {cri.name: False for cri in self.scaling_criteria}
+ self.scale_out_count = 0
def get_nsd_monp_cfg(self, nsr_monp):
"""Get the NSD's mon-param config.
Args:
monp : Yang Model
action : rwdts.QueryAction
-
+
"""
def handle_create():
if monp.id in self.criteria_store:
# Create a criteria object as soon as the first monitoring data
# is published.
+ self.log.debug("Created a ScalingCriteria monitor for {}".format(
+ cri.as_dict()))
+
criteria = ScalingCriteria(
self.log,
self.dts,
self.loop,
+ self.project,
self.nsr_id,
monp.id,
cri,
@asyncio.coroutine
def register(self):
yield from self.monp_sub.register()
+ yield from self.nsr_scale_sub.register()
def deregister(self):
self.monp_sub.deregister()
return True
+ def can_trigger_action(self):
+ if self._is_in_cooldown():
+ self.log.debug("In cooldown phase ignoring the scale action ")
+ return False
+
+ return True
+
+
def threshold_in_breached(self, criteria_name, value):
"""Delegate callback when scale-in threshold is breached
criteria_name : Criteria name
value : Average value
"""
- if self._is_in_cooldown():
+ self.log.debug("Avg value {} has fallen below the threshold limit for "
+ "{}".format(value, criteria_name))
+
+ if not self.can_trigger_action():
+ return
+
+ if self.scale_out_count < 1:
+ self.log.debug('There is no scaled-out VNFs at this point. Hence ignoring the scale-in')
return
self.scale_in_status[criteria_name] = True
+ self.log.info("Applying {} operation to check if all criteria {} for"
+ " scale-in-threshold are met".format(
+ self.scale_out_op,
+ self.scale_out_status))
statuses = self.scale_in_status.values()
is_breached = self.scale_in_op(statuses)
if is_breached and self.delegate:
- self._last_triggered_time = time.time()
- # Reset all statuses
- self.scale_in_status = {cri.name: False for cri in self.scaling_criteria}
- self.delegate.scale_in(self.scaling_group_name, self.nsr_id)
+ self.log.info("Triggering a scale-in action for policy {} as "
+ "all criteria have been met".format(self.name))
+
+ @asyncio.coroutine
+ def check_and_scale_in():
+ # data = yield from self.nsr_scale_sub.data()
+ # if len(data) <= 1:
+ # return
+
+ # # Get an instance ID
+ # instance_id = data[-1].instance_id
+
+ instance_id = 0 #assigning a value to follow existing scale_in signature
+ self._last_triggered_time = time.time()
+ self.scale_out_count -= 1
+ # Reset all statuses
+ self.scale_in_status = {cri.name: False for cri in self.scaling_criteria}
+ self.delegate.scale_in(self.scaling_group_name, self.nsr_id, instance_id)
+
+ self.loop.create_task(check_and_scale_in())
def threshold_out_breached(self, criteria_name, value):
"""Delegate callback when scale-out threshold is breached.
criteria_name : Criteria name
value : Average value
"""
- if self._is_in_cooldown():
+ self.log.debug("Avg value {} has gone above the threshold limit for "
+ "{}".format(value, criteria_name))
+
+ if not self.can_trigger_action():
return
self.scale_out_status[criteria_name] = True
+ self.log.info("Applying {} operation to check if all criteria {} for"
+ " scale-out-threshold are met".format(
+ self.scale_out_op,
+ self.scale_out_status))
+
statuses = self.scale_out_status.values()
is_breached = self.scale_out_op(statuses)
if is_breached and self.delegate:
+ self.log.info("Triggering a scale-out action for policy {} as "
+ "all criteria have been met".format(self.name))
self._last_triggered_time = time.time()
+ self.scale_out_count += 1
# Reset all statuses
self.scale_out_status = {cri.name: False for cri in self.scaling_criteria}
self.delegate.scale_out(self.scaling_group_name, self.nsr_id)
"""
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
import rift.mano.cloud
import rift.mano.dts as subscriber
import rift.tasklets
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ )
+class AutoScalerProject(ManoProject, engine.ScalingPolicy.Delegate):
+ def __init__(self, name, tasklet, **kw):
+ super(AutoScalerProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
-class AutoScalerTasklet(rift.tasklets.Tasklet, engine.ScalingPolicy.Delegate):
- """The main task of this Tasklet is to listen for NSR changes and once the
- NSR is configured, ScalingPolicy is created.
- """
- def __init__(self, *args, **kwargs):
-
- try:
- super().__init__(*args, **kwargs)
- self.store = None
- self.monparam_store = None
-
- self.nsr_sub = None
- self.nsr_monp_subscribers = {}
- self.instance_id_store = collections.defaultdict(list)
-
- except Exception as e:
- self.log.exception(e)
-
- def start(self):
- super().start()
-
- self.log.debug("Registering with dts")
-
- self.dts = rift.tasklets.DTS(
- self.tasklet_info,
- RwLaunchpadYang.get_schema(),
- self.loop,
- self.on_dts_state_change
- )
+ self.store = None
+ self.monparam_store = None
+ self.nsr_sub = None
+ self.nsr_monp_subscribers = {}
+ self.instance_id_store = collections.defaultdict(list)
- self.store = subscriber.SubscriberStore.from_tasklet(self)
- self.nsr_sub = subscriber.NsrCatalogSubscriber(self.log, self.dts, self.loop, self.handle_nsr)
+ self.store = subscriber.SubscriberStore.from_project(self)
+ self.nsr_sub = subscriber.NsrCatalogSubscriber(self.log, self.dts, self.loop,
+ self, self.handle_nsr)
- self.log.debug("Created DTS Api GI Object: %s", self.dts)
+ def deregister(self):
+ self.log.debug("De-register project {}".format(self.name))
+ self.nsr_sub.deregister()
+ self.store.deregister()
- def stop(self):
- try:
- self.dts.deinit()
- except Exception as e:
- self.log.exception(e)
@asyncio.coroutine
- def init(self):
+ def register (self):
self.log.debug("creating vnfr subscriber")
yield from self.store.register()
yield from self.nsr_sub.register()
- @asyncio.coroutine
- def run(self):
- pass
-
- @asyncio.coroutine
- def on_dts_state_change(self, state):
- """Handle DTS state change
-
- Take action according to current DTS state to transition application
- into the corresponding application state
-
- Arguments
- state - current dts state
-
- """
- switch = {
- rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
- rwdts.State.CONFIG: rwdts.State.RUN,
- }
-
- handlers = {
- rwdts.State.INIT: self.init,
- rwdts.State.RUN: self.run,
- }
-
- # Transition application to next state
- handler = handlers.get(state, None)
- if handler is not None:
- yield from handler()
-
- # Transition dts to next state
- next_state = switch.get(state, None)
- if next_state is not None:
- self.dts.handle.set_state(next_state)
-
- def scale_in(self, scaling_group_name, nsr_id):
+ def scale_in(self, scaling_group_name, nsr_id, instance_id):
"""Delegate callback
Args:
scaling_group_name (str): Scaling group name to be scaled in
nsr_id (str): NSR id
+ instance_id (str): Instance id of the scaling group
"""
self.log.info("Sending a scaling-in request for {} in NSR: {}".format(
@asyncio.coroutine
def _scale_in():
- instance_id = self.instance_id_store[(scaling_group_name, nsr_id)].pop()
+ # Purposely ignore passed instance_id
+ instance_id_ = self.instance_id_store[(scaling_group_name, nsr_id)].pop()
# Trigger an rpc
rpc_ip = NsrYang.YangInput_Nsr_ExecScaleIn.from_dict({
+ 'project_name': self.name,
'nsr_id_ref': nsr_id,
- 'instance_id': instance_id,
+ 'instance_id': instance_id_,
'scaling_group_name_ref': scaling_group_name})
rpc_out = yield from self.dts.query_rpc(
0,
rpc_ip)
- self.loop.create_task(_scale_in())
+ # Check for existing scaled-out VNFs if any.
+ if len(self.instance_id_store):
+ self.loop.create_task(_scale_in())
def scale_out(self, scaling_group_name, nsr_id):
"""Delegate callback for scale out requests
def _scale_out():
# Trigger an rpc
rpc_ip = NsrYang.YangInput_Nsr_ExecScaleOut.from_dict({
+ 'project_name': self.name,
'nsr_id_ref': nsr_id ,
'scaling_group_name_ref': scaling_group_name})
NS that moves to config state.
Args:
- nsr (RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr): Ns Opdata
+ nsr (RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr): Ns Opdata
action (rwdts.QueryAction): Action type of the change.
"""
def nsr_create():
nsr_id = nsr.ns_instance_config_ref
self.nsr_monp_subscribers[nsr_id] = []
nsd = self.store.get_nsd(nsr.nsd_ref)
+ self.log.debug ("Creating a scaling policy monitor for NSR: {}".format(
+ nsr_id))
+
@asyncio.coroutine
def task():
for scaling_group in nsd.scaling_group_descriptor:
for policy_cfg in scaling_group.scaling_policy:
policy = engine.ScalingPolicy(
- self.log, self.dts, self.loop,
+ self.log, self.dts, self.loop, self,
nsr.ns_instance_config_ref,
nsr.nsd_ref,
scaling_group.name,
delegate=self)
self.nsr_monp_subscribers[nsr_id].append(policy)
yield from policy.register()
+ self.log.debug ("Started a scaling policy monitor for NSR: {}".format(
+ nsr_id))
+
self.loop.create_task(task())
for policy in policies:
policy.deregister()
del self.nsr_monp_subscribers[nsr.ns_instance_config_ref]
+ self.log.debug ("Deleted the scaling policy monitor for NSD: {}".format(
+ nsr.ns_instance_config_ref))
+
if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
nsr_create()
elif action == rwdts.QueryAction.DELETE:
nsr_delete()
+
+
+class AutoScalerTasklet(rift.tasklets.Tasklet):
+ """The main task of this Tasklet is to listen for NSR changes and once the
+ NSR is configured, ScalingPolicy is created.
+ """
+ def __init__(self, *args, **kwargs):
+
+ try:
+ super().__init__(*args, **kwargs)
+ self.rwlog.set_category("rw-mano-log")
+
+ self._project_handler = None
+ self.projects = {}
+
+ except Exception as e:
+ self.log.exception(e)
+
+ def start(self):
+ super().start()
+
+ self.log.debug("Registering with dts")
+
+ self.dts = rift.tasklets.DTS(
+ self.tasklet_info,
+ RwLaunchpadYang.get_schema(),
+ self.loop,
+ self.on_dts_state_change
+ )
+
+ self.log.debug("Created DTS Api GI Object: %s", self.dts)
+
+ def stop(self):
+ try:
+ self.dts.deinit()
+ except Exception as e:
+ self.log.exception(e)
+
+ @asyncio.coroutine
+ def init(self):
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, AutoScalerProject)
+ self.project_handler.register()
+
+ @asyncio.coroutine
+ def run(self):
+ pass
+
+ @asyncio.coroutine
+ def on_dts_state_change(self, state):
+ """Handle DTS state change
+
+ Take action according to current DTS state to transition application
+ into the corresponding application state
+
+ Arguments
+ state - current dts state
+
+ """
+ switch = {
+ rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+ rwdts.State.CONFIG: rwdts.State.RUN,
+ }
+
+ handlers = {
+ rwdts.State.INIT: self.init,
+ rwdts.State.RUN: self.run,
+ }
+
+ # Transition application to next state
+ handler = handlers.get(state, None)
+ if handler is not None:
+ yield from handler()
+
+ # Transition dts to next state
+ next_state = switch.get(state, None)
+ if next_state is not None:
+ self.dts.handle.set_state(next_state)
+
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# limitations under the License.
#
+import gi
+
import rift.mano.dts as mano_dts
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
class NsrMonParamSubscriber(mano_dts.AbstractOpdataSubscriber):
"""Registers for NSR monitoring parameter changes.
-
+
Attributes:
monp_id (str): Monitoring Param ID
nsr_id (str): NSR ID
"""
- def __init__(self, log, dts, loop, nsr_id, monp_id=None, callback=None):
- super().__init__(log, dts, loop, callback)
+ def __init__(self, log, dts, loop, project, nsr_id, monp_id=None, callback=None):
+ super().__init__(log, dts, loop, project, callback)
self.nsr_id = nsr_id
self.monp_id = monp_id
def get_xpath(self):
- return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
- "[nsr:ns-instance-config-ref='{}']".format(self.nsr_id) +
+ return self.project.add_project("D,/nsr:ns-instance-opdata/nsr:nsr" +
+ "[nsr:ns-instance-config-ref={}]".format(quoted_key(self.nsr_id)) +
"/nsr:monitoring-param" +
- ("[nsr:id='{}']".format(self.monp_id) if self.monp_id else ""))
+ ("[nsr:id={}]".format(quoted_key(self.monp_id)) if self.monp_id else ""))
+class NsrScalingGroupRecordSubscriber(mano_dts.AbstractOpdataSubscriber):
+ def __init__(self, log, dts, loop, project, nsr_id, scaling_group, callback=None):
+ super().__init__(log, dts, loop, project, callback)
+ self.nsr_id = nsr_id
+ self.scaling_group = scaling_group
+
+ def get_xpath(self):
+ return self.project.add_project("D,/nsr:ns-instance-opdata/nsr:nsr" +
+ "[nsr:ns-instance-config-ref={}]".format(quoted_key(self.nsr_id)) +
+ "/nsr:scaling-group-record" +
+ "[nsr:scaling-group-name-ref={}]/instance".format(quoted_key(self.scaling_group)))
#!/usr/bin/env python3
-#
+
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
import argparse
import asyncio
+import gi
+import logging
import os
+import random
import sys
import unittest
-import random
-
import xmlrunner
+
import unittest.mock as mock
import rift.test.dts
import rift.tasklets.rwautoscaler.engine as engine
-import gi
gi.require_version('RwDtsYang', '1.0')
from gi.repository import (
RwNsrYang,
NsrYang,
- NsdYang,
+ ProjectNsdYang as NsdYang,
RwLaunchpadYang as launchpadyang,
RwVnfrYang,
- RwVnfdYang,
- RwNsdYang,
+ RwProjectVnfdYang as RwVnfdYang,
+ RwProjectNsdYang as RwNsdYang,
VnfrYang
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
-ScalingCriteria = NsdYang.YangData_Nsd_NsdCatalog_Nsd_ScalingGroupDescriptor_ScalingPolicy_ScalingCriteria
-ScalingPolicy = NsdYang.YangData_Nsd_NsdCatalog_Nsd_ScalingGroupDescriptor_ScalingPolicy
+ScalingCriteria = NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_ScalingGroupDescriptor_ScalingPolicy_ScalingCriteria
+ScalingPolicy = NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_ScalingGroupDescriptor_ScalingPolicy
class MockDelegate(engine.ScalingCriteria.Delegate):
def __init__(self, aggregation_type="AVERAGE", legacy=False):
self.aggregation_type = aggregation_type
self.legacy = legacy
- self.threshold_time = 3
+ self.threshold_time = 2
def __call__(self):
store = mock.MagicMock()
- mock_vnfd = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict({
+ mock_vnfd = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd.from_dict({
'id': "1",
'monitoring_param': [
{'description': 'no of ping requests',
store.get_vnfd = mock.MagicMock(return_value=mock_vnfd)
- mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({'id': '1'})
- mock_vnfr.vnfd = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict({'id': '1'})
+ mock_vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.from_dict({'id': '1'})
+ mock_vnfr.vnfd = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vnfd.from_dict({'id': '1'})
store.get_vnfr = mock.MagicMock(return_value=mock_vnfr)
- mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict({
+ mock_nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr.from_dict({
'ns_instance_config_ref': "1",
'name_ref': "Foo",
'nsd_ref': '1',
scale_in_val = 100
scale_out_val = 200
- mock_nsd = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd.from_dict({
+ mock_nsd = RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd.from_dict({
'id': '1',
+ 'name': 'mock',
+ 'short_name': 'm',
'monitoring_param': (monp_cfg if not self.legacy else []),
'constituent_vnfd': [{'member_vnf_index': 1,
'start_by_default': True,
def _populate_mock_values(self, criterias, nsr_id, floor, ceil):
# Mock publish
# Verify Scale in AND operator
- NsMonParam = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_MonitoringParam
+ NsMonParam = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_MonitoringParam
publisher = rift.test.dts.DescriptorPublisher(self.log, self.dts, self.loop)
for criteria in criterias:
monp_id = criteria.ns_monitoring_param_ref
- w_xpath = "D,/nsr:ns-instance-opdata/nsr:nsr"
- w_xpath = w_xpath + "[nsr:ns-instance-config-ref='{}']/nsr:monitoring-param".format(nsr_id)
- xpath = w_xpath + "[nsr:id ='{}']".format(monp_id)
+ w_xpath = "D,/rw-project:project/nsr:ns-instance-opdata/nsr:nsr"
+ w_xpath = w_xpath + "[nsr:ns-instance-config-ref={}]/nsr:monitoring-param".format(quoted_key(nsr_id))
+ xpath = w_xpath + "[nsr:id={}]".format(quoted_key(monp_id))
- for i in range(self.mock_store.threshold_time + 1):
+ for i in range(self.mock_store.threshold_time + 2):
value = random.randint(floor, ceil)
monp = NsMonParam.from_dict({
yield from self._populate_mock_values(policy.scaling_criteria, nsr_id, floor, ceil)
assert mock_delegate.scale_in_called == 0
- # Test 2: AND operation
+ # Test 2: AND operation
yield from scale_out(policy)
yield from self._populate_mock_values(policy.scaling_criteria, nsr_id, floor, ceil)
assert mock_delegate.scale_in_called == 1
assert mock_delegate.scale_in_called == 1
@rift.test.dts.async_test
- def _test_scale_out(self):
+ def test_scale_out(self):
""" Tests scale out
Asserts:
1. Scale out
2. Scale out doesn't happen during cooldown
- 3. AND operation
+ 3. AND operation
4. OR operation.
"""
store = self.mock_store()
def main():
+ logging.basicConfig(format='TEST %(message)s')
runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
parser = argparse.ArgumentParser()
if args.no_runner:
runner = None
+ # Set the global logging level
+ logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
##
# This function creates an install target for the plugin artifacts
##
-rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+rift_install_gobject_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py COMPONENT ${INSTALL_COMPONENT})
# Workaround RIFT-6485 - rpmbuild defaults to python2 for
# anything not in a site-packages directory so we have to
rift/tasklets/rwimagemgr/lib/__init__.py
rift/tasklets/rwimagemgr/lib/quickproxy/__init__.py
rift/tasklets/rwimagemgr/lib/quickproxy/proxy.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY)
rift_python_install_tree(
FILES
rift/imagemgr/__init__.py
rift/imagemgr/client.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY)
install(
bin/glance_start_wrapper
DESTINATION
usr/bin
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
)
if($ENV{RIFT_PLATFORM} MATCHES "fc20")
etc/fc20/glance-api-dist-paste.ini
DESTINATION
etc/glance
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
)
elseif($ENV{RIFT_PLATFORM} MATCHES "ub16")
install(
etc/ub16/schema-image.json
DESTINATION
etc/glance
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
)
else()
message(FATAL_ERROR "Unknown platform $ENV{RIFT_PLATFORM}")
def create_account(log):
- account_msg = RwCloudYang.CloudAccount.from_dict(dict(
+ account_msg = RwCloudYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList.from_dict(dict(
name="openstack",
account_type="openstack",
openstack=dict(
#image_size_cap = 1099511627776
# Address to bind the API server
-bind_host = 0.0.0.0
+bind_host = 127.0.0.1
# Port the bind the API server to
bind_port = 9292
debug=True
# Address to bind the registry server
-bind_host = 0.0.0.0
+bind_host = 127.0.0.1
# Port the bind the registry server to
bind_port = 9191
# Address to bind the server. Useful when selecting a particular
# network interface. (string value)
-bind_host = 0.0.0.0
+bind_host = 127.0.0.1
# The port on which the server will listen. (port value)
# Minimum value: 0
# Address to bind the server. Useful when selecting a particular
# network interface. (string value)
-bind_host = 0.0.0.0
+bind_host = 127.0.0.1
# The port on which the server will listen. (port value)
# Minimum value: 0
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
import asyncio
import concurrent.futures
-
import gi
+
+from rift.mano.utils.project import ManoProject
+
gi.require_version("RwImageMgmtYang", "1.0")
from gi.repository import (
RwImageMgmtYang,
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
class UploadJobError(Exception):
self._loop = loop
self._dts = dts
- def create_job(self, image_name, image_checksum, cloud_account_names=None):
+ def create_job(self, image_name, image_checksum, project, cloud_account_names=None):
""" Create an image upload_job and return an UploadJob instance
Arguments:
Returns:
An UploadJob instance
"""
- create_job_msg = RwImageMgmtYang.CreateUploadJob.from_dict({
+ self._log.debug("Project {}: Create image upload job for image {} to {}".
+ format(project, image_name, cloud_account_names))
+
+ create_job_msg = RwImageMgmtYang.YangInput_RwImageMgmt_CreateUploadJob.from_dict({
+ "project_name": project,
"onboarded_image": {
"image_name": image_name,
"image_checksum": image_checksum,
job_id = rpc_result.job_id
- return UploadJob(self._log, self._loop, self._dts, job_id)
+ return UploadJob(self._log, self._loop, self._dts, job_id, project)
- def create_job_threadsafe(self, image_name, image_checksum, cloud_account_names=None):
+ def create_job_threadsafe(self, image_name, image_checksum, project, cloud_account_names=None):
""" A thread-safe, syncronous wrapper for create_job """
future = concurrent.futures.Future()
def add_task():
task = self._loop.create_task(
- self.create_job(image_name, image_checksum, cloud_account_names)
+ self.create_job(image_name, image_checksum, project, cloud_account_names)
)
task.add_done_callback(on_done)
class UploadJob(object):
""" A handle for a image upload job """
- def __init__(self, log, loop, dts, job_id):
+ def __init__(self, log, loop, dts, job_id, project):
self._log = log
self._loop = loop
self._dts = dts
self._job_id = job_id
+ self._project = project
@asyncio.coroutine
def wait_until_complete(self):
UploadJobCancelled: The upload job was cancelled
"""
self._log.debug("waiting for upload job %s to complete", self._job_id)
+ xpath = ManoProject.prefix_project("D,/rw-image-mgmt:upload-jobs/" +
+ "rw-image-mgmt:job[rw-image-mgmt:id={}]".
+ format(quoted_key(str(self._job_id))),
+ project=self._project,
+ log=self._log)
+
while True:
- query_iter = yield from self._dts.query_read(
- "D,/rw-image-mgmt:upload-jobs/rw-image-mgmt:job[rw-image-mgmt:id='{}']".format(
- self._job_id
- )
- )
+ query_iter = yield from self._dts.query_read(xpath)
job_status_msg = None
for fut_resp in query_iter:
job_status_msg = (yield from fut_resp).result
create_args = dict(
location=image_url,
name=image_name,
- is_public="True",
+ is_public="False",
disk_format=disk_format,
container_format=container_format,
)
req_callback=on_http_request,
resp_callback=on_http_response,
io_loop=io_loop,
- debug_level=QuickProxyServer.DEBUG_LEVEL
+ debug_level=QuickProxyServer.DEBUG_LEVEL,
+ address="127.0.0.1",
)
def stop(self):
test_ssl=False,
debug_level=0,
io_loop=None,
+ address="",
):
"""
kwargs = {"io_loop": io_loop}
http_server = tornado.httpserver.HTTPServer(app, **kwargs)
- http_server.listen(port)
+ http_server.listen(port, address)
return http_server
import rift.tasklets
import rift.mano.cloud
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectConfigCallbacks,
+ ProjectHandler,
+ get_add_delete_update_cfgs,
+ DEFAULT_PROJECT,
+ )
from . import glance_proxy_server
from . import glance_client
class CloudAccountDtsHandler(object):
- def __init__(self, log, dts, log_hdl):
+ def __init__(self, log, dts, log_hdl, project):
self._dts = dts
self._log = log
self._log_hdl = log_hdl
self._cloud_cfg_subscriber = None
+ self._project = project
+ @asyncio.coroutine
def register(self, on_add_apply, on_delete_apply):
- self._log.debug("creating cloud account config handler")
+ self._log.debug("Project {}: creating cloud account config handler".
+ format(self._project.name))
self._cloud_cfg_subscriber = rift.mano.cloud.CloudAccountConfigSubscriber(
- self._dts, self._log, self._log_hdl,
+ self._dts, self._log, self._log_hdl, self._project,
rift.mano.cloud.CloudAccountConfigCallbacks(
on_add_apply=on_add_apply,
on_delete_apply=on_delete_apply,
)
)
- self._cloud_cfg_subscriber.register()
+ yield from self._cloud_cfg_subscriber.register()
+
+ def deregister(self):
+ self._log.debug("Project {}: Removing cloud account config handler".
+ format(self._project.name))
+ self._cloud_cfg_subscriber.deregister()
def openstack_image_to_image_info(openstack_image):
A ImageInfo CAL protobuf message
"""
- image_info = RwcalYang.ImageInfoItem()
+ image_info = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList()
copy_fields = ["id", "name", "checksum", "container_format", "disk_format"]
for field in copy_fields:
value = getattr(openstack_image, field)
setattr(image_info, field, value)
+ value = getattr(openstack_image, "properties")
+ for key in value:
+ prop = image_info.properties.add()
+ prop.name = key
+ prop.property_value = value[key]
+
image_info.state = openstack_image.status
return image_info
class ImageDTSShowHandler(object):
""" A DTS publisher for the upload-jobs data container """
- def __init__(self, log, loop, dts, job_controller):
- self._log = log
- self._loop = loop
- self._dts = dts
+ def __init__(self, project, job_controller):
+ self._log = project.log
+ self._loop = project.loop
+ self._dts = project.dts
self._job_controller = job_controller
+ self._project = project
self._subscriber = None
+ def get_xpath(self):
+ return self._project.add_project("D,/rw-image-mgmt:upload-jobs")
+
@asyncio.coroutine
def register(self):
""" Register as a publisher and wait for reg_ready to complete """
- def get_xpath():
- return "D,/rw-image-mgmt:upload-jobs"
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
xact_info.respond_xpath(
rwdts.XactRspCode.ACK,
- xpath=get_xpath(),
+ xpath=self.get_xpath(),
msg=jobs_pb_msg,
)
reg_event.set()
self._subscriber = yield from self._dts.register(
- xpath=get_xpath(),
+ xpath=self.get_xpath(),
handler=rift.tasklets.DTS.RegistrationHandler(
on_prepare=on_prepare,
on_ready=on_ready,
yield from reg_event.wait()
+ def deregister(self):
+ self._log.debug("Project {}: De-register show image handler".
+ format(self._project.name))
+ if self._subscriber:
+ self._subscriber.delete_element(self.get_xpath())
+ self._subscriber.deregister()
+ self._subscriber = None
+
class ImageDTSRPCHandler(object):
""" A DTS publisher for the upload-job RPC's """
- def __init__(self, log, loop, dts, accounts, glance_client, upload_task_creator, job_controller):
- self._log = log
- self._loop = loop
- self._dts = dts
- self._accounts = accounts
+ def __init__(self, project, glance_client, upload_task_creator, job_controller):
+ self._log = project.log
+ self._loop = project.loop
+ self._dts = project.dts
self._glance_client = glance_client
self._upload_task_creator = upload_task_creator
self._job_controller = job_controller
+ self._project = project
- self._subscriber = None
+ self._create = None
+ self._cancel = None
+
+ @property
+ def accounts(self):
+ return self._project.cloud_accounts
@asyncio.coroutine
def _register_create_upload_job(self):
create_msg = msg
account_names = create_msg.cloud_account
+
+ self._log.debug("Create upload job msg: {} ".format(msg.as_dict()))
+
+ if not self._project.rpc_check(msg, xact_info):
+ return
+
# If cloud accounts were not specified, upload image to all cloud account
if not account_names:
- account_names = list(self._accounts.keys())
+ account_names = list(self.accounts.keys())
- for account_name in account_names:
- if account_name not in self._accounts:
- raise AccountNotFoundError("Could not find account %s", account_name)
+ else:
+ for account_name in account_names:
+ if account_name not in self.accounts:
+ raise AccountNotFoundError("Could not find account %s", account_name)
if create_msg.has_field("external_url"):
glance_image = yield from self._upload_task_creator.create_glance_image_from_url_create_rpc(
)
elif create_msg.has_field("onboarded_image"):
+ self._log.debug("onboarded_image {} to accounts {}".
+ format(create_msg.onboarded_image, account_names))
tasks = yield from self._upload_task_creator.create_tasks_from_onboarded_create_rpc(
account_names, create_msg.onboarded_image
)
else:
raise ImageRequestError("an image selection must be provided")
- rpc_out_msg = RwImageMgmtYang.CreateUploadJobOutput(job_id=job_id)
+ rpc_out_msg = RwImageMgmtYang.YangOutput_RwImageMgmt_CreateUploadJob(job_id=job_id)
xact_info.respond_xpath(
rwdts.XactRspCode.ACK,
def on_ready(_, status):
reg_event.set()
- self._subscriber = yield from self._dts.register(
- xpath="I," + get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare,
- on_ready=on_ready,
- ),
- flags=rwdts.Flag.PUBLISHER,
- )
+ self._create = yield from self._dts.register(
+ xpath="I," + get_xpath(),
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare,
+ on_ready=on_ready,
+ ),
+ flags=rwdts.Flag.PUBLISHER,
+ )
yield from reg_event.wait()
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
+ if not self._project.rpc_check(msg, xact_info):
+ return
+
if not msg.has_field("job_id"):
self._log.error("cancel-upload-job missing job-id field.")
xact_info.respond_xpath(rwdts.XactRspCode.NACK)
def on_ready(_, status):
reg_event.set()
- self._subscriber = yield from self._dts.register(
- xpath="I," + get_xpath(),
- handler=rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_prepare,
- on_ready=on_ready,
- ),
- flags=rwdts.Flag.PUBLISHER,
- )
+ self._cancel = yield from self._dts.register(
+ xpath="I," + get_xpath(),
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare,
+ on_ready=on_ready,
+ ),
+ flags=rwdts.Flag.PUBLISHER,
+ )
yield from reg_event.wait()
yield from self._register_create_upload_job()
yield from self._register_cancel_upload_job()
+ def deregister(self):
+ self._log.debug("Project {}: Deregister image rpc handlers".
+ format(self._project.name))
+ if self._create:
+ self._create.deregister()
+ self._create = None
+
+ if self._cancel:
+ self._cancel.deregister()
+ self._cancel = None
+
class GlanceClientUploadTaskCreator(object):
""" This class creates upload tasks using configured cloud accounts and
configured image catalog glance client """
- def __init__(self, log, loop, accounts, glance_client):
- self._log = log
- self._loop = loop
- self._accounts = accounts
+ def __init__(self, project, glance_client):
+ self._log = project.log
+ self._loop = project.loop
self._glance_client = glance_client
+ self._project = project
+
+ @property
+ def accounts(self):
+ return self._project.cloud_accounts
@asyncio.coroutine
def create_tasks(self, account_names, image_id=None, image_name=None, image_checksum=None):
tasks = []
for account_name in account_names:
- if account_name not in self._accounts:
+ if account_name not in self.accounts:
raise AccountNotFoundError("Could not find account %s", account_name)
# For each account name provided, create a pipe (GlanceImagePipeGen)
# which feeds data into the UploadTask while also monitoring the various
# transmit stats (progress, bytes written, bytes per second, etc)
for account_name in account_names:
- account = self._accounts[account_name]
+ account = self.accounts[account_name]
self._log.debug("creating task for account %s", account.name)
glance_data_gen = self._glance_client.get_image_data(image_info.id)
create_msg.image_checksum if "image_checksum" in create_msg else None)
)
+class ImageMgrProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(ImageMgrProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+ try:
+ self.glance_client = kw['client']
+ except KeyError as e:
+ self._log.exception("kw {}: {}".format(kw, e))
+
+ self.cloud_cfg_subscriber = None
+ self.job_controller = None
+ self.task_creator = None
+ self.rpc_handler = None
+ self.show_handler = None
+
+ self.cloud_accounts = {}
+
+ @asyncio.coroutine
+ def register(self):
+ try:
+ self.log.debug("creating cloud account handler")
+ self.cloud_cfg_subscriber = CloudAccountDtsHandler(self._log,
+ self._dts,
+ self._log_hdl,
+ self)
+ yield from self.cloud_cfg_subscriber.register(
+ self.on_cloud_account_create,
+ self.on_cloud_account_delete
+ )
+
+ self.job_controller = upload.ImageUploadJobController(
+ self
+ )
+
+ self.task_creator = GlanceClientUploadTaskCreator(
+ self, self.glance_client,
+ )
+
+ self.rpc_handler = ImageDTSRPCHandler(
+ self, self.glance_client, self.task_creator,
+ self.job_controller,
+ )
+ yield from self.rpc_handler.register()
+
+ self.show_handler = ImageDTSShowHandler(
+ self, self.job_controller,
+ )
+ yield from self.show_handler.register()
+ except Exception as e:
+ self.log.exception("Error during project {} register: e".
+ format(self.name, e))
+
+ def deregister(self):
+ self.log.debug("De-register handlers for project: {}".format(self.name))
+ self.rpc_handler.deregister()
+ self.show_handler.deregister()
+ self.cloud_cfg_subscriber.deregister()
+
+ def on_cloud_account_create(self, account):
+ self.log.debug("adding cloud account: %s", account.name)
+ self.cloud_accounts[account.name] = account
+
+ def on_cloud_account_delete(self, account_name):
+ self.log.debug("deleting cloud account: %s", account_name)
+ if account_name not in self.cloud_accounts:
+ self.log.warning("cloud account not found: %s", account_name)
+ else:
+ del self.cloud_accounts[account_name]
class ImageManagerTasklet(rift.tasklets.Tasklet):
"""
super().__init__(*args, **kwargs)
self.rwlog.set_category("rw-mano-log")
- self.cloud_cfg_subscriber = None
self.http_proxy = None
self.proxy_server = None
self.dts = None
- self.job_controller = None
- self.cloud_accounts = {}
self.glance_client = None
- self.task_creator = None
- self.rpc_handler = None
- self.show_handler = None
+ self.project_handler = None
+
+ self.projects = {}
def start(self):
super().start()
@asyncio.coroutine
def init(self):
try:
- self.log.debug("creating cloud account handler")
- self.cloud_cfg_subscriber = CloudAccountDtsHandler(self.log, self.dts, self.log_hdl)
- self.cloud_cfg_subscriber.register(
- self.on_cloud_account_create,
- self.on_cloud_account_delete
- )
-
self.log.debug("creating http proxy server")
self.http_proxy = glance_proxy_server.QuickProxyServer(self.log, self.loop)
)
self.proxy_server.start()
- self.job_controller = upload.ImageUploadJobController(
- self.log, self.loop
- )
-
self.glance_client = glance_client.OpenstackGlanceClient.from_token(
self.log, "127.0.0.1", "9292", "test"
)
- self.task_creator = GlanceClientUploadTaskCreator(
- self.log, self.loop, self.cloud_accounts, self.glance_client
- )
-
- self.rpc_handler = ImageDTSRPCHandler(
- self.log, self.loop, self.dts, self.cloud_accounts, self.glance_client, self.task_creator,
- self.job_controller
- )
- yield from self.rpc_handler.register()
-
- self.show_handler = ImageDTSShowHandler(
- self.log, self.loop, self.dts, self.job_controller
- )
- yield from self.show_handler.register()
+ self.log.debug("Creating project handler")
+ self.project_handler = ProjectHandler(self, ImageMgrProject,
+ client=self.glance_client)
+ self.project_handler.register()
except Exception as e:
self.log.exception("error during init")
- def on_cloud_account_create(self, account):
- self.log.debug("adding cloud account: %s", account.name)
- self.cloud_accounts[account.name] = account
-
- def on_cloud_account_delete(self, account_name):
- self.log.debug("deleting cloud account: %s", account_name)
- if account_name not in self.cloud_accounts:
- self.log.warning("cloud account not found: %s", account_name)
-
- del self.cloud_accounts[account_name]
-
@asyncio.coroutine
def run(self):
pass
""" This class starts and manages ImageUploadJobs """
MAX_COMPLETED_JOBS = 20
- def __init__(self, log, loop, max_completed_jobs=MAX_COMPLETED_JOBS):
- self._log = log
- self._loop = loop
+ def __init__(self, project, max_completed_jobs=MAX_COMPLETED_JOBS):
+ self._log = project.log
+ self._loop = project.loop
+ self._project = project
self._job_id_gen = itertools.count(1)
self._max_completed_jobs = max_completed_jobs
@property
def pb_msg(self):
""" the UploadJobs protobuf message """
- upload_jobs_msg = RwImageMgmtYang.UploadJobs()
+ upload_jobs_msg = RwImageMgmtYang.YangData_RwProject_Project_UploadJobs()
for job in self._jobs.values():
upload_jobs_msg.job.append(job.pb_msg)
@property
def pb_msg(self):
""" The UploadJob protobuf message """
- task = RwImageMgmtYang.UploadJob.from_dict({
+ task = RwImageMgmtYang.YangData_RwProject_Project_UploadJobs_Job.from_dict({
"id": self._job_id,
"status": self._state,
"start_time": self._start_time,
""" Start the rate monitoring task """
@asyncio.coroutine
def periodic_rate_task():
- while True:
- start_time = time.time()
- start_bytes = self._bytes_written
- yield from asyncio.sleep(1, loop=self._loop)
- time_period = time.time() - start_time
- num_bytes = self._bytes_written - start_bytes
+ try:
+ while True:
+ start_time = time.time()
+ start_bytes = self._bytes_written
+ yield from asyncio.sleep(1, loop=self._loop)
+ time_period = time.time() - start_time
+ num_bytes = self._bytes_written - start_bytes
- self._byte_rate = self._rate_calc.add_measurement(num_bytes, time_period)
+ self._byte_rate = self._rate_calc.add_measurement(num_bytes, time_period)
+ except asyncio.CancelledError:
+ self._log.debug("rate monitoring task cancelled")
self._log.debug("starting rate monitoring task")
self._rate_task = self._loop.create_task(periodic_rate_task())
self._write_hdl = os.fdopen(write_fd, 'wb')
self._close_hdl = self._write_hdl
+ self._stop = False
+ self._t = None
+
@property
def write_hdl(self):
return self._write_hdl
self._log.debug("starting image data write to pipe")
try:
for data in self._data_gen:
+ if self._stop:
+ break
+
try:
self._write_hdl.write(data)
except (BrokenPipeError, ValueError) as e:
t.daemon = True
t.start()
+ self._t = t
+
def stop(self):
self._log.debug("stop requested, closing write side of pipe")
- self._write_hdl.close()
+ self._stop = True
+ if self._t is not None:
+ self._t.join(timeout=1)
class AccountImageUploadTask(object):
@property
def pb_msg(self):
""" The UploadTask protobuf message """
- task = RwImageMgmtYang.UploadTask.from_dict({
+ task = RwImageMgmtYang.YangData_RwProject_Project_UploadJobs_Job_UploadTasks.from_dict({
"cloud_account": self.cloud_account,
"image_id": self.image_id,
"image_name": self.image_name,
from rift.tasklets.rwimagemgr import tasklet
from rift.tasklets.rwimagemgr import upload
+from rift.mano.utils.project import ManoProject, DEFAULT_PROJECT
from rift.test.dts import async_test
def configure_test(self, loop, test_id):
self.log.debug("STARTING - %s", self.id())
self.tinfo = self.new_tinfo(self.id())
- self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+
+ self.project = ManoProject(self.log, name=DEFAULT_PROJECT)
+ self.project._dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+ self.project.cloud_accounts = {'mock'}
self.task_creator_mock = create_upload_task_creator_mock()
self.job_controller_mock = create_job_controller_mock()
self.rpc_handler = tasklet.ImageDTSRPCHandler(
- self.log, self.loop, self.dts, {'mock', None}, object(), self.task_creator_mock,
+ self.project, object(), self.task_creator_mock,
self.job_controller_mock
)
self.show_handler = tasklet.ImageDTSShowHandler(
- self.log, self.loop, self.dts, self.job_controller_mock
- )
+ self.project, self.job_controller_mock)
self.tinfo_c = self.new_tinfo(self.id() + "_client")
self.dts_c = rift.tasklets.DTS(self.tinfo_c, self.schema, self.loop)
self.task_creator_mock.create_tasks_from_onboarded_create_rpc.return_value = [upload_task]
self.job_controller_mock.create_job.return_value = 2
type(self.job_controller_mock).pb_msg = unittest.mock.PropertyMock(
- return_value=RwImageMgmtYang.UploadJobs.from_dict({
+ return_value=RwImageMgmtYang.YangData_RwProject_Project_UploadJobs.from_dict({
"job": [
{
"id": 2,
})
)
- create_job_msg = RwImageMgmtYang.CreateUploadJob.from_dict({
+ create_job_msg = RwImageMgmtYang.YangInput_RwImageMgmt_CreateUploadJob.from_dict({
"cloud_account": [upload_task.cloud_account],
"onboarded_image": {
"image_name": upload_task.image_name,
"image_checksum": upload_task.image_checksum,
- }
+ },
+ "project_name": self.project.name,
})
query_iter = yield from self.dts_c.query_rpc(
)
query_iter = yield from self.dts_c.query_read(
- "D,/rw-image-mgmt:upload-jobs",
+ self.project.add_project("D,/rw-image-mgmt:upload-jobs"),
)
for fut_resp in query_iter:
from rift.tasklets.rwimagemgr import upload
from rift.package import checksums
from rift.test.dts import async_test
+from rift.mano.utils.project import ManoProject, DEFAULT_PROJECT
import rw_status
import gi
@rwstatus
def do_get_image_list(self, account):
- boxed_image_list = RwcalYang.VimResources()
+ boxed_image_list = RwcalYang.YangData_RwProject_Project_VimResources()
for msg in self._image_msgs:
boxed_image_list.imageinfo_list.append(msg)
def create_image_info(image_name, image_checksum):
- image = RwcalYang.ImageInfoItem()
+ image = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList()
image.name = image_name
image.checksum = image_checksum
image.disk_format = os.path.splitext(image_name)[1][1:]
class ImageMockMixin(object):
- ACCOUNT_MSG = RwCloudYang.CloudAccount(
+ ACCOUNT_MSG = RwCloudYang.YangData_RwProject_Project_Cloud_Account(
name="mock",
account_type="mock",
)
task_pb_msg = upload_task.pb_msg
self.assertEqual(upload_task.image_name, task_pb_msg.image_name)
+ # TODO: Fix this
+ @unittest.skip("Causes coredump in OSM")
@async_test
def test_cancel_image_task(self):
@asyncio.coroutine
@async_test
def test_create_image_name_and_checksum_exists(self):
with self.create_upload_task(self.account) as upload_task:
- image_entry = RwcalYang.ImageInfoItem(
+ image_entry = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList(
id="asdf",
name=upload_task.image_name,
checksum=upload_task.image_checksum
self.assertEqual("FAILED", job.state)
+ # TODO: Fix this
+ @unittest.skip("Causes coredump in OSM")
@async_test
def test_cancel_job(self):
@asyncio.coroutine
def __init__(self, *args, **kwargs):
self._loop = asyncio.get_event_loop()
self._log = logging.getLogger(__file__)
-
+ self._project = ManoProject(self._log, name=DEFAULT_PROJECT)
+ self._project._loop = self._loop
ImageMockMixin.__init__(self, self._log)
unittest.TestCase.__init__(self, *args, **kwargs)
@async_test
def test_controller_single_task_job(self):
- controller = upload.ImageUploadJobController(
- self._log, self._loop
- )
+ controller = upload.ImageUploadJobController(self._project)
with self.create_upload_task(self.account) as upload_task:
job_id = controller.create_job([upload_task])
@async_test
def test_controller_multi_task_job(self):
- controller = upload.ImageUploadJobController(
- self._log, self._loop
- )
+ controller = upload.ImageUploadJobController(self._project)
with self.create_upload_task(self.account) as upload_task1:
with self.create_upload_task(self.account) as upload_task2:
@async_test
def test_controller_multi_jobs(self):
- controller = upload.ImageUploadJobController(
- self._log, self._loop
- )
+ controller = upload.ImageUploadJobController(self._project)
with self.create_upload_task(self.account) as upload_task1:
with self.create_upload_task(self.account) as upload_task2:
##
# This function creates an install target for the plugin artifacts
##
-rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+rift_install_gobject_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py COMPONENT ${INSTALL_COMPONENT})
# Workaround RIFT-6485 - rpmbuild defaults to python2 for
# anything not in a site-packages directory so we have to
FILES
rift/tasklets/${TASKLET_NAME}/__init__.py
rift/tasklets/${TASKLET_NAME}/convert_pkg.py
- rift/tasklets/${TASKLET_NAME}/datacenters.py
rift/tasklets/${TASKLET_NAME}/export.py
rift/tasklets/${TASKLET_NAME}/extract.py
rift/tasklets/${TASKLET_NAME}/image.py
rift/tasklets/${TASKLET_NAME}/tasklet.py
rift/tasklets/${TASKLET_NAME}/tosca.py
rift/tasklets/${TASKLET_NAME}/uploader.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY)
rift_python_install_tree(
FILES
rift/package/__init__.py
rift/package/archive.py
- rift/package/charm.py
rift/package/checksums.py
- rift/package/config.py
rift/package/convert.py
rift/package/handler.py
rift/package/icon.py
rift/package/script.py
rift/package/store.py
rift/package/cloud_init.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY)
rift_add_subdirs(test scripts)
+++ /dev/null
-
-#
-# Copyright 2016 RIFT.IO Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import re
-import os.path
-
-from . import package
-
-
-class CharmExtractionError(Exception):
- pass
-
-
-class PackageCharmExtractor(object):
- """ This class is reponsible for extracting charms to the correct directory
-
- In order to remain compatible with the existing Jujuclient, we extract the charms
- to a known location (RIFT-13282)
- """
- DEFAULT_INSTALL_DIR = os.path.join(
- os.environ["RIFT_ARTIFACTS"],
- "launchpad"
- )
-
- CHARM_REGEX = "{prefix}charms/(trusty/)?(?P<charm_name>[^/]+)$"
-
- def __init__(self, log, install_dir=DEFAULT_INSTALL_DIR):
- self._log = log
- self._install_dir = install_dir
-
- def _get_rel_dest_path(self, descriptor_id, charm_name):
- dest_rel_path = "libs/{}/charms/trusty/{}".format(descriptor_id, charm_name)
- dest_path = os.path.join(self._install_dir, dest_rel_path)
- return dest_path
-
- @classmethod
- def charm_dir_map(cls, package):
- charm_map = {}
- regex = cls.CHARM_REGEX.format(prefix=package.prefix)
-
- for dir_name in package.dirs:
- match = re.match(
- cls.CHARM_REGEX.format(prefix=package.prefix), dir_name,
- )
- if match is None:
- continue
-
- charm_name = match.group("charm_name")
- if charm_name == "trusty":
- continue
-
- charm_map[charm_name] = dir_name
-
- return charm_map
-
- def get_extracted_charm_dir(self, package_id, charm_name):
- return os.path.join(
- self._get_rel_dest_path(package_id, charm_name),
- )
-
- def extract_charms(self, pkg):
- """ Extract charms contained within the DescriptorPackage
- to the known charm directory.
-
- Arguments:
- pkg - The descriptor package that MAY contain charm directories
-
- Raises:
- CharmExtractionError - Charms in the package failed to get extracted
- """
- descriptor_id = pkg.descriptor_id
- charm_dir_map = PackageCharmExtractor.charm_dir_map(pkg)
-
- for charm_name, charm_dir in charm_dir_map.items():
- dest_rel_path = self._get_rel_dest_path(descriptor_id, charm_name)
- dest_path = os.path.join(self._install_dir, dest_rel_path)
-
- self._log.debug("Extracting %s charm to %s", charm_name, dest_path)
- try:
- pkg.extract_dir(charm_dir, dest_path)
- except package.ExtractError as e:
- raise CharmExtractionError("Failed to extract charm %s" % charm_name) from e
def to_string(self):
string = ""
for file_name, file_checksum in self.items():
- string += "{} {}\n".format(file_name, file_checksum)
+ string += "{} {}\n".format(file_checksum, file_name)
return string
+++ /dev/null
-
-#
-# Copyright 2016 RIFT.IO Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import re
-import os.path
-
-from . import package
-
-
-class ConfigExtractionError(Exception):
- pass
-
-
-class PackageConfigExtractor(object):
- """ This class is reponsible for extracting config data to the correct directory
-
- In order to remain compatible with the existing ConfigManager, we extract the config
- to a known location (RIFT-13282)
- """
- DEFAULT_INSTALL_DIR = os.path.join(
- os.environ["RIFT_ARTIFACTS"],
- "launchpad"
- )
-
- CONFIG_REGEX = "{prefix}(ns_config|vnf_config)/(?P<config_name>[^/]+.yaml)$"
-
- def __init__(self, log, install_dir=DEFAULT_INSTALL_DIR):
- self._log = log
- self._install_dir = install_dir
-
- def _get_rel_dest_path(self, descriptor_id, config_name):
- dest_rel_path = "libs/{}/config/{}".format(descriptor_id, config_name)
- dest_path = os.path.join(self._install_dir, dest_rel_path)
- return dest_path
-
- @classmethod
- def package_config_files(cls, package):
- config_map = {}
- regex = cls.CONFIG_REGEX.format(prefix=package.prefix)
-
- for file_name in package.files:
- match = re.match(
- cls.CONFIG_REGEX.format(prefix=package.prefix), file_name,
- )
- if match is None:
- continue
-
- config_name = match.group("config_name")
-
- config_map[config_name] = file_name
-
- return config_map
-
- def get_extracted_config_path(self, package_id, config_name):
- return os.path.join(
- self._get_rel_dest_path(package_id, os.path.basename(config_name)),
- )
-
- def extract_configs(self, pkg):
- """ Extract any configuration files from the DescriptorPackage
-
- Arguments:
- pkg - A DescriptorPackage
-
- Raises:
- ConfigExtractionError - The configuration could not be extracted
- """
- descriptor_id = pkg.descriptor_id
-
- config_files = PackageConfigExtractor.package_config_files(pkg).items()
- for config_name, config_file in config_files:
- dest_rel_path = self._get_rel_dest_path(descriptor_id, config_name)
- dest_path = os.path.join(self._install_dir, dest_rel_path)
-
- self._log.debug("Extracting %s config to %s", config_name, dest_path)
- try:
- pkg.extract_file(config_file, dest_path)
- except package.ExtractError as e:
- raise ConfigExtractionError("Failed to extract config %s" % config_name) from e
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
import json
+import logging
import os
-import tempfile
+import yaml
import gi
gi.require_version('RwNsdYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
gi.require_version('RwYang', '1.0')
from gi.repository import (
RwNsdYang,
RwVnfdYang,
NsdYang,
VnfdYang,
+ RwProjectNsdYang,
+ RwProjectVnfdYang,
+ ProjectNsdYang,
+ ProjectVnfdYang,
RwYang,
)
+from rift.mano.utils.project import NS_PROJECT
+from rift.rwlib.translation.json2xml import InvalidSchemaException
class UnknownExtensionError(Exception):
pass
class ProtoMessageSerializer(object):
"""(De)Serializer/deserializer fo a specific protobuf message into various formats"""
- libncx_model = None
+ libyang_model = None
- def __init__(self, yang_ns, yang_pb_cls):
+ def __init__(self, yang_ns, yang_pb_cls,
+ yang_ns_project, yang_pb_project_cls):
""" Create a serializer for a specific protobuf message """
self._yang_ns = yang_ns
self._yang_pb_cls = yang_pb_cls
+ self._yang_ns_project = yang_ns_project
+ self._yang_pb_project_cls = yang_pb_project_cls
+
+ self._log = logging.getLogger('rw-maon-log')
@classmethod
def _deserialize_extension_method_map(cls):
""" The Protobuf's GI class (e.g. RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd) """
return self._yang_pb_cls
+ @property
+ def yang_ns_project(self):
+ """ The Protobuf's GI namespace class (e.g. RwProjectVnfdYang) """
+ return self._yang_ns_project
+
+ @property
+ def yang_class_project(self):
+ """ The Protobuf's GI class (e.g. RwProjectVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd) """
+ return self._yang_pb_project_cls
+
@property
def model(self):
cls = self.__class__
- # Cache the libncx model for the serializer class
- if cls.libncx_model is None:
- cls.libncx_model = RwYang.model_create_libncx()
- cls.libncx_model.load_schema_ypbc(self.yang_namespace.get_schema())
+ # Cache the libyang model for the serializer class
+ if cls.libyang_model is None:
+ cls.libyang_model = RwYang.model_create_libyang()
+ cls.libyang_model.load_schema_ypbc(self.yang_namespace.get_schema())
+ cls.libyang_model.load_schema_ypbc(self.yang_ns_project.get_schema())
- return cls.libncx_model
+ return cls.libyang_model
- def _from_xml_file_hdl(self, file_hdl):
+ def _from_xml_file_hdl(self, file_hdl, project=None):
xml = file_hdl.read()
- return self.yang_class.from_xml_v2(self.model, decode(xml), strict=False)
+ return self.yang_class.from_xml_v2(self.model, decode(xml), strict=False) \
+ if not project else self._yang_pb_project_cls.from_xml_v2(self.model, decode(xml), strict=False)
+
+ def _from_json_file_hdl(self, file_hdl, project=None):
+ jstr = file_hdl.read()
+ self._log.debug("Convert from json file: {}".format(jstr))
+
+ try:
+ if not project:
+ desc_msg = self.yang_class.from_json(self.model, decode(jstr), strict=False)
+ else:
+ desc_msg = self._yang_pb_project_cls.from_json(self.model, decode(jstr), strict=False)
+
+ self._log.debug("desc_msg: {}".format(desc_msg.as_dict()))
+ return self.yang_class_project.from_dict(desc_msg.as_dict())
+ except Exception as e:
+ self._log.exception(e)
+ raise e
+
+ def _from_yaml_file_hdl(self, file_hdl, project=None):
+ yml = file_hdl.read()
+
+ try:
+ desc_msg = self.yang_class.from_yaml(self.model, decode(yml), strict=True)
+ except InvalidSchemaException as invalid_scheme_exception:
+ self._log.error("Exception raised during schema translation, %s. Launchpad will" \
+ "continue to process the remaining elements ", str(invalid_scheme_exception))
+ desc_msg = self.yang_class.from_yaml(self.model, decode(yml), strict=False)
+ except Exception as e:
+ self._log.exception(e)
+ raise e
+
+ return self.yang_class_project.from_dict(desc_msg.as_dict())
- def _from_json_file_hdl(self, file_hdl):
- json = file_hdl.read()
+ def to_desc_msg(self, pb_msg, project_rooted=True):
+ """Convert to and from project rooted pb msg descriptor to catalog
+ rooted pb msg
+ project_rooted: if pb_msg is project rooted or not
+ """
+ if project_rooted:
+ if isinstance(pb_msg, self._yang_pb_project_cls):
+ return self._yang_pb_cls.from_dict(pb_msg.as_dict())
+ elif isinstance(pb_msg, self._yang_pb_cls):
+ return pb_msg
- return self.yang_class.from_json(self.model, decode(json), strict=False)
+ else:
+ if isinstance(pb_msg, self._yang_pb_cls):
+ return self._yang_pb_project_cls.from_dict(pb_msg.as_dict())
+ elif isinstance(pb_msg, self._yang_pb_project_cls):
+ return pb_msg
- def _from_yaml_file_hdl(self, file_hdl):
- yaml = file_hdl.read()
+ raise TypeError("Invalid protobuf message type provided: {}".format(type(pb_msg)))
- return self.yang_class.from_yaml(self.model, decode(yaml), strict=False)
- def to_json_string(self, pb_msg):
+ def to_json_string(self, pb_msg, project_ns=False):
""" Serialize a protobuf message into JSON
Arguments:
pb_msg - A GI-protobuf object of type provided into constructor
+ project_ns - Need the desc in project namespace, required for
+ posting to Restconf as part of onboarding
Returns:
A JSON string representing the protobuf message
SerializationError - Message could not be serialized
TypeError - Incorrect protobuf type provided
"""
- if not isinstance(pb_msg, self._yang_pb_cls):
- raise TypeError("Invalid protobuf message type provided")
-
+ self._log.debug("Convert desc to json (ns:{}): {}".format(project_ns, pb_msg.as_dict()))
try:
- json_str = pb_msg.to_json(self.model)
+ # json_str = pb_msg.to_json(self.model)
+
+ desc_msg = self.to_desc_msg(pb_msg, not project_ns)
+ json_str = desc_msg.to_json(self.model)
+ if project_ns:
+ # Remove rw-project:project top level element
+ dic = json.loads(json_str)
+ jstr = json.dumps(dic[NS_PROJECT][0])
+ else:
+ jstr = json_str
except Exception as e:
raise SerializationError(e)
- return json_str
+ self._log.debug("Convert desc to json: {}".format(jstr))
+ return jstr
- def to_yaml_string(self, pb_msg):
+ def to_yaml_string(self, pb_msg, project_ns=False):
""" Serialize a protobuf message into YAML
Arguments:
pb_msg - A GI-protobuf object of type provided into constructor
+ project_ns - Need the desc in project namespace, required for
+ posting to Restconf as part of onboarding
Returns:
A YAML string representing the protobuf message
SerializationError - Message could not be serialized
TypeError - Incorrect protobuf type provided
"""
- if not isinstance(pb_msg, self._yang_pb_cls):
- raise TypeError("Invalid protobuf message type provided")
-
+ self._log.debug("Convert desc to yaml (ns:{}): {}".format(project_ns, pb_msg.as_dict()))
try:
- yaml_str = pb_msg.to_yaml(self.model)
+ desc_msg = self.to_desc_msg(pb_msg, not project_ns)
+ yaml_str = desc_msg.to_yaml(self.model)
+ if project_ns:
+ # Remove rw-project:project top level element
+ dic = yaml.loads(yaml_str)
+ ystr = yaml.dump(dic[NS_PROJECT][0])
+ else:
+ ystr = yaml_str
+
except Exception as e:
+ self._log.exception("Exception converting to yaml: {}".format(e))
raise SerializationError(e)
- return yaml_str
+ return ystr
def to_xml_string(self, pb_msg):
""" Serialize a protobuf message into XML
SerializationError - Message could not be serialized
TypeError - Incorrect protobuf type provided
"""
- if not isinstance(pb_msg, self._yang_pb_cls):
- raise TypeError("Invalid protobuf message type provided")
-
try:
- xml_str = pb_msg.to_xml_v2(self.model)
+ desc_msg = self.to_desc_msg(pb_msg)
+ xml_str = desc_msg.to_xml_v2(self.model)
except Exception as e:
+ self._log.exception("Exception converting to xml: {}".format(e))
raise SerializationError(e)
return xml_str
- def from_file_hdl(self, file_hdl, extension):
+ def from_file_hdl(self, file_hdl, extension, project=None):
""" Returns the deserialized protobuf message from file contents
This function determines the serialization format based on file extension
raise UnknownExtensionError("Cannot detect message format for %s extension" % extension_lc)
try:
- msg = extension_map[extension_lc](self, file_hdl)
+ self._log.debug("Converting from json..project = {}".format(project))
+ msg = extension_map[extension_lc](self, file_hdl, project)
except Exception as e:
raise SerializationError(e)
class VnfdSerializer(ProtoMessageSerializer):
""" Creates a serializer for the VNFD descriptor"""
def __init__(self):
- super().__init__(VnfdYang, VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd)
+ super().__init__(VnfdYang, VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd,
+ ProjectVnfdYang, ProjectVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd)
class NsdSerializer(ProtoMessageSerializer):
""" Creates a serializer for the NSD descriptor"""
def __init__(self):
- super().__init__(NsdYang, NsdYang.YangData_Nsd_NsdCatalog_Nsd)
+ super().__init__(NsdYang, NsdYang.YangData_Nsd_NsdCatalog_Nsd,
+ ProjectNsdYang, ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd)
class RwVnfdSerializer(ProtoMessageSerializer):
""" Creates a serializer for the VNFD descriptor"""
def __init__(self):
- super().__init__(RwVnfdYang, RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd)
+ super().__init__(RwVnfdYang, RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd,
+ RwProjectVnfdYang, RwProjectVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd)
class RwNsdSerializer(ProtoMessageSerializer):
""" Creates a serializer for the NSD descriptor"""
def __init__(self):
- super().__init__(RwNsdYang, RwNsdYang.YangData_Nsd_NsdCatalog_Nsd)
+ super().__init__(RwNsdYang, RwNsdYang.YangData_Nsd_NsdCatalog_Nsd,
+ RwProjectNsdYang, RwProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd)
# Return the root object!
structure = folder_cache[root_dir].serialize()
+ self.set_header('Content-Type','application/json')
self.write(tornado.escape.json_encode(structure))
raise PackageValidationError(msg) from e
if archive_checksums[pkg_file_no_prefix] != file_checksum:
- msg = "{} checksum ({}) did match expected checksum ({})".format(
+ msg = "{} checksum ({}) did not match expected checksum ({})".format(
pkg_file, file_checksum, archive_checksums[pkg_file_no_prefix]
)
self._log.error(msg)
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
@property
def root_dir(self):
return self._root_dir
-
def _get_package_dir(self, package_id):
+ self._log.debug("Package dir {}, {}".format(self._root_dir, package_id))
return os.path.join(self._root_dir, package_id)
def _get_package_files(self, package_id):
return pkg
- def store_package(self, pkg):
+ def store_package(self, pkg, project=None):
""" Store a DescriptorPackage to disk
Arguments:
raise PackageExistsError("Package %s already exists", pkg.descriptor_id)
package_dir = self._get_package_dir(pkg.descriptor_id)
-
try:
os.makedirs(package_dir, exist_ok=True)
except OSError as e:
PackageStoreError - The package could not be deleted
"""
+ self.refresh()
+
if descriptor_id not in self._package_dirs:
raise PackageNotFoundError("Package %s does not exists", descriptor_id)
class NsdPackageFilesystemStore(PackageFilesystemStore):
DEFAULT_ROOT_DIR = os.path.join(
- os.environ["RIFT_ARTIFACTS"],
+ os.environ["RIFT_VAR_ROOT"],
"launchpad", "packages", "nsd"
)
- def __init__(self, log, root_dir=DEFAULT_ROOT_DIR):
+ def __init__(self, log, root_dir=DEFAULT_ROOT_DIR, project=None):
+ root_dir = root_dir if not project else os.path.join(root_dir, project)
super().__init__(log, root_dir)
class VnfdPackageFilesystemStore(PackageFilesystemStore):
DEFAULT_ROOT_DIR = os.path.join(
- os.environ["RIFT_ARTIFACTS"],
+ os.environ["RIFT_VAR_ROOT"],
"launchpad", "packages", "vnfd"
)
- def __init__(self, log, root_dir=DEFAULT_ROOT_DIR):
+ def __init__(self, log, root_dir=DEFAULT_ROOT_DIR, project=None):
+ root_dir = root_dir if not project else os.path.join(root_dir, project)
super().__init__(log, root_dir)
-
+++ /dev/null
-
-#
-# Copyright 2016 RIFT.IO Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import asyncio
-
-from gi.repository import (
- RwDts,
- RwLaunchpadYang,
-)
-
-import rift.mano.dts as mano_dts
-import rift.openmano.openmano_client as openmano_client
-import rift.tasklets
-
-
-class DataCenterPublisher(mano_dts.DtsHandler):
- """
- This class is reponsible for exposing the data centers associated with an
- openmano cloud account.
- """
-
- XPATH = "D,/rw-launchpad:datacenters"
-
- def __init__(self, log, dts, loop):
- """Creates an instance of a DataCenterPublisher
-
- Arguments:
- tasklet - the tasklet that this publisher is registered for
-
- """
- super().__init__(log, dts, loop)
-
- self._ro_sub = mano_dts.ROAccountConfigSubscriber(
- self.log,
- self.dts,
- self.loop,
- callback=self.on_ro_account_change
- )
- self.ro_accounts = {}
-
- def on_ro_account_change(self, ro_account, action):
- if action in [ RwDts.QueryAction.CREATE, RwDts.QueryAction.UPDATE ]:
- self.ro_accounts[ro_account.name] = ro_account
- elif action == RwDts.QueryAction.DELETE and ro_account.name in self.ro_accounts:
- del self.ro_accounts[ro_account.name]
-
- @asyncio.coroutine
- def register(self):
- """Registers the publisher with DTS"""
- yield from self._ro_sub.register()
-
- @asyncio.coroutine
- def on_prepare(xact_info, action, ks_path, msg):
- try:
- # Create a datacenters instance to hold all of the cloud
- # account data.
- datacenters = RwLaunchpadYang.DataCenters()
-
- # Iterate over the known openmano accounts and populate cloud
- # account instances with the corresponding data center info
- for _, account in self.ro_accounts.items():
- if account.account_type != "openmano":
- continue
-
- try:
- ro_account = RwLaunchpadYang.ROAccount()
- ro_account.name = account.name
-
- # Create a client for this cloud account to query for
- # the associated data centers
- client = openmano_client.OpenmanoCliAPI(
- self.log,
- account.openmano.host,
- account.openmano.port,
- account.openmano.tenant_id,
- )
-
- # Populate the cloud account with the data center info
- for uuid, name in client.datacenter_list():
- ro_account.datacenters.append(
- RwLaunchpadYang.DataCenter(
- uuid=uuid,
- name=name,
- )
- )
-
- datacenters.ro_accounts.append(ro_account)
-
- except Exception as e:
- self.log.exception(e)
-
- xact_info.respond_xpath(
- RwDts.XactRspCode.MORE,
- 'D,/rw-launchpad:datacenters',
- datacenters,
- )
-
- xact_info.respond_xpath(RwDts.XactRspCode.ACK)
-
- except Exception as e:
- self.log.exception(e)
- raise
-
- handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
-
- with self.dts.group_create() as group:
- self.reg = group.register(
- xpath=DataCenterPublisher.XPATH,
- handler=handler,
- flags=RwDts.Flag.PUBLISHER,
- )
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
from . import tosca
import gi
-gi.require_version('NsdYang', '1.0')
-gi.require_version('VnfdYang', '1.0')
gi.require_version('RwPkgMgmtYang', '1.0')
from gi.repository import (
- NsdYang,
- VnfdYang,
- RwPkgMgmtYang)
+ RwPkgMgmtYang,
+ RwVnfdYang,
+ RwProjectVnfdYang,
+ RwNsdYang,
+ RwProjectNsdYang
+)
import rift.mano.dts as mano_dts
finally:
package.open = orig_open
- def create_archive(self, archive_hdl, package, desc_json_str, serializer):
+ def create_archive(self, archive_hdl, package, desc_json_str, serializer, project=None):
""" Create a package archive from an existing package, descriptor messages,
and a destination serializer.
ArchiveExportError - The exported archive failed to create
"""
- new_desc_msg = serializer.from_file_hdl(io.BytesIO(desc_json_str.encode()), ".json")
+ new_desc_msg = serializer.from_file_hdl(io.BytesIO(desc_json_str.encode()), ".json", project)
_, dest_ext = os.path.splitext(package.descriptor_file)
new_desc_hdl = io.BytesIO(serializer.to_string(new_desc_msg, dest_ext).encode())
descriptor_checksum = rift.package.checksums.checksum(new_desc_hdl)
checksum_hdl
)
- archive_checksums[package.descriptor_file] = descriptor_checksum
+ # Get the name of the descriptor file without the prefix
+ # (which is what is stored in the checksum file)
+ desc_file_no_prefix = os.path.relpath(package.descriptor_file, package.prefix)
+ archive_checksums[desc_file_no_prefix] = descriptor_checksum
checksum_hdl = io.BytesIO(archive_checksums.to_string().encode())
return checksum_hdl
return archive
- def export_package(self, package, export_dir, file_id, json_desc_str, dest_serializer):
+ def export_package(self, package, export_dir, file_id, json_desc_str, dest_serializer, project=None):
""" Export package as an archive to the export directory
Arguments:
with open(archive_path, 'wb') as archive_hdl:
try:
self.create_archive(
- archive_hdl, package, json_desc_str, dest_serializer
+ archive_hdl, package, json_desc_str, dest_serializer, project
)
except Exception as e:
os.remove(archive_path)
class ExportRpcHandler(mano_dts.AbstractRpcHandler):
- def __init__(self, log, dts, loop, application, store_map, exporter, onboarder, catalog_map):
+ def __init__(self, application, catalog_map):
"""
Args:
application: UploaderApplication
- store_map: dict containing VnfdStore & NsdStore
- exporter : DescriptorPackageArchiveExporter
calalog_map: Dict containing Vnfds and Nsd onboarding.
"""
- super().__init__(log, dts, loop)
+ super().__init__(application.log, application.dts, application.loop)
self.application = application
- self.store_map = store_map
- self.exporter = exporter
- self.onboarder = onboarder
+ self.exporter = application.exporter
+ self.onboarder = application.onboarder
self.catalog_map = catalog_map
- self.log = log
+
+
@property
def xpath(self):
return rpc_out
def export(self, transaction_id, log, msg):
+ DESC_TYPE_PB_MAP = {
+ "vnfd": RwProjectVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd,
+ "nsd": RwProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd
+ }
+
log.message(ExportStart())
desc_type = msg.package_type.lower()
# Parse the IDs
desc_id = msg.package_id
- catalog = self.catalog_map[desc_type]
-
- if desc_id not in catalog:
- raise ValueError("Unable to find package ID: {}".format(desc_id))
-
- desc_msg = catalog[desc_id]
+ catalog = self.catalog_map[desc_type](project=msg.project_name)
+
+ # TODO: Descriptor isn't available from catalog info passed in from launchpad tasklet.
+ # If unavailable, create a filler descriptor object, which will be updated
+ # via GET call to config.
+ if desc_id in catalog:
+ desc_msg = catalog[desc_id]
+ else:
+ log.warn("Unable to find package ID in catalog: {}".format(desc_id))
+ desc_msg = DESC_TYPE_PB_MAP[desc_type](id = desc_id)
+
+ self.store_map = self.application.build_store_map(project=msg.project_name)
+ self.project_name = msg.project_name if msg.has_field('project_name') else None
# Get the schema for exporting
schema = msg.export_schema.lower()
# If that fails, create a temporary package using the descriptor only
try:
package = package_store.get_package(desc_id)
+ #Remove the image file from the package while exporting
+ for file in package.files:
+ if rift.package.image.is_image_file(file):
+ package.remove_file(file)
+
except rift.package.store.PackageNotFoundError:
log.debug("stored package not found. creating package from descriptor config")
log, hdl
)
- # Try to get the updated descriptor from the api endpoint so that we have
- # the updated descriptor file in the exported archive and the name of the archive
- # tar matches the name in the yaml descriptor file. Proceed with the current
- # file if there's an error
+ # Get the updated descriptor from the api endpoint to get any updates
+ # made to the catalog. Also desc_msg may not be populated correctly as yet.
#
- json_desc_msg = src_serializer.to_json_string(desc_msg)
- desc_name, desc_version = desc_msg.name, desc_msg.version
+
try:
- d = collections.defaultdict(dict)
- sub_dict = self.onboarder.get_updated_descriptor(desc_msg)
- root_key, sub_key = "{0}:{0}-catalog".format(desc_type), "{0}:{0}".format(desc_type)
- # root the dict under "vnfd:vnfd-catalog"
- d[root_key] = sub_dict
+ # merge the descriptor content: for rbac everything needs to be project rooted, with project name.
+ D = collections.defaultdict(dict)
+ sub_dict = self.onboarder.get_updated_descriptor(desc_msg, self.project_name)
+
+ if self.project_name:
+ D["project"] = dict(name = self.project_name)
+ root_key, sub_key = "project-{0}:{0}-catalog".format(desc_type), "project-{0}:{0}".format(desc_type)
+ D["project"].update({root_key: sub_dict})
+ else:
+ root_key, sub_key = "{0}:{0}-catalog".format(desc_type), "{0}:{0}".format(desc_type)
+ D[root_key] = sub_dict
- json_desc_msg = json.dumps(d)
- desc_name, desc_version = sub_dict[sub_key]['name'], sub_dict[sub_key]['version']
-
+ json_desc_msg = json.dumps(D)
+ desc_name, desc_version = sub_dict[sub_key]['name'], sub_dict[sub_key].get('version', '')
+
except Exception as e:
msg = "Exception {} raised - {}".format(e.__class__.__name__, str(e))
- self.log.debug(msg)
+ self.log.error(msg)
+ raise ArchiveExportError(msg) from e
# exported filename based on the updated descriptor name
self.filename = "{}_{}".format(desc_name, desc_version)
+ self.log.debug("JSON string for descriptor: {}".format(json_desc_msg))
self.exporter.export_package(
package=package,
file_id = self.filename,
json_desc_str=json_desc_msg,
dest_serializer=dest_serializer,
+ project=self.project_name,
)
def export_tosca(self, format_, schema, desc_type, desc_id, desc_msg, log, transaction_id):
upload_hdl))
else:
- # See if the pacakage can be converted
+ # See if the package can be converted
files = ConvertPackage(self._log,
uploaded_file,
extracted_pkgfile).convert(delete=True)
self._log.debug("Upload converted file: {}".format(f))
upload_hdl = open(f, "r+b")
package = create_package_from_tar_file(upload_hdl)
- tmp_pkgs.append(rift.package.package.TemporaryPackage(self._log,
- package,
- upload_hdl))
+ if package.descriptor_id:
+ tmp_pkgs.append(rift.package.package.TemporaryPackage(self._log,
+ package,
+ upload_hdl))
except Exception as e:
# Cleanup any TemporaryPackage instances created
self._client = client.UploadJobClient(self._log, self._loop, self._dts)
- def upload_image(self, image_name, image_checksum, image_hdl):
+ def upload_image(self, image_name, image_checksum, image_hdl, set_image_property=None):
endpoint = "http://127.0.0.1:9292"
glance_client = glanceclient.Client('1', endpoint, token="asdf")
image = glance_client.images.create(name=image_name, data=image_hdl, is_public="False",
disk_format="qcow2", container_format="bare",
- checksum=image_checksum)
+ checksum=image_checksum, properties=set_image_property)
self._log.debug('Image upload complete: %s', image)
except Exception as e:
raise ImageUploadError("Failed to upload image to catalog: %s" % str(e)) from e
- def upload_image_to_cloud_accounts(self, image_name, image_checksum, cloud_accounts=None):
+ def upload_image_to_cloud_accounts(self, image_name, image_checksum, project, cloud_accounts=None):
self._log.debug("uploading image %s to all cloud accounts", image_name)
- upload_job = self._client.create_job_threadsafe(image_name, image_checksum, cloud_accounts)
+ upload_job = self._client.create_job_threadsafe(image_name, image_checksum, project, cloud_accounts)
try:
upload_job.wait_until_complete_threadsafe()
except client.UploadJobError as e:
raise ImageUploadError("Failed to upload image " + image_name + " to cloud accounts") from e
-
def __repr__(self):
return "{} {}:{}:{}".format(
self.timestamp,
- logging._levelNames.get(self.level, self.level),
+ logging._levelToName.get(self.level, self.level),
self.name,
self.text,
)
import requests
+from rift.mano.utils.project import DEFAULT_PROJECT
from rift.package import convert
from gi.repository import (
- NsdYang,
- RwNsdYang,
- VnfdYang,
- RwVnfdYang,
+ ProjectNsdYang as NsdYang,
+ RwNsdYang as RwNsdYang,
+ RwProjectNsdYang as RwProjectNsdYang,
+ ProjectVnfdYang as VnfdYang,
+ RwVnfdYang as RwVnfdYang,
+ RwProjectVnfdYang as RwProjectVnfdYang,
)
class DescriptorOnboarder(object):
""" This class is responsible for onboarding descriptors using Restconf"""
DESC_ENDPOINT_MAP = {
- NsdYang.YangData_Nsd_NsdCatalog_Nsd: "nsd-catalog/nsd",
+ NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd: "nsd-catalog/nsd",
RwNsdYang.YangData_Nsd_NsdCatalog_Nsd: "nsd-catalog/nsd",
- VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: "vnfd-catalog/vnfd",
- RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: "vnfd-catalog/vnfd",
+ RwProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd: "nsd-catalog/nsd",
+ VnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd: "vnfd-catalog/vnfd",
+ RwProjectVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd: "vnfd-catalog/vnfd",
+ RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: "vnfd-catalog/vnfd"
}
DESC_SERIALIZER_MAP = {
- NsdYang.YangData_Nsd_NsdCatalog_Nsd: convert.NsdSerializer(),
+ NsdYang.YangData_RwProject_Project_NsdCatalog_Nsd: convert.NsdSerializer(),
RwNsdYang.YangData_Nsd_NsdCatalog_Nsd: convert.RwNsdSerializer(),
- VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: convert.VnfdSerializer(),
- RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: convert.RwVnfdSerializer(),
+ RwProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd: convert.RwNsdSerializer(),
+ VnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd: convert.VnfdSerializer(),
+ RwProjectVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd: convert.RwVnfdSerializer(),
+ RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd: convert.RwVnfdSerializer()
}
HEADERS = {"content-type": "application/vnd.yang.data+json"}
self.timeout = DescriptorOnboarder.TIMEOUT_SECS
@classmethod
- def _get_headers(cls, auth):
+ def _get_headers(cls):
headers = cls.HEADERS.copy()
- if auth is not None:
- headers['authorization'] = auth
return headers
- def _get_url(self, descriptor_msg):
+ def _get_url(self, descriptor_msg, project=None):
if type(descriptor_msg) not in DescriptorOnboarder.DESC_SERIALIZER_MAP:
raise TypeError("Invalid descriptor message type")
+ if project is None:
+ project = DEFAULT_PROJECT
+
endpoint = DescriptorOnboarder.DESC_ENDPOINT_MAP[type(descriptor_msg)]
+ ep = "project/{}/{}".format(project, endpoint)
url = "{}://{}:{}/api/config/{}".format(
"https" if self._use_ssl else "http",
self._host,
self.port,
- endpoint,
+ ep,
)
return url
- def _make_request_args(self, descriptor_msg, auth=None):
+ def _make_request_args(self, descriptor_msg, auth=None, project=None):
if type(descriptor_msg) not in DescriptorOnboarder.DESC_SERIALIZER_MAP:
raise TypeError("Invalid descriptor message type")
serializer = DescriptorOnboarder.DESC_SERIALIZER_MAP[type(descriptor_msg)]
- json_data = serializer.to_json_string(descriptor_msg)
- url = self._get_url(descriptor_msg)
+ json_data = serializer.to_json_string(descriptor_msg, project_ns=True)
+ url = self._get_url(descriptor_msg, project=project)
request_args = dict(
url=url,
data=json_data,
- headers=self._get_headers(auth),
- auth=DescriptorOnboarder.AUTH,
+ headers=self._get_headers(),
+ auth=DescriptorOnboarder.AUTH if auth is None else auth,
verify=False,
cert=(self._ssl_cert, self._ssl_key) if self._use_ssl else None,
timeout=self.timeout,
return request_args
- def update(self, descriptor_msg, auth=None):
+ def update(self, descriptor_msg, auth=None, project=None):
""" Update the descriptor config
Arguments:
self._log.error(msg)
raise UpdateError(msg) from e
- def onboard(self, descriptor_msg, auth=None):
+ def onboard(self, descriptor_msg, auth=None, project=None):
""" Onboard the descriptor config
Arguments:
OnboardError - The descriptor config update failed
"""
- request_args = self._make_request_args(descriptor_msg, auth)
+ request_args = self._make_request_args(descriptor_msg, auth, project)
try:
response = requests.post(**request_args)
response.raise_for_status()
except requests.exceptions.ConnectionError as e:
msg = "Could not connect to restconf endpoint: %s" % str(e)
self._log.error(msg)
+ self._log.exception(msg)
raise OnboardError(msg) from e
except requests.exceptions.HTTPError as e:
msg = "POST request to %s error: %s" % (request_args["url"], response.text)
self._log.error(msg)
+ self._log.exception(msg)
raise OnboardError(msg) from e
except requests.exceptions.Timeout as e:
msg = "Timed out connecting to restconf endpoint: %s", str(e)
self._log.error(msg)
+ self._log.exception(msg)
raise OnboardError(msg) from e
- def get_updated_descriptor(self, descriptor_msg, auth=None):
+ def get_updated_descriptor(self, descriptor_msg, project_name, auth=None):
""" Get updated descriptor file
Arguments:
endpoint = DescriptorOnboarder.DESC_ENDPOINT_MAP[type(descriptor_msg)]
- url = "{}://{}:{}/api/config/{}/{}".format(
+ url = "{}://{}:{}/api/config/project/{}/{}/{}".format(
"https" if self._use_ssl else "http",
self._host,
self.port,
+ project_name,
endpoint,
descriptor_msg.id
)
- hdrs = self._get_headers(auth)
+ hdrs = self._get_headers()
hdrs.update({'Accept': 'application/json'})
request_args = dict(
url=url,
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
import tornado.httputil
import tornado.httpserver
import tornado.platform.asyncio
+import abc
import tornadostreamform.multipart_streamer as multipart_streamer
gi.require_version('RwDts', '1.0')
gi.require_version('RwcalYang', '1.0')
gi.require_version('RwTypes', '1.0')
+gi.require_version('rwlib', '1.0')
gi.require_version('RwLaunchpadYang', '1.0')
from gi.repository import (
RwLaunchpadYang as rwlaunchpad,
RwcalYang as rwcal,
RwTypes,
+ RwPkgMgmtYang
)
+import gi.repository.rwlib as rwlib
+from gi.repository.RwKeyspec import quoted_key
import rift.tasklets
import rift.mano.cloud
+import rift.mano.ro_account
import rift.mano.config_agent
+import rift.downloader as downloader
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ get_add_delete_update_cfgs,
+ DEFAULT_PROJECT,
+ )
from rift.package import store
from . import uploader
-from . import datacenters
MB = 1024 * 1024
GB = 1024 * MB
MAX_BUFFER_SIZE = 1 * MB # Max. size loaded into memory!
MAX_BODY_SIZE = 1 * MB # Max. size loaded into memory!
+TaskStatus = RwPkgMgmtYang.TaskStatus
-def get_add_delete_update_cfgs(dts_member_reg, xact, key_name):
- # Unforunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
+class LaunchpadError(Exception):
+ pass
+class LpProjectNotFound(Exception):
+ pass
class CatalogDtsHandler(object):
- def __init__(self, tasklet, app):
+ def __init__(self, project, app):
self.app = app
self.reg = None
- self.tasklet = tasklet
+ self.project = project
@property
def log(self):
- return self.tasklet.log
+ return self.project.log
@property
def dts(self):
- return self.tasklet.dts
+ return self.project.dts
class NsdCatalogDtsHandler(CatalogDtsHandler):
- XPATH = "C,/nsd:nsd-catalog/nsd:nsd"
+ XPATH = "C,/project-nsd:nsd-catalog/project-nsd:nsd"
def add_nsd(self, nsd):
self.log.debug('nsd-catalog-handler:add:{}'.format(nsd.id))
- if nsd.id not in self.tasklet.nsd_catalog:
- self.tasklet.nsd_catalog[nsd.id] = nsd
+ if nsd.id not in self.project.nsd_catalog:
+ self.project.nsd_catalog[nsd.id] = nsd
else:
self.log.error("nsd already in catalog: {}".format(nsd.id))
def update_nsd(self, nsd):
self.log.debug('nsd-catalog-handler:update:{}'.format(nsd.id))
- if nsd.id in self.tasklet.nsd_catalog:
- self.tasklet.nsd_catalog[nsd.id] = nsd
+ if nsd.id in self.project.nsd_catalog:
+ self.project.nsd_catalog[nsd.id] = nsd
else:
self.log.error("unrecognized NSD: {}".format(nsd.id))
def delete_nsd(self, nsd_id):
self.log.debug('nsd-catalog-handler:delete:{}'.format(nsd_id))
- if nsd_id in self.tasklet.nsd_catalog:
- del self.tasklet.nsd_catalog[nsd_id]
+ if nsd_id in self.project.nsd_catalog:
+ del self.project.nsd_catalog[nsd_id]
else:
self.log.error("unrecognized NSD: {}".format(nsd_id))
try:
- self.tasklet.nsd_package_store.delete_package(nsd_id)
+ self.project.nsd_package_store.delete_package(nsd_id)
except store.PackageStoreError as e:
self.log.warning("could not delete package from store: %s", str(e))
def register(self):
def apply_config(dts, acg, xact, action, _):
if xact.xact is None:
- # When RIFT first comes up, an INSTALL is called with the current config
- # Since confd doesn't actally persist data this never has any data so
- # skip this for now.
- self.log.debug("No xact handle. Skipping apply config")
+ if action == rwdts.AppconfAction.INSTALL:
+ if self.reg:
+ for element in self.reg.elements:
+ self.log.debug("Add NSD on restart: {}".format(element.id))
+ self.add_nsd(element)
+ else:
+ self.log.error("DTS handle is null for project {}".
+ format(self.project.name))
+ else:
+ self.log.debug("No xact handle. Skipping apply config")
return
add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
for cfg in update_cfgs:
self.update_nsd(cfg)
- self.log.debug("Registering for NSD catalog")
+ self.log.debug("Registering for NSD catalog in project".
+ format(self.project.name))
acg_handler = rift.tasklets.AppConfGroup.Handler(
on_apply=apply_config,
)
with self.dts.appconf_group_create(acg_handler) as acg:
+ xpath = self.project.add_project(NsdCatalogDtsHandler.XPATH)
self.reg = acg.register(
- xpath=NsdCatalogDtsHandler.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER,
)
+ def deregister(self):
+ if self.reg:
+ self.reg.deregister()
+ self.reg = None
+
class VnfdCatalogDtsHandler(CatalogDtsHandler):
- XPATH = "C,/vnfd:vnfd-catalog/vnfd:vnfd"
+ XPATH = "C,/project-vnfd:vnfd-catalog/project-vnfd:vnfd"
def add_vnfd(self, vnfd):
self.log.debug('vnfd-catalog-handler:add:{}'.format(vnfd.id))
- if vnfd.id not in self.tasklet.vnfd_catalog:
- self.tasklet.vnfd_catalog[vnfd.id] = vnfd
+ if vnfd.id not in self.project.vnfd_catalog:
+ self.project.vnfd_catalog[vnfd.id] = vnfd
else:
self.log.error("VNFD already in catalog: {}".format(vnfd.id))
def update_vnfd(self, vnfd):
self.log.debug('vnfd-catalog-handler:update:{}'.format(vnfd.id))
- if vnfd.id in self.tasklet.vnfd_catalog:
- self.tasklet.vnfd_catalog[vnfd.id] = vnfd
+
+ if vnfd.id in self.project.vnfd_catalog:
+ self.project.vnfd_catalog[vnfd.id] = vnfd
else:
self.log.error("unrecognized VNFD: {}".format(vnfd.id))
def delete_vnfd(self, vnfd_id):
self.log.debug('vnfd-catalog-handler:delete:{}'.format(vnfd_id))
- if vnfd_id in self.tasklet.vnfd_catalog:
- del self.tasklet.vnfd_catalog[vnfd_id]
+ if vnfd_id in self.project.vnfd_catalog:
+ del self.project.vnfd_catalog[vnfd_id]
else:
self.log.error("unrecognized VNFD: {}".format(vnfd_id))
try:
- self.tasklet.vnfd_package_store.delete_package(vnfd_id)
+ self.project.vnfd_package_store.delete_package(vnfd_id)
except store.PackageStoreError as e:
self.log.warning("could not delete package from store: %s", str(e))
def register(self):
def apply_config(dts, acg, xact, action, _):
if xact.xact is None:
- # When RIFT first comes up, an INSTALL is called with the current config
- # Since confd doesn't actally persist data this never has any data so
- # skip this for now.
- self.log.debug("No xact handle. Skipping apply config")
+ if action == rwdts.AppconfAction.INSTALL:
+ if self.reg:
+ for element in self.reg.elements:
+ self.log.error("Add VNFD on restart: {}".format(element.id))
+ self.add_vnfd(element)
+ else:
+ self.log.error("DTS handle is null for project {}".
+ format(self.project.name))
+ else:
+ self.log.debug("No xact handle. Skipping apply config")
return
add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
for cfg in update_cfgs:
self.update_vnfd(cfg)
- self.log.debug("Registering for VNFD catalog")
+ self.log.debug("Registering for VNFD catalog in project {}".
+ format(self.project.name))
acg_handler = rift.tasklets.AppConfGroup.Handler(
on_apply=apply_config,
)
with self.dts.appconf_group_create(acg_handler) as acg:
+ xpath = self.project.add_project(VnfdCatalogDtsHandler.XPATH)
self.reg = acg.register(
- xpath=VnfdCatalogDtsHandler.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER,
)
+ def deregister(self):
+ if self.reg:
+ self.reg.deregister()
+ self.reg = None
+
class CfgAgentAccountHandlers(object):
- def __init__(self, dts, log, log_hdl, loop):
+ def __init__(self, dts, log, log_hdl, loop, project):
self._dts = dts
self._log = log
self._log_hdl = log_hdl
self._loop = loop
+ self._project = project
self._log.debug("creating config agent account config handler")
self.cfg_agent_cfg_handler = rift.mano.config_agent.ConfigAgentSubscriber(
- self._dts, self._log,
+ self._dts, self._log, self._project,
rift.mano.config_agent.ConfigAgentCallbacks(
on_add_apply=self.on_cfg_agent_account_added,
on_delete_apply=self.on_cfg_agent_account_deleted,
self._log.debug("creating config agent account opdata handler")
self.cfg_agent_operdata_handler = rift.mano.config_agent.CfgAgentDtsOperdataHandler(
- self._dts, self._log, self._loop,
+ self._dts, self._log, self._loop, self._project
)
def on_cfg_agent_account_deleted(self, account):
self.cfg_agent_cfg_handler.register()
yield from self.cfg_agent_operdata_handler.register()
+ def deregister(self):
+ self.cfg_agent_operdata_handler.deregister()
+ self.cfg_agent_cfg_handler.deregister()
+
+
class CloudAccountHandlers(object):
- def __init__(self, dts, log, log_hdl, loop, app):
+ def __init__(self, dts, log, log_hdl, loop, app, project):
self._log = log
self._log_hdl = log_hdl
self._dts = dts
self._loop = loop
self._app = app
+ self._project = project
- self._log.debug("creating cloud account config handler")
+ self._log.debug("Creating cloud account config handler for project {}".
+ format(project.name))
self.cloud_cfg_handler = rift.mano.cloud.CloudAccountConfigSubscriber(
- self._dts, self._log, self._log_hdl,
+ self._dts, self._log, self._log_hdl, self._project,
rift.mano.cloud.CloudAccountConfigCallbacks(
on_add_apply=self.on_cloud_account_added,
on_delete_apply=self.on_cloud_account_deleted,
- )
+ ),
)
self._log.debug("creating cloud account opdata handler")
self.cloud_operdata_handler = rift.mano.cloud.CloudAccountDtsOperdataHandler(
- self._dts, self._log, self._loop,
+ self._dts, self._log, self._loop, self._project,
)
def on_cloud_account_deleted(self, account_name):
self._log.debug("cloud account deleted")
- self._app.accounts.clear()
- self._app.accounts.extend(list(self.cloud_cfg_handler.accounts.values()))
+ self._app.accounts[self._project.name] = \
+ list(self.cloud_cfg_handler.accounts.values())
self.cloud_operdata_handler.delete_cloud_account(account_name)
def on_cloud_account_added(self, account):
self._log.debug("cloud account added")
- self._app.accounts.clear()
- self._app.accounts.extend(list(self.cloud_cfg_handler.accounts.values()))
+ self._app.accounts[self._project.name] = \
+ list(self.cloud_cfg_handler.accounts.values())
self._log.debug("accounts: %s", self._app.accounts)
self.cloud_operdata_handler.add_cloud_account(account)
@asyncio.coroutine
def register(self):
- self.cloud_cfg_handler.register()
+ yield from self.cloud_cfg_handler.register()
yield from self.cloud_operdata_handler.register()
+ def deregister(self):
+ self.cloud_cfg_handler.deregister()
+ self.cloud_operdata_handler.deregister()
-class LaunchpadTasklet(rift.tasklets.Tasklet):
- UPLOAD_MAX_BODY_SIZE = MAX_BODY_SIZE
- UPLOAD_MAX_BUFFER_SIZE = MAX_BUFFER_SIZE
- UPLOAD_PORT = "4567"
+class ROAccountHandlers(object):
+ def __init__(self, dts, log, loop, app, project):
+ self._log = log
+ self._dts = dts
+ self._loop = loop
+ self._app = app
+ self._project = project
+
+ self._log.debug("Creating RO account config handler for project {}".
+ format(project.name))
+ self.ro_cfg_handler = rift.mano.ro_account.ROAccountConfigSubscriber(
+ self._dts, self._log, self._loop, self._project, None,
+ rift.mano.ro_account.ROAccountConfigCallbacks(
+ on_add_apply=self.on_ro_account_added,
+ on_delete_apply=self.on_ro_account_deleted,
+ ),
+ )
- def __init__(self, *args, **kwargs):
- super(LaunchpadTasklet, self).__init__(*args, **kwargs)
- self.rwlog.set_category("rw-mano-log")
- self.rwlog.set_subcategory("launchpad")
+ self._log.debug("Creating RO account opdata handler")
+ self.ro_operdata_handler = rift.mano.ro_account.ROAccountDtsOperdataHandler(
+ self._dts, self._log, self._loop, self._project
+ )
- self.app = None
- self.server = None
+ def on_ro_account_deleted(self, account_name):
+ self._log.debug(" launchpad tasklet RO account deleted")
+ self._app.ro_accounts[self._project.name] = \
+ list(self.ro_cfg_handler.accounts.values())
+ self.ro_operdata_handler.delete_ro_account(account_name)
+
+ def on_ro_account_added(self, account):
+ self._log.debug(" launchpad tasklet RO account added")
+ self._app.ro_accounts[self._project.name] = \
+ list(self.ro_cfg_handler.accounts.values())
+ self._log.debug("Accounts: %s", self._app.ro_accounts)
+ self.ro_operdata_handler.add_ro_account(account)
+
+ @asyncio.coroutine
+ def register(self):
+ yield from self.ro_cfg_handler.register()
+ yield from self.ro_operdata_handler.register()
+
+ def deregister(self):
+ self.ro_cfg_handler.deregister()
+ self.ro_operdata_handler.deregister()
+
+class StatusHandlers(object):
+ STATUS_MAP = {
+ downloader.DownloadStatus.STARTED: TaskStatus.QUEUED.value_nick.upper(),
+ downloader.DownloadStatus.IN_PROGRESS: TaskStatus.IN_PROGRESS.value_nick.upper(),
+ downloader.DownloadStatus.COMPLETED: TaskStatus.COMPLETED.value_nick.upper(),
+ downloader.DownloadStatus.FAILED: TaskStatus.FAILED.value_nick.upper(),
+ downloader.DownloadStatus.CANCELLED: TaskStatus.CANCELLED.value_nick.upper()
+ }
+
+ def __init__(self, dts, log, loop, app, project):
+ self.log = log
+ self.dts = dts
+ self.loop = loop
+ self.app = app
+ self.project = project
+
+ @abc.abstractmethod
+ def xpath(self, transaction_id=None):
+ return
+
+ @asyncio.coroutine
+ def register(self):
+ self.reg = yield from self.dts.register(xpath=self.xpath(),
+ flags=rwdts.Flag.PUBLISHER|rwdts.Flag.CACHE|rwdts.Flag.NO_PREP_READ)
+
+ assert self.reg is not None
+
+ def deregister(self):
+ if self.reg:
+ self.reg.deregister()
+ self.reg = None
+
+
+class UploadStatusHandlers(StatusHandlers):
+ """Publisher for status of onboarded packages.
+ """
+ def __init__(self, dts, log, loop, app, project):
+ super(UploadStatusHandlers, self).__init__(dts, log, loop, app, project)
+ self.reg = None
+ self.transaction_to_job_map = {}
+
+ def xpath(self, transaction_id=None):
+ return self.project.add_project("D,/rw-pkg-mgmt:create-jobs/rw-pkg-mgmt:job" +
+ ("[transaction-id={}]".format(quoted_key(transaction_id)) if transaction_id else ""))
+
+ def create_job_xpath(self):
+ return self.project.add_project("D,/rw-pkg-mgmt:create-jobs")
+
+ @asyncio.coroutine
+ def register(self):
+ @asyncio.coroutine
+ def on_prepare(xact_info, action, ks_path, msg):
+ """ prepare callback from dts """
+
+ if action == rwdts.QueryAction.READ:
+ xpath = ks_path.to_xpath(RwPkgMgmtYang.get_schema())
+ path_entry = RwPkgMgmtYang.YangData_RwProject_Project_CreateJobs_Job().schema().keyspec_to_entry(ks_path)
+ transaction_id = path_entry.key00.transaction_id
+ if transaction_id:
+ create_job_msg = msg.as_dict()
+ if create_job_msg:
+ if transaction_id in self.transaction_to_job_map:
+ job = self.transaction_to_job_map[transaction_id]
+ xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK,
+ xpath=xpath,
+ msg=job)
+ return
+ else:
+ jobs = RwPkgMgmtYang.YangData_RwProject_Project_CreateJobs()
+ for job in self.transaction_to_job_map.values():
+ jb = RwPkgMgmtYang.YangData_RwProject_Project_CreateJobs_Job.from_dict({
+ "transaction_id": job.transaction_id,
+ "status": job.status
+ })
+ jobs.job.append(jb)
+ xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK,
+ xpath=self.create_job_xpath(),
+ msg=jobs)
+ return
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
+ with self.dts.group_create() as group:
+ self.reg = group.register(xpath=self.xpath(),
+ handler=hdl,
+ flags=rwdts.Flag.PUBLISHER,
+ )
+
+ def upload_status(self, job, trans_id):
+ try:
+ create_job = RwPkgMgmtYang.YangData_RwProject_Project_CreateJobs_Job.from_dict({
+ "transaction_id": trans_id,
+ "status": StatusHandlers.STATUS_MAP[job.status]
+ })
+ self.transaction_to_job_map[trans_id] = create_job
+ except Exception as e:
+ self.log.error("Exception : {}".format(e))
+
+class UpdateStatusHandlers(StatusHandlers):
+ """Publisher for status of updated packages.
+ """
+ def __init__(self, dts, log, loop, app, project):
+ super(UpdateStatusHandlers, self).__init__(dts, log, loop, app, project)
+
+ def xpath(self, transaction_id=None):
+ return self.project.add_project("D,/rw-pkg-mgmt:update-jobs/rw-pkg-mgmt:job" +
+ ("[transaction-id={}]".format(quoted_key(transaction_id)) if transaction_id else ""))
+
+ @asyncio.coroutine
+ def schedule_dts_work(self, job, transaction_id):
+ # Publish the download state
+ create_job = RwPkgMgmtYang.YangData_RwProject_Project_UpdateJobs_Job.from_dict({
+ "transaction_id": transaction_id,
+ "status": StatusHandlers.STATUS_MAP[job.status]
+ })
+
+ self.reg.update_element(
+ self.xpath(transaction_id=transaction_id), create_job)
+
+ def update_status(self, job, trans_id):
+ self.log.debug("Download completed, writing status of task")
+ asyncio.ensure_future(self.schedule_dts_work(job, trans_id), loop=self.loop)
+
+class LaunchpadProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(LaunchpadProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+ self._app = kw['app']
- self.account_handler = None
self.config_handler = None
self.nsd_catalog_handler = None
self.vld_catalog_handler = None
self.vnfd_catalog_handler = None
self.cloud_handler = None
- self.datacenter_handler = None
+ self.ro_handler = None
self.lp_config_handler = None
-
- self.vnfd_package_store = store.VnfdPackageFilesystemStore(self.log)
- self.nsd_package_store = store.NsdPackageFilesystemStore(self.log)
+ self.account_handler = None
+ self.upload_handlers = None
+ self.update_handlers = None
self.nsd_catalog = dict()
self.vld_catalog = dict()
self.vnfd_catalog = dict()
+ self.nsd_package_store = rift.package.store.NsdPackageFilesystemStore(tasklet.log,
+ project=name)
+ self.vnfd_package_store = rift.package.store.VnfdPackageFilesystemStore(tasklet.log,
+ project=name)
+
+ @property
+ def dts(self):
+ return self._dts
+
+ @property
+ def loop(self):
+ return self._loop
+
+ @property
+ def upload_status_handler(self):
+ return self.upload_handlers
+
+ @property
+ def update_status_handler(self):
+ return self.update_handlers
+
+ @asyncio.coroutine
+ def register(self):
+ self.log.debug("creating NSD catalog handler for project {}".format(self.name))
+ self.nsd_catalog_handler = NsdCatalogDtsHandler(self, self._app)
+ yield from self.nsd_catalog_handler.register()
+
+ self.log.debug("creating VNFD catalog handler for project {}".format(self.name))
+ self.vnfd_catalog_handler = VnfdCatalogDtsHandler(self, self._app)
+ yield from self.vnfd_catalog_handler.register()
+
+ self.log.debug("creating cloud account handler for project {}".format(self.name))
+ self.cloud_handler = CloudAccountHandlers(self.dts, self.log, self.log_hdl,
+ self.loop, self._app, self)
+ yield from self.cloud_handler.register()
+
+ self.log.debug("creating RO account handler for project {}".format(self.name))
+ self.ro_handler = ROAccountHandlers(self.dts, self.log, self.loop, self._app, self)
+ yield from self.ro_handler.register()
+
+ self.log.debug("creating config agent handler for project {}".format(self.name))
+ self.config_handler = CfgAgentAccountHandlers(self.dts, self.log, self.log_hdl,
+ self.loop, self)
+ yield from self.config_handler.register()
+
+ self.log.debug("creating upload handler for project {}".format(self.name))
+ self.upload_handlers = UploadStatusHandlers(self.dts, self.log, self.loop,
+ self._app, self)
+ yield from self.upload_handlers.register()
+
+ self.log.debug("creating update handler for project {}".format(self.name))
+ self.update_handlers = UpdateStatusHandlers(self.dts, self.log, self.loop,
+ self._app, self)
+ yield from self.update_handlers.register()
+
+ def deregister(self):
+ self.log.debug("De-register handlers for project: {}".format(self.name))
+ self.config_handler.deregister()
+ self.cloud_handler.deregister()
+ self.ro_handler.deregister()
+ self.vnfd_catalog_handler.deregister()
+ self.nsd_catalog_handler.deregister()
+ self.update_handlers.deregister()
+ self.upload_handlers.deregister()
@property
def cloud_accounts(self):
return list(self.cloud_handler.cloud_cfg_handler.accounts.values())
+ @property
+ def ro_accounts(self):
+ if self.ro_handler is None:
+ return list()
+
+ return list(self.ro_handler.ro_cfg_handler.accounts.values())
+
+class LaunchpadTasklet(rift.tasklets.Tasklet):
+ UPLOAD_MAX_BODY_SIZE = MAX_BODY_SIZE
+ UPLOAD_MAX_BUFFER_SIZE = MAX_BUFFER_SIZE
+ UPLOAD_PORT = "4567"
+
+ def __init__(self, *args, **kwargs):
+ super(LaunchpadTasklet, self).__init__(*args, **kwargs)
+ self.rwlog.set_category("rw-mano-log")
+ self.rwlog.set_subcategory("launchpad")
+
+ self.dts = None
+ self.project_handler = None
+
+ self.app = None
+ self.server = None
+ self.projects = {}
+
+ def _get_project(self, project=None):
+ if project is None:
+ project = DEFAULT_PROJECT
+
+ if project in self.projects:
+ return self.projects[project]
+
+ msg = "Project {} not found".format(project)
+ self._log.error(msg)
+ raise LpProjectNotFound(msg)
+
+ def nsd_catalog_get(self, project=None):
+ return self._get_project(project=project).nsd_catalog
+
+ def vnfd_catalog_get(self, project=None):
+ return self._get_project(project=project).vnfd_catalog
+
+ def get_cloud_accounts(self, project=None):
+ return self._get_project(project=project).cloud_accounts
+
def start(self):
super(LaunchpadTasklet, self).start()
self.log.info("Starting LaunchpadTasklet")
self.log.exception("Caught Exception in LP stop")
raise
+ def get_vnfd_catalog(self, project):
+ return self.projects[project].vnfd_catalog
+
+ def get_nsd_catalog(self, project):
+ return self.projects[project].nsd_catalog
+
@asyncio.coroutine
def init(self):
- io_loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
- self.app = uploader.UploaderApplication.from_tasklet(self)
- yield from self.app.register()
-
- manifest = self.tasklet_info.get_pb_manifest()
- ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
- ssl_key = manifest.bootstrap_phase.rwsecurity.key
- ssl_options = {
+ try:
+ io_loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
+ self.app = uploader.UploaderApplication.from_tasklet(self)
+ yield from self.app.register()
+
+ manifest = self.tasklet_info.get_pb_manifest()
+ ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
+ ssl_key = manifest.bootstrap_phase.rwsecurity.key
+ ssl_options = {
"certfile": ssl_cert,
"keyfile": ssl_key,
- }
-
- if manifest.bootstrap_phase.rwsecurity.use_ssl:
- self.server = tornado.httpserver.HTTPServer(
- self.app,
- max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE,
- io_loop=io_loop,
- ssl_options=ssl_options,
- )
-
- else:
- self.server = tornado.httpserver.HTTPServer(
- self.app,
- max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE,
- io_loop=io_loop,
- )
-
- self.log.debug("creating NSD catalog handler")
- self.nsd_catalog_handler = NsdCatalogDtsHandler(self, self.app)
- yield from self.nsd_catalog_handler.register()
-
- self.log.debug("creating VNFD catalog handler")
- self.vnfd_catalog_handler = VnfdCatalogDtsHandler(self, self.app)
- yield from self.vnfd_catalog_handler.register()
-
- self.log.debug("creating datacenter handler")
- self.datacenter_handler = datacenters.DataCenterPublisher(self.log, self.dts, self.loop)
- yield from self.datacenter_handler.register()
+ }
+
+ if manifest.bootstrap_phase.rwsecurity.use_ssl:
+ self.server = tornado.httpserver.HTTPServer(
+ self.app,
+ max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE,
+ io_loop=io_loop,
+ ssl_options=ssl_options,
+ )
- self.log.debug("creating cloud account handler")
- self.cloud_handler = CloudAccountHandlers(
- self.dts, self.log, self.log_hdl, self.loop, self.app
+ else:
+ self.server = tornado.httpserver.HTTPServer(
+ self.app,
+ max_body_size=LaunchpadTasklet.UPLOAD_MAX_BODY_SIZE,
+ io_loop=io_loop,
)
- yield from self.cloud_handler.register()
- self.log.debug("creating config agent handler")
- self.config_handler = CfgAgentAccountHandlers(self.dts, self.log, self.log_hdl, self.loop)
- yield from self.config_handler.register()
+ self.log.debug("Registering project handler")
+ self.project_handler = ProjectHandler(self, LaunchpadProject,
+ app=self.app)
+ self.project_handler.register()
+
+ except Exception as e:
+ self.log.error("Exception : {}".format(e))
+ self.log.exception(e)
@asyncio.coroutine
def run(self):
- self.server.listen(LaunchpadTasklet.UPLOAD_PORT)
+ address = rwlib.getenv("RWVM_INTERNAL_IPADDR")
+ if (address is None):
+ address=""
+ self.server.listen(LaunchpadTasklet.UPLOAD_PORT, address=address)
+ self.server.listen(LaunchpadTasklet.UPLOAD_PORT, address="127.0.0.1")
def on_instance_started(self):
self.log.debug("Got instance started callback")
next_state = switch.get(state, None)
if next_state is not None:
self.dts.handle.set_state(next_state)
+
import threading
import uuid
import zlib
+import re
import tornado
import tornado.escape
import gi
gi.require_version('RwLaunchpadYang', '1.0')
-gi.require_version('NsdYang', '1.0')
-gi.require_version('VnfdYang', '1.0')
+gi.require_version('ProjectNsdYang', '1.0')
+gi.require_version('ProjectVnfdYang', '1.0')
from gi.repository import (
- NsdYang,
- VnfdYang,
+ ProjectNsdYang as NsdYang,
+ ProjectVnfdYang as VnfdYang,
)
import rift.mano.cloud
-import rift.package.charm
import rift.package.checksums
-import rift.package.config
import rift.package.convert
import rift.package.handler as pkg_handler
import rift.package.icon
from gi.repository import (
RwDts as rwdts,
- RwPkgMgmtYang)
+ RwPkgMgmtYang
+ )
import rift.downloader as downloader
import rift.mano.dts as mano_dts
import rift.tasklets
from .tosca import ExportTosca
+from .onboard import OnboardError as OnboardException
+
MB = 1024 * 1024
GB = 1024 * MB
RPC_PACKAGE_CREATE_ENDPOINT = RwPkgMgmtYang.YangOutput_RwPkgMgmt_PackageCreate
RPC_PACKAGE_UPDATE_ENDPOINT = RwPkgMgmtYang.YangOutput_RwPkgMgmt_PackageUpdate
-
-
class HttpMessageError(Exception):
def __init__(self, code, msg):
self.code = code
class UploadRpcHandler(mano_dts.AbstractRpcHandler):
- def __init__(self, log, dts, loop, application):
+ def __init__(self, application):
"""
Args:
application: UploaderApplication
"""
- super().__init__(log, dts, loop)
+ super().__init__(application.log, application.dts, application.loop)
self.application = application
@property
log = self.application.get_logger(transaction_id)
log.message(OnboardStart())
+ self.log.debug("Package create RPC: {}".format(msg))
auth = None
if msg.username is not None:
auth = (msg.username, msg.password)
+ try:
+ project = msg.project_name
+ except AttributeError as e:
+ self._log.warning("Did not get project name in RPC: {}".
+ format(msg.as_dict()))
+ project = rift.mano.utils.project.DEFAULT_PROJECT
+
self.application.onboard(
msg.external_url,
transaction_id,
- auth=auth
+ auth=auth,
+ project=project,
)
rpc_op = RPC_PACKAGE_CREATE_ENDPOINT.from_dict({
- "transaction_id": transaction_id})
+ "transaction_id": transaction_id,
+ "project_name": project,
+ })
return rpc_op
class UpdateRpcHandler(mano_dts.AbstractRpcHandler):
- def __init__(self, log, dts, loop, application):
+ def __init__(self, application):
"""
Args:
application: UploaderApplication
"""
- super().__init__(log, dts, loop)
+ super().__init__(application.log, application.dts, application.loop)
self.application = application
@property
self.application.update(
msg.external_url,
transaction_id,
- auth=auth
+ auth=auth,
+ project=msg.project_name,
)
rpc_op = RPC_PACKAGE_UPDATE_ENDPOINT.from_dict({
- "transaction_id": transaction_id})
+ "transaction_id": transaction_id,
+ "project_name": msg.project_name,
+ })
return rpc_op
class UpdatePackage(downloader.DownloaderProtocol):
- def __init__(self, log, loop, url, auth,
- onboarder, uploader, package_store_map):
+ def __init__(self, log, loop, project, url, auth,
+ onboarder, uploader, package_store_map, transaction_id):
super().__init__()
self.log = log
self.loop = loop
+ self.project = project
self.url = url
self.auth = auth
self.onboarder = onboarder
self.uploader = uploader
self.package_store_map = package_store_map
+ self.transaction_id = transaction_id
def _update_package(self, packages):
with pkg as temp_package:
package_checksums = self.validate_package(temp_package)
stored_package = self.update_package(temp_package)
- self.validate_vnfd_fields(temp_package)
+ self.validate_descriptor_fields(temp_package)
try:
- self.extract_charms(temp_package)
- self.extract_scripts(temp_package)
- self.extract_configs(temp_package)
self.extract_icons(temp_package)
-
self.update_descriptors(temp_package)
except Exception:
except MessageException as e:
self.log.message(e.msg)
self.log.message(UpdateFailure())
+ raise UpdateFailure(str(e))
except Exception as e:
self.log.exception(e)
file_backed_packages = extractor.create_packages_from_upload(
job.filename, job.filepath
)
+ try:
+ self.extract(file_backed_packages)
+ except Exception as e:
+ raise Exception("Error in Package Update")
+
+ def on_download_finished(self, job):
+ self.log.debug("*** Download completed")
+ if hasattr(self.project, 'update_status_handler'):
+ self.project.update_status_handler.update_status(job, self.transaction_id)
- self.extract(file_backed_packages)
+ def on_download_progress(self, job):
+ self.log.debug("*** Download in progress")
+ if hasattr(self.project, 'update_status_handler'):
+ self.project.update_status_handler.update_status(job, self.transaction_id)
def on_download_failed(self, job):
self.log.error(job.detail)
)
try:
self.uploader.upload_image(image_name, image_checksum, image_hdl)
- self.uploader.upload_image_to_cloud_accounts(image_name, image_checksum)
+ self.uploader.upload_image_to_cloud_accounts(image_name, image_checksum, self.project)
except image.ImageUploadError as e:
self.log.exception("Failed to upload image: %s", image_name)
finally:
_ = [image_hdl.close() for image_hdl in name_hdl_map.values()]
- def extract_charms(self, package):
- try:
- charm_extractor = rift.package.charm.PackageCharmExtractor(self.log)
- charm_extractor.extract_charms(package)
- except rift.package.charm.CharmExtractionError as e:
- raise MessageException(UpdateExtractionError()) from e
-
- def extract_scripts(self, package):
- try:
- script_extractor = rift.package.script.PackageScriptExtractor(self.log)
- script_extractor.extract_scripts(package)
- except rift.package.script.ScriptExtractionError as e:
- raise MessageException(UpdateExtractionError()) from e
-
- def extract_configs(self, package):
- try:
- config_extractor = rift.package.config.PackageConfigExtractor(self.log)
- config_extractor.extract_configs(package)
- except rift.package.config.ConfigExtractionError as e:
- raise MessageException(UpdateExtractionError()) from e
-
def extract_icons(self, package):
try:
icon_extractor = rift.package.icon.PackageIconExtractor(self.log)
except rift.package.icon.IconExtractionError as e:
raise MessageException(UpdateExtractionError()) from e
- def validate_vnfd_fields(self, package):
+ def validate_descriptor_fields(self, package):
# We can add more VNFD validations here. Currently we are validating only cloud-init
if package.descriptor_msg is not None:
self.validate_cloud_init_file(package)
self.log.message(UpdateDescriptorUpdate())
try:
- self.onboarder.update(descriptor_msg)
+ self.onboarder.update(descriptor_msg, project=self.project.name)
except onboard.UpdateError as e:
raise MessageException(UpdateDescriptorError(package.descriptor_file)) from e
class OnboardPackage(downloader.DownloaderProtocol):
- def __init__(self, log, loop, url, auth,
- onboarder, uploader, package_store_map):
+ def __init__(self, log, loop, project, url, auth,
+ onboarder, uploader, package_store_map, transaction_id):
self.log = log
self.loop = loop
+ self.project = project
self.url = url
self.auth = auth
self.onboarder = onboarder
self.uploader = uploader
self.package_store_map = package_store_map
+ self.transaction_id = transaction_id
def _onboard_package(self, packages):
# Extract package could return multiple packages if
with pkg as temp_package:
package_checksums = self.validate_package(temp_package)
stored_package = self.store_package(temp_package)
- self.validate_vnfd_fields(temp_package)
+ self.validate_descriptor_fields(temp_package)
try:
- self.extract_charms(temp_package)
- self.extract_scripts(temp_package)
- self.extract_configs(temp_package)
self.extract_icons(temp_package)
-
self.onboard_descriptors(temp_package)
- except Exception:
- self.delete_stored_package(stored_package)
+ except Exception as e:
+ if "data-exists" not in e.msg.text:
+ self.delete_stored_package(stored_package)
raise
-
else:
self.upload_images(temp_package, package_checksums)
except MessageException as e:
self.log.message(e.msg)
self.log.message(OnboardFailure())
+ raise OnboardException(OnboardFailure())
+
except Exception as e:
self.log.exception(e)
file_backed_packages = extractor.create_packages_from_upload(
job.filename, job.filepath
)
+ try:
+ self.extract(file_backed_packages)
+ except Exception as e:
+ raise Exception("Error in Onboarding Package")
+
+ def on_download_finished(self, job):
+ self.log.debug("*** Download completed")
+ if hasattr(self.project, 'upload_status_handler'):
+ self.project.upload_status_handler.upload_status(job, self.transaction_id)
- self.extract(file_backed_packages)
+ def on_download_progress(self, job):
+ self.log.debug("*** Download in progress")
+ if hasattr(self.project, 'upload_status_handler'):
+ self.project.upload_status_handler.upload_status(job, self.transaction_id)
def on_download_failed(self, job):
self.log.error(job.detail)
def download_package(self):
+ self.log.debug("Before pkg download, project = {}".format(self.project.name))
_, filename = tempfile.mkstemp()
url_downloader = downloader.UrlDownloader(
self.url,
package.open(image_file_map[image_name])
)
try:
- self.uploader.upload_image(image_name, image_checksum, image_hdl)
- self.uploader.upload_image_to_cloud_accounts(image_name, image_checksum)
+ set_image_property = {}
+ self.uploader.upload_image(image_name, image_checksum, image_hdl, set_image_property)
+ self.uploader.upload_image_to_cloud_accounts(image_name, image_checksum, self.project.name)
except image.ImageUploadError as e:
- raise MessageException(OnboardImageUploadError()) from e
+ raise MessageException(OnboardImageUploadError(str(e))) from e
finally:
_ = [image_hdl.close() for image_hdl in name_hdl_map.values()]
- def extract_charms(self, package):
- try:
- charm_extractor = rift.package.charm.PackageCharmExtractor(self.log)
- charm_extractor.extract_charms(package)
- except rift.package.charm.CharmExtractionError as e:
- raise MessageException(OnboardExtractionError()) from e
-
- def extract_scripts(self, package):
- try:
- script_extractor = rift.package.script.PackageScriptExtractor(self.log)
- script_extractor.extract_scripts(package)
- except rift.package.script.ScriptExtractionError as e:
- raise MessageException(OnboardExtractionError()) from e
-
- def extract_configs(self, package):
- try:
- config_extractor = rift.package.config.PackageConfigExtractor(self.log)
- config_extractor.extract_configs(package)
- except rift.package.config.ConfigExtractionError as e:
- raise MessageException(OnboardExtractionError()) from e
-
def extract_icons(self, package):
try:
icon_extractor = rift.package.icon.PackageIconExtractor(self.log)
except rift.package.icon.IconExtractionError as e:
raise MessageException(OnboardExtractionError()) from e
- def validate_vnfd_fields(self, package):
- # We can add more VNFD validations here. Currently we are validating only cloud-init
+ def validate_descriptor_fields(self, package):
+ # We can add more VNFD/NSD validations here.
if package.descriptor_msg is not None:
self.validate_cloud_init_file(package)
+ self.validate_vld_mgmt_network(package)
+
+ def validate_vld_mgmt_network(self, package):
+ """ This is validation at onboarding of NSD for atleast one of the VL's to have mgmt network true
+ and have minimum one connection point"""
+ if package.descriptor_type == 'nsd':
+ for vld in package.descriptor_msg.as_dict().get('vld',[]):
+ if vld.get('mgmt_network', False) is True and \
+ len(vld.get('vnfd_connection_point_ref',[])) > 0 :
+ break
+ else:
+ self.log.error(("AtLeast One of the VL's should have Management Network as True "
+ "and have minimum one connection point"))
def validate_cloud_init_file(self, package):
""" This validation is for VNFDs with associated VDUs. """
return validators[0].checksums
def onboard_descriptors(self, package):
- descriptor_msg = package.descriptor_msg
+ def process_error_messsage(exception, package):
+ """
+ This method captures error reason. This needs to be enhanced with time.
+ """
+ exception_msg = str(exception)
+ match_duplicate = re.findall('<error-tag>(.*?)</error-tag>', exception_msg, re.DOTALL)
+
+ if len(match_duplicate) > 0:
+ error_message = str(match_duplicate[0])
+ return error_message
+
+ match = re.findall('<tailf:missing-element>(.*?)</tailf:missing-element>', exception_msg, re.DOTALL)
+ error_message = ""
+ if len(match) > 0:
+ for element in match:
+ element_message = "Missing element : {}".format(element)
+ error_message += element_message
+ else:
+ error_message = package.descriptor_file
+ return error_message
+
+ def process_exception(exception, package):
+ return OnboardDescriptorError(process_error_messsage(exception, package))
+ descriptor_msg = package.descriptor_msg
self.log.message(OnboardDescriptorOnboard())
try:
- self.onboarder.onboard(descriptor_msg)
+ self.onboarder.onboard(descriptor_msg, project=self.project.name)
except onboard.OnboardError as e:
- raise MessageException(OnboardDescriptorError(package.descriptor_file)) from e
+ raise MessageException(process_exception(e, package)) from e
class UploaderApplication(tornado.web.Application):
ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
ssl_key = manifest.bootstrap_phase.rwsecurity.key
return cls(
- tasklet.log,
- tasklet.dts,
- tasklet.loop,
- ssl=(ssl_cert, ssl_key),
- vnfd_store=tasklet.vnfd_package_store,
- nsd_store=tasklet.nsd_package_store,
- vnfd_catalog=tasklet.vnfd_catalog,
- nsd_catalog=tasklet.nsd_catalog)
+ tasklet,
+ ssl=(ssl_cert, ssl_key))
def __init__(
self,
- log,
- dts,
- loop,
+ tasklet,
ssl=None,
vnfd_store=None,
- nsd_store=None,
- vnfd_catalog=None,
- nsd_catalog=None):
+ nsd_store=None):
- self.log = log
- self.loop = loop
- self.dts = dts
+ self.log = tasklet.log
+ self.loop = tasklet.loop
+ self.dts = tasklet.dts
+
+ self.accounts = {}
+ self.ro_accounts = {}
self.use_ssl = False
self.ssl_cert, self.ssl_key = None, None
self.use_ssl = True
self.ssl_cert, self.ssl_key = ssl
- if not vnfd_store:
- vnfd_store = rift.package.store.VnfdPackageFilesystemStore(self.log)
-
- if not nsd_store:
- nsd_store = rift.package.store.NsdPackageFilesystemStore(self.log)
-
- self.accounts = []
self.messages = collections.defaultdict(list)
- self.export_dir = os.path.join(os.environ['RIFT_ARTIFACTS'], 'launchpad/exports')
+ self.export_dir = os.path.join(os.environ['RIFT_VAR_ROOT'], 'launchpad/exports')
self.uploader = image.ImageUploader(self.log, self.loop, self.dts)
self.onboarder = onboard.DescriptorOnboarder(
self.log, "127.0.0.1", 8008, self.use_ssl, self.ssl_cert, self.ssl_key
)
- self.package_store_map = {
- "vnfd": vnfd_store,
- "nsd": nsd_store
- }
self.exporter = export.DescriptorPackageArchiveExporter(self.log)
self.loop.create_task(export.periodic_export_cleanup(self.log, self.loop, self.export_dir))
- self.vnfd_catalog = vnfd_catalog
- self.nsd_catalog = nsd_catalog
+ self.tasklet = tasklet
+ self.get_vnfd_catalog = tasklet.get_vnfd_catalog
+ self.get_nsd_catalog = tasklet.get_nsd_catalog
catalog_map = {
- "vnfd": self.vnfd_catalog,
- "nsd": self.nsd_catalog
+ "vnfd": self.get_vnfd_catalog,
+ "nsd": self.get_nsd_catalog
}
- self.upload_handler = UploadRpcHandler(self.log, self.dts, self.loop, self)
- self.update_handler = UpdateRpcHandler(self.log, self.dts, self.loop, self)
- self.export_handler = export.ExportRpcHandler(
- self.log,
- self.dts,
- self.loop,
- self,
- store_map=self.package_store_map,
- exporter=self.exporter,
- onboarder=self.onboarder,
- catalog_map=catalog_map
- )
+ self.upload_handler = UploadRpcHandler(self)
+ self.update_handler = UpdateRpcHandler(self)
+ self.export_handler = export.ExportRpcHandler(self, catalog_map)
attrs = dict(log=self.log, loop=self.loop)
super(UploaderApplication, self).__init__([
(r"/api/package/vnfd/(.*)", pkg_handler.FileRestApiHandler, {
- 'path': vnfd_store.root_dir}),
+ 'path': rift.package.store.VnfdPackageFilesystemStore.DEFAULT_ROOT_DIR}),
(r"/api/package/nsd/(.*)", pkg_handler.FileRestApiHandler, {
- 'path': nsd_store.root_dir}),
+ 'path': rift.package.store.NsdPackageFilesystemStore.DEFAULT_ROOT_DIR}),
(r"/api/upload/([^/]+)/state", UploadStateHandler, attrs),
(r"/api/update/([^/]+)/state", UpdateStateHandler, attrs),
def get_logger(self, transaction_id):
return message.Logger(self.log, self.messages[transaction_id])
- def onboard(self, url, transaction_id, auth=None):
+ def build_store_map(self, project=None):
+ ''' Use project information to build vnfd/nsd filesystem stores with appropriate
+ package directory root.
+ '''
+ vnfd_store = rift.package.store.VnfdPackageFilesystemStore(self.log) if not \
+ project else rift.package.store.VnfdPackageFilesystemStore(self.log, project=project)
+ nsd_store = rift.package.store.NsdPackageFilesystemStore(self.log) if not \
+ project else rift.package.store.NsdPackageFilesystemStore(self.log, project=project)
+
+ return dict(vnfd = vnfd_store, nsd = nsd_store)
+
+ def onboard(self, url, transaction_id, auth=None, project=None):
log = message.Logger(self.log, self.messages[transaction_id])
+ try:
+ self.project = self.tasklet._get_project(project)
+ except Exception as e:
+ self.log.error("Exception raised ...%s" % (str(e)))
+ self.log.exception(e)
+
+ self.package_store_map = self.build_store_map(project)
onboard_package = OnboardPackage(
log,
self.loop,
+ self.project,
url,
auth,
self.onboarder,
self.uploader,
self.package_store_map,
+ transaction_id
)
self.loop.run_in_executor(None, onboard_package.download_package)
- def update(self, url, transaction_id, auth=None):
+ def update(self, url, transaction_id, auth=None, project=None):
log = message.Logger(self.log, self.messages[transaction_id])
+ try:
+ self.project = self.tasklet._get_project(project)
+ except Exception as e:
+ self.log.error("Exception raised ...%s" % (str(e)))
+ self.log.exception(e)
+
+ self.package_store_map = self.build_store_map(project)
update_package = UpdatePackage(
log,
self.loop,
+ self.project,
url,
auth,
self.onboarder,
self.uploader,
self.package_store_map,
+ transaction_id
)
self.loop.run_in_executor(None, update_package.download_package)
-
self._log = log
self._args = args
+ self._project = args.project
+
self._pkgs = None
self._service_name = None
self._nsd_id = None
self._dc = None
- self._account = None
+ self._ro = None
self._ip = args.so_ip
self._api_server_ip = "localhost"
user=self._user,
passwd=self._password,
api_server_ip=self._api_server_ip)
+
self._upload_url = "curl -k https://{ip}:{port}/api/upload". \
format(ip=self._ip,
port=self._uport)
self._headers = '-H "accept: application/json"' + \
' -H "content-type: application/json"'
- self._conf_url = "curl -k {header} --user \"{user}:{passwd}\" https://{ip}:{port}/api/config". \
+
+ self._conf_url = "curl -k {header} --user \"{user}:{passwd}\" https://{ip}:{port}/api/config/project/{project}". \
format(header=self._headers,
user=self._user,
passwd=self._password,
ip=self._ip,
- port=self._rport)
- self._oper_url = "curl -k {header} --user \"{user}:{passwd}\" https://{ip}:{port}/api/operational". \
+ port=self._rport,
+ project=self._project)
+
+ self._oper_url = "curl -k {header} --user \"{user}:{passwd}\" https://{ip}:{port}/api/operational/project/{project}". \
format(header=self._headers,
user=self._user,
passwd=self._password,
ip=self._ip,
- port=self._rport)
+ port=self._rport,
+ project=self._project)
@property
def log(self):
return self._log
def validate_args(self):
+ args = self._args
if args.upload_pkg is not None:
self._pkgs = args.upload_pkg
self.log.debug("Packages to upload: {}".format(self._pkgs))
raise OnboardPkgMissingDescId("NS Descriptor ID required for instantiation")
if args.datacenter:
- try:
- uuid.UUID(args.datacenter)
- self._dc = args.datacenter
- except ValueError as e:
- raise OnboardPkgInvalidDescId("Invalid UUID for datacenter: {}".
- format(args.datacenter))
-
- elif args.vim_account:
- self._account = args.vim_account
-
- else:
- raise OnboardPkgMissingAcct("Datacenter or VIM account required for instantiation")
+ self._dc = args.datacenter
+ if args.resource_orchestrator:
+ self._ro = args.resource_orchestrator
+
self._service_name = args.instantiate
self._nsd_id = args.nsd_id
self.log.debug("Instantiate NSD {} as {} on {}".format(self._nsd_id,
self._service_name,
- self._account))
+ self._dc))
- if (self._pkgs is None) and (self._nsd_id is None):
- raise OnboardPkgInputError("Need to specify either upload-pkg or instantiate options")
+ if (self._pkgs is None) and (self._nsd_id is None) and (not args.list_nsds):
+ raise OnboardPkgInputError("Need to specify either upload-pkg or instantiate or list options")
# Validate the port numbers are correct
def valid_port(port):
self.log.debug("Check connectivity to SO at {}:{}, with credentials {}:{}".
format(self._ip, self._rport, self._user, self._password))
- rest_url = self._conf_url+"/resource-orchestrator"
+ rest_url = self._conf_url+"/ro-account"
try:
output = self._exec_cmd(rest_url)
self.log.debug("Output of restconf validation: {}".
self.log.debug("No NSD ID provided for instantiation")
return
- # Check to see if datacenter is valid
- if self._dc:
- dc_url = "{url}/datacenters". format(url=self._oper_url)
- output = self._exec_cmd(dc_url)
- if (output is None) or (len(output) == 0):
- # Account not found
- raise OnboardPkgDcError("Datacenter {} provided is not valid".
- format(self._dc))
- found = False
- js = json.loads(output)
- if "ro-accounts" in js["rw-launchpad:datacenters"]:
- for ro in js["rw-launchpad:datacenters"]["ro-accounts"]:
- if "datacenters" in ro:
- for dc in ro["datacenters"]:
- if dc["uuid"] == self._dc:
- self.log.debug("Found datacenter {}".format(dc))
- found = True
- break
- if found:
- break
-
- if found is False:
- raise OnboardPkgDcError("Datacenter {} provided is not valid".
- format(self._dc))
-
-
- # Check cloud account is valid, if provided
- if self._account:
- acct_url = "{url}/cloud/account/{acct}". \
- format(url=self._conf_url, acct=self._account)
- output = self._exec_cmd(acct_url)
- if (output is None) or (len(output) == 0):
- # Account not found
- raise OnboardPkgAcctError("VIM/Cloud account {} provided is not valid".
- format(self._account))
-
# Check id NSD ID is valid
nsd_url = "{url}/nsd-catalog/nsd/{nsd_id}". \
format(url=self._conf_url, nsd_id=self._nsd_id)
format(self._nsd_id,
js['error']))
- nsd = js['nsd:nsd']
+ try:
+ nsd = js['project-nsd:nsd']
+ except KeyError as e:
+ raise OnboardPkgNsdError("NSD ID {} provided is not valid".
+ format(self._nsd_id))
+
self.log.debug("NSD to instantiate: {}".format(nsd))
# Generate a UUID for NS
'name': self._service_name,
"nsd": nsd,}
if self._dc:
- nsr['om-datacenter'] = self._dc
- else:
- nsr['cloud-account'] = self._account
+ nsr['datacenter'] = self._dc
+
+ if self._ro:
+ nsr['resource-orchestrator'] = self._ro
data = {'nsr': [nsr]}
self.log.info("Successfully initiated instantiation of NS as {} ({})".
format(self._service_name, ns_id))
+ def list_nsds(self):
+ if self._args.list_nsds:
+ self.log.debug("Check NSDS at {}:{}, with credentials {}:{}".
+ format(self._ip, self._rport, self._user, self._password))
+
+ rest_url = self._conf_url+"/nsd-catalog/nsd"
+ try:
+ output = self._exec_cmd(rest_url)
+ self.log.debug("Output of NSD list: {}".
+ format(output))
+ if output:
+ js = json.loads(output)
+ if "error" in js:
+ raise OnboardPkgRcConnError("SO Restconf connect error: {}".
+ format(js["error"]))
+ else:
+ print("No NSDs found on SO")
+ return
+
+ self.log.debug("NSD list: {}".format(js))
+ print('List of NSDs on SO:\nName\tID')
+ for nsd in js['project-nsd:nsd']:
+ print('{}\t{}'.format(nsd['name'], nsd['id']))
+
+ except OnboardPkgCmdError as e:
+ self.log.error("SO restconf connect failed: {}".format(e))
+ raise OnboardPkgRcConnError("SO Restconf connect error: {}".
+ format(e))
+
def process(self):
try:
self.validate_args()
self.validate_connectivity()
self.upload_packages()
self.instantiate()
+ self.list_nsds()
if __name__ == "__main__":
help="Descriptor packages to upload. " + \
"If multiple descriptors are provided, they are uploaded in the same sequence.")
+ parser.add_argument("-l", "--list-nsds", action='store_true',
+ help="List available network service descriptors")
+
parser.add_argument("-i", "--instantiate",
help="Instantiate a network service with the name")
parser.add_argument("-d", "--nsd-id",
help="Network descriptor ID to instantiate")
parser.add_argument("-D", "--datacenter",
help="OpenMano datacenter to instantiate on")
- parser.add_argument("-c", "--vim-account",
- help="Cloud/VIM account to instantiate on")
+ parser.add_argument("-r", "--resource-orchestrator",
+ help="RO account to instantiate on")
+ parser.add_argument("--project", default='default',
+ help="Project to use, default 'default'")
parser.add_argument("-o", "--onboard-port", default=8443, type=int,
help="Onboarding port number - node port number, default 8443")
parser.add_argument("-p", "--upload-port", default=4567, type=int,
import uuid
import xmlrunner
+#Setting RIFT_VAR_ROOT if not already set for unit test execution
+if "RIFT_VAR_ROOT" not in os.environ:
+ os.environ['RIFT_VAR_ROOT'] = os.path.join(os.environ['RIFT_INSTALL'], 'var/rift/unittest')
+
import rift.package.archive
-import rift.package.charm
import rift.package.checksums
-import rift.package.config
import rift.package.convert
import rift.package.icon
import rift.package.package
from rift.tasklets.rwlaunchpad import export
import gi
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('ProjectVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
from gi.repository import (
- RwVnfdYang,
- VnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
+ ProjectVnfdYang as VnfdYang,
)
import utest_package
self._vnfd_serializer = rift.package.convert.VnfdSerializer()
def test_create_archive(self):
- rw_vnfd_msg = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd(
+ rw_vnfd_msg = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd(
id="new_id", name="new_name", description="new_description"
)
json_desc_str = self._rw_vnfd_serializer.to_json_string(rw_vnfd_msg)
self.assertEqual(package.descriptor_msg, rw_vnfd_msg)
def test_export_package(self):
- rw_vnfd_msg = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd(
+ rw_vnfd_msg = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd(
id="new_id", name="new_name", description="new_description",
meta="THIS FIELD IS NOT IN REGULAR VNFD"
)
- vnfd_msg = VnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd()
+ vnfd_msg = VnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd()
vnfd_msg.from_dict(rw_vnfd_msg.as_dict(), ignore_missing_keys=True)
self.assertNotEqual(rw_vnfd_msg, vnfd_msg)
from rift.package.handler import FileRestApiHandler
import gi
-gi.require_version('NsdYang', '1.0')
-gi.require_version('VnfdYang', '1.0')
+gi.require_version('ProjectNsdYang', '1.0')
+gi.require_version('ProjectVnfdYang', '1.0')
from gi.repository import (
- NsdYang,
- VnfdYang,
+ ProjectNsdYang as NsdYang,
+ ProjectVnfdYang as VnfdYang,
)
import base64
import concurrent.futures
import io
+import json
import logging
import os
import sys
import uuid
import xmlrunner
+#Setting RIFT_VAR_ROOT if not already set for unit test execution
+if "RIFT_VAR_ROOT" not in os.environ:
+ os.environ['RIFT_VAR_ROOT'] = os.path.join(os.environ['RIFT_INSTALL'], 'var/rift/unittest')
+
from rift.package import convert
from rift.tasklets.rwlaunchpad import onboard
import rift.test.dts
+import functools
import gi
gi.require_version('NsdYang', '1.0')
gi.require_version('VnfdYang', '1.0')
+gi.require_version('ProjectNsdYang', '1.0')
+gi.require_version('ProjectVnfdYang', '1.0')
from gi.repository import (
NsdYang,
VnfdYang,
+ ProjectNsdYang,
+ ProjectVnfdYang,
)
class RestconfDescriptorHandler(tornado.web.RequestHandler):
- DESC_SERIALIZER_MAP = {
- "nsd": convert.NsdSerializer(),
- "vnfd": convert.VnfdSerializer(),
- }
-
class AuthError(Exception):
pass
self._verify_content_type_header()
def _verify_request_body(self, descriptor_type):
- if descriptor_type not in RestconfDescriptorHandler.DESC_SERIALIZER_MAP:
+ if descriptor_type not in ['nsd', 'vnfd']:
raise ValueError("Unsupported descriptor type: %s" % descriptor_type)
- body = self.request.body
- bytes_hdl = io.BytesIO(body)
-
- serializer = RestconfDescriptorHandler.DESC_SERIALIZER_MAP[descriptor_type]
+ body = convert.decode(self.request.body)
+ self._logger.debug("Received msg: {}".format(body))
try:
- message = serializer.from_file_hdl(bytes_hdl, ".json")
+ message = json.loads(body)
except convert.SerializationError as e:
self.set_status(400)
self._transforms = []
self._info.last_request_message = message
- self._logger.debug("Received a valid descriptor request")
+ self._logger.debug("Received a valid descriptor request: {}".format(message))
def put(self, descriptor_type):
self._info.last_descriptor_type = descriptor_type
class OnboardTestCase(tornado.testing.AsyncHTTPTestCase):
- AUTH = ("admin", "admin")
+ DESC_SERIALIZER_MAP = {
+ "nsd": convert.NsdSerializer(),
+ "vnfd": convert.VnfdSerializer(),
+ }
+
+ AUTH = ("admin","admin")
def setUp(self):
self._log = logging.getLogger(__file__)
self._loop = asyncio.get_event_loop()
def get_app(self):
attrs = dict(auth=OnboardTestCase.AUTH, log=self._log, info=self._handler_info)
return tornado.web.Application([
- (r"/api/config/.*/(nsd|vnfd)", RestconfDescriptorHandler, attrs),
+ (r"/api/config/project/default/.*/(nsd|vnfd)",
+ RestconfDescriptorHandler, attrs),
])
+
+ def get_msg(self, desc=None):
+ if desc is None:
+ desc = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+ serializer = OnboardTestCase.DESC_SERIALIZER_MAP['nsd']
+ jstr = serializer.to_json_string(desc, project_ns=False)
+ self._desc = jstr
+ hdl = io.BytesIO(str.encode(jstr))
+ return serializer.from_file_hdl(hdl, ".json")
+
+ def get_json(self, msg):
+ serializer = OnboardTestCase.DESC_SERIALIZER_MAP['nsd']
+ json_data = serializer.to_json_string(msg, project_ns=True)
+ return json.loads(json_data)
+
@rift.test.dts.async_test
def test_onboard_nsd(self):
- nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
- yield from self._loop.run_in_executor(None, self._onboarder.onboard, nsd_msg)
- self.assertEqual(self._handler_info.last_request_message, nsd_msg)
+ nsd_msg = self.get_msg()
+ yield from self._loop.run_in_executor(None, functools.partial(self._onboarder.onboard, descriptor_msg=nsd_msg, auth=OnboardTestCase.AUTH))
+ self.assertEqual(self._handler_info.last_request_message, self.get_json(nsd_msg))
self.assertEqual(self._handler_info.last_descriptor_type, "nsd")
self.assertEqual(self._handler_info.last_method, "POST")
@rift.test.dts.async_test
def test_update_nsd(self):
- nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
- yield from self._loop.run_in_executor(None, self._onboarder.update, nsd_msg)
- self.assertEqual(self._handler_info.last_request_message, nsd_msg)
+ nsd_msg = self.get_msg()
+ yield from self._loop.run_in_executor(None, functools.partial(self._onboarder.update, descriptor_msg=nsd_msg, auth=OnboardTestCase.AUTH))
+ self.assertEqual(self._handler_info.last_request_message, self.get_json(nsd_msg))
self.assertEqual(self._handler_info.last_descriptor_type, "nsd")
self.assertEqual(self._handler_info.last_method, "PUT")
@rift.test.dts.async_test
def test_bad_descriptor_type(self):
- nsd_msg = NsdYang.YangData_Nsd_NsdCatalog()
+ nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
with self.assertRaises(TypeError):
yield from self._loop.run_in_executor(None, self._onboarder.update, nsd_msg)
# Use a port not used by the instantiated server
new_port = self._port - 1
self._onboarder.port = new_port
- nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+ nsd_msg = self.get_msg()
with self.assertRaises(onboard.OnboardError):
yield from self._loop.run_in_executor(None, self._onboarder.onboard, nsd_msg)
# Set the timeout to something minimal to speed up test
self._onboarder.timeout = .1
- nsd_msg = NsdYang.YangData_Nsd_NsdCatalog_Nsd(id=str(uuid.uuid4()), name="nsd_name")
+ nsd_msg = self.get_msg()
# Force the request to timeout by running the call synchronously so the
with self.assertRaises(onboard.OnboardError):
import xmlrunner
import yaml
+#Setting RIFT_VAR_ROOT if not already set for unit test execution
+if "RIFT_VAR_ROOT" not in os.environ:
+ os.environ['RIFT_VAR_ROOT'] = os.path.join(os.environ['RIFT_INSTALL'], 'var/rift/unittest')
+
import rift.package.archive
import rift.package.package
-import rift.package.charm
import rift.package.icon
import rift.package.script
-import rift.package.config
import rift.package.store
import rift.package.checksums
import rift.package.cloud_init
-import gi
-gi.require_version('RwpersonDbYang', '1.0')
-gi.require_version('RwYang', '1.0')
-
-from gi.repository import (
- RwpersonDbYang,
- RwYang,
- )
-
-
nsd_yaml = b"""nsd:nsd-catalog:
nsd:nsd:
- nsd:id: gw_corpA
self.assertEquals(yaml.load(vnfd_data), yaml.load(vnfd_yaml))
-class TestPackageCharmExtractor(PackageTestCase):
- def add_charm_dir(self, charm_name):
- charm_dir = "charms/trusty/{}".format(charm_name)
- charm_file = "{}/actions.yaml".format(charm_dir)
- charm_text = b"THIS IS A FAKE CHARM"
- self.add_tarinfo_dir(charm_dir)
- self.add_tarinfo(charm_file, io.BytesIO(charm_text))
-
- def test_extract_charm(self):
- charm_name = "charm_a"
- self.add_charm_dir(charm_name)
- package = self.create_vnfd_package()
- with tempfile.TemporaryDirectory() as tmp_dir:
- extractor = rift.package.charm.PackageCharmExtractor(self._log, tmp_dir)
- extractor.extract_charms(package)
-
- charm_dir = extractor.get_extracted_charm_dir(package.descriptor_id, charm_name)
- self.assertTrue(os.path.exists(charm_dir))
- self.assertTrue(os.path.isdir(charm_dir))
-
class TestPackageIconExtractor(PackageTestCase):
def add_icon_file(self, icon_name):
with self.assertRaises(rift.package.cloud_init.CloudInitExtractionError):
extractor.read_script(package, script_name)
-class TestPackageConfigExtractor(PackageTestCase):
- def add_ns_config_file(self, nsd_id):
- config_file = "ns_config/{}.yaml".format(nsd_id)
- config_text = b""" ns_config """
- self.add_tarinfo(config_file, io.BytesIO(config_text), mode=0o666)
-
- return config_file
-
- def add_vnf_config_file(self, vnfd_id, member_vnf_index):
- config_file = "vnf_config/{}_{}.yaml".format(vnfd_id, member_vnf_index)
- config_text = b""" vnf_config """
- self.add_tarinfo(config_file, io.BytesIO(config_text), mode=0o666)
-
- return config_file
-
- def test_extract_config(self):
- ns_config_file = self.add_ns_config_file("nsd_id")
- vnf_config_file = self.add_vnf_config_file("vnfd_id", 1)
- package = self.create_nsd_package()
- with tempfile.TemporaryDirectory() as tmp_dir:
- extractor = rift.package.config.PackageConfigExtractor(self._log, tmp_dir)
- extractor.extract_configs(package)
-
- dest_ns_config_file = extractor.get_extracted_config_path(package.descriptor_id, ns_config_file)
- dest_vnf_config_file = extractor.get_extracted_config_path(package.descriptor_id, vnf_config_file)
- self.assertTrue(os.path.isfile(dest_ns_config_file))
- self.assertTrue(os.path.isfile(dest_vnf_config_file))
-
-
class TestPackageValidator(PackageTestCase):
def setUp(self):
super().setUp()
import gi
gi.require_version('RwpersonDbYang', '1.0')
+gi.require_version('RwProjectPersonDbYang', '1.0')
gi.require_version('RwYang', '1.0')
from gi.repository import (
RwpersonDbYang,
+ RwProjectPersonDbYang,
RwYang,
)
+from rift.package.convert import SerializationError
+
+
class TestSerializer(unittest.TestCase):
def setUp(self):
self._serializer = ProtoMessageSerializer(
RwpersonDbYang,
- RwpersonDbYang.Person
+ RwpersonDbYang.Person,
+ RwProjectPersonDbYang,
+ RwProjectPersonDbYang.YangData_RwProject_Project_Person,
)
self._sample_person = RwpersonDbYang.Person(name="Fred")
- self._model = RwYang.model_create_libncx()
+ self._project_person = RwProjectPersonDbYang.YangData_RwProject_Project_Person(name="Fred")
+ self._model = RwYang.model_create_libyang()
self._model.load_schema_ypbc(RwpersonDbYang.get_schema())
def test_from_xml_file(self):
with io.StringIO(sample_person_yaml) as file_hdl:
person = self._serializer.from_file_hdl(file_hdl, ".yml")
- self.assertEqual(person, self._sample_person)
+ self.assertEqual(person, self._project_person)
def test_from_json_file(self):
sample_person_json = self._sample_person.to_json(self._model)
with io.StringIO(sample_person_json) as file_hdl:
person = self._serializer.from_file_hdl(file_hdl, ".json")
- self.assertEqual(person, self._sample_person)
+ self.assertEqual(person, self._project_person)
def test_unknown_file_extension(self):
with io.StringIO("asdf") as file_hdl:
self.assertEqual(person, self._sample_person)
def test_to_json_string_invalid_type(self):
- with self.assertRaises(TypeError):
+ with self.assertRaises(SerializationError):
self._serializer.to_json_string(RwpersonDbYang.FlatPerson(name="bob"))
import tornado.web
import tornado.httputil
-import gi
+#Setting RIFT_VAR_ROOT if not already set for unit test execution
+if "RIFT_VAR_ROOT" not in os.environ:
+ os.environ['RIFT_VAR_ROOT'] = os.path.join(os.environ['RIFT_INSTALL'], 'var/rift/unittest')
+
import requests
from tornado.platform.asyncio import AsyncIOMainLoop
from tornado.ioloop import IOLoop
from concurrent.futures.thread import ThreadPoolExecutor
from concurrent.futures.process import ProcessPoolExecutor
+
+import gi
gi.require_version('RwDts', '1.0')
gi.require_version('RwPkgMgmtYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
from gi.repository import (
RwDts as rwdts,
RwPkgMgmtYang,
- RwVnfdYang
-
+ RwProjectVnfdYang as RwVnfdYang,
)
import rift.tasklets.rwlaunchpad.uploader as uploader
import rift.tasklets.rwlaunchpad.message as message
import rift.tasklets.rwlaunchpad.export as export
+from rift.mano.utils.project import ManoProject, DEFAULT_PROJECT
import rift.test.dts
+import rift.package.store
+
import mock
TEST_STRING = "foobar"
mock_vnfd_catalog = mock.MagicMock()
- self.uid, path = self.create_mock_package()
+ self.uid, path = self.create_mock_package(DEFAULT_PROJECT)
- mock_vnfd = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict({
+ mock_vnfd = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd.from_dict({
"id": self.uid
})
mock_vnfd_catalog = {self.uid: mock_vnfd}
- self.app = uploader.UploaderApplication(
- self.log,
- self.dts,
- self.loop,
- vnfd_catalog=mock_vnfd_catalog)
+ class MockTasklet:
+ def __init__(cls):
+ def get_vnfd_catalog(project=DEFAULT_PROJECT):
+ return mock_vnfd_catalog
+
+ cls.log = self.log
+ cls.loop = self.loop
+ cls.dts = self.dts
+ cls.get_vnfd_catalog = get_vnfd_catalog
+ cls.get_nsd_catalog = None
+ cls.project = None
+ def _get_project(cls, project_name):
+ if cls.project is None:
+ cls.project = ManoProject(cls.log, project_name)
+ return cls.project
+
+ vnfd_store = rift.package.store.VnfdPackageFilesystemStore(self.log, project=DEFAULT_PROJECT)
+ nsd_store = rift.package.store.NsdPackageFilesystemStore(self.log, project=DEFAULT_PROJECT)
+
+ self.app = uploader.UploaderApplication(MockTasklet(), vnfd_store=vnfd_store, nsd_store=nsd_store)
+ self.app.onboarder.get_updated_descriptor = mock.MagicMock(return_value={'vnfd:vnfd':{'name':'mock', 'version':'mock'}})
+ self.app.onboarder.onboard = mock.MagicMock()
+ self.app.onboarder.update = mock.MagicMock()
AsyncIOMainLoop().install()
self.server = tornado.httpserver.HTTPServer(
def tearDown(self):
super().tearDown()
- def create_mock_package(self):
+ def create_mock_package(self, project):
uid = str(uuid.uuid4())
path = os.path.join(
- os.getenv('RIFT_ARTIFACTS'),
+ os.getenv('RIFT_VAR_ROOT'),
"launchpad/packages/vnfd",
+ project,
uid)
package_path = os.path.join(path, "pong_vnfd")
yield from self.app.register()
ip = RwPkgMgmtYang.YangInput_RwPkgMgmt_PackageCreate.from_dict({
"package_type": "VNFD",
- "external_url": "http://repo.riftio.com/releases/open.riftio.com/4.2.1/VNFS/ping_vnfd.tar.gz"
+ "external_url": "http://repo.riftio.com/releases/open.riftio.com/4.4.2/ping_vnfd.tar.gz",
+ "project_name": DEFAULT_PROJECT
})
rpc_out = yield from self.dts.query_rpc(
# Update
ip = RwPkgMgmtYang.YangInput_RwPkgMgmt_PackageUpdate.from_dict({
"package_type": "VNFD",
- "external_url": "http://repo.riftio.com/releases/open.riftio.com/4.2.1/VNFS/ping_vnfd.tar.gz"
+ "external_url": "http://repo.riftio.com/releases/open.riftio.com/4.4.2/ping_vnfd.tar.gz",
+ "project_name": DEFAULT_PROJECT
})
rpc_out = yield from self.dts.query_rpc(
"I,/rw-pkg-mgmt:package-update",
data = data[1]
assert type(data) is message.DownloadSuccess
-
@rift.test.dts.async_test
def test_package_export(self):
"""
data = data[-1]
assert type(data) is export.ExportSuccess
path = os.path.join(
- os.getenv("RIFT_ARTIFACTS"),
+ os.getenv("RIFT_VAR_ROOT"),
"launchpad/exports",
filename)
##
# This function creates an install target for the plugin artifacts
##
-rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+rift_install_gobject_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py COMPONENT ${INSTALL_COMPONENT})
# Workaround RIFT-6485 - rpmbuild defaults to python2 for
# anything not in a site-packages directory so we have to
rift/tasklets/${TASKLET_NAME}/__init__.py
rift/tasklets/${TASKLET_NAME}/core.py
rift/tasklets/${TASKLET_NAME}/tasklet.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY)
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
except Exception:
return list()
-
class MonascaPluginFactory(PluginFactory):
PLUGIN_NAME = "monasca"
FALLBACKS = ["ceilometer",]
return impl
+class BrocadeVcpePluginFactory(PluginFactory):
+ PLUGIN_NAME = "brocade_vcpe"
+ FALLBACKS = ["unavailable",]
+
+ def create(self, cloud_account):
+ plugin = rw_peas.PeasPlugin("rwmon_brocade", 'RwMon-1.0')
+ impl = plugin.get_interface("Monitoring")
+
+ # Check that the plugin is available on the platform associated with
+ # the cloud account
+ _, available = impl.nfvi_metrics_available(cloud_account)
+ if not available:
+ raise PluginUnavailableError()
+
+ return impl
class UnavailablePluginFactory(PluginFactory):
PLUGIN_NAME = "unavailable"
self.register_plugin_factory(CeilometerPluginFactory())
self.register_plugin_factory(MonascaPluginFactory())
self.register_plugin_factory(UnavailablePluginFactory())
+ self.register_plugin_factory(BrocadeVcpePluginFactory())
+
@property
def log(self):
try:
factory = self._factories[name]
plugin = factory.create(cloud_account)
+
self._plugins[cloud_account.name] = plugin
return
self._account = account
self._plugin = plugin
self._timestamp = 0
- self._metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
+ self._metrics = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
self._vdur = vdur
self._vim_id = vdur.vim_id
self._updating = None
None,
self._plugin.nfvi_metrics,
self._account,
- self._vim_id,
+ self._vim_id
),
timeout=NfviMetrics.TIMEOUT,
loop=self.loop,
try:
# Create uninitialized metric structure
- vdu_metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
+ vdu_metrics = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
# VCPU
vdu_metrics.vcpu.total = self.vdur.vm_flavor.vcpu_count
vdu_metrics.network.outgoing.byte_rate = metrics.network.outgoing.byte_rate
# External ports
- vdu_metrics.external_ports.total = len(self.vdur.external_interface)
+ vdu_metrics.external_ports.total = len([intf for intf in self.vdur.interface if intf.type_yang == 'EXTERNAL'])
# Internal ports
- vdu_metrics.internal_ports.total = len(self.vdur.internal_interface)
+ vdu_metrics.internal_ports.total = len([intf for intf in self.vdur.interface if intf.type_yang == 'INTERNAL'])
self._metrics = vdu_metrics
different sub-systems that are used to monitor the NFVI.
"""
- def __init__(self, loop, log, config):
+ def __init__(self, loop, log, config, project):
"""Create a Monitor object
Arguments:
- loop - an event loop
- log - the logger used by this object
- config - an instance of InstanceConfiguration
+ loop - an event loop
+ log - the logger used by this object
+ config - an instance of InstanceConfiguration
+ project - an instance of the project
"""
self._loop = loop
self._log = log
+ self._project = project
self._cloud_accounts = dict()
self._nfvi_plugins = NfviMetricsPluginManager(log)
"""The event log used by this object"""
return self._log
+ @property
+ def project(self):
+ return self._project
+
@property
def cache(self):
"""The NFVI metrics cache"""
if account.account_type == "openstack":
self.register_cloud_account(account, "monasca")
+ elif account.account_type == "prop_cloud1":
+ self.register_cloud_account(account, "brocade_vcpe")
else:
self.register_cloud_account(account, "mock")
# Make sure that there are no VNFRs associated with this account
for vnfr in self._vnfrs.values():
- if vnfr.cloud_account == account_name:
+ if vnfr.datacenter == account_name:
raise AccountInUseError()
del self._cloud_accounts[account_name]
the monitor.
"""
- if vnfr.cloud_account not in self._cloud_accounts:
+ if vnfr.datacenter not in self._cloud_accounts:
raise UnknownAccountError()
- account = self._cloud_accounts[vnfr.cloud_account]
+ account = self._cloud_accounts[vnfr.datacenter]
for vdur in vnfr.vdur:
try:
the monitor.
"""
- if vnfr.cloud_account not in self._cloud_accounts:
+ if vnfr.datacenter not in self._cloud_accounts:
raise UnknownAccountError()
- account = self._cloud_accounts[vnfr.cloud_account]
+ account = self._cloud_accounts[vnfr.datacenter]
for vdur in vnfr.vdur:
try:
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
import asyncio
import concurrent.futures
+import gi
import time
-
-import tornado.web
import tornado.httpserver
+import tornado.web
-import gi
gi.require_version('RwDts', '1.0')
gi.require_version('RwLog', '1.0')
gi.require_version('RwMonitorYang', '1.0')
gi.require_version('RwLaunchpadYang', '1.0')
gi.require_version('RwNsrYang', '1.0')
gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('rwlib', '1.0')
gi.require_version('RwLaunchpadYang', '1.0')
from gi.repository import (
RwDts as rwdts,
RwVnfrYang,
VnfrYang,
)
-
+import gi.repository.rwlib as rwlib
import rift.tasklets
import rift.mano.cloud
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ )
+
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
from . import core
class DtsHandler(object):
- def __init__(self, tasklet):
+ def __init__(self, project):
self.reg = None
- self.tasklet = tasklet
+ self.project = project
@property
def log(self):
- return self.tasklet.log
+ return self.project._log
@property
def log_hdl(self):
- return self.tasklet.log_hdl
+ return self.project._log_hdl
@property
def dts(self):
- return self.tasklet.dts
+ return self.project._dts
@property
def loop(self):
- return self.tasklet.loop
+ return self.project._loop
@property
def classname(self):
with self.dts.group_create() as group:
group.register(
- xpath=VnfrCatalogSubscriber.XPATH,
+ xpath=self.project.add_project(VnfrCatalogSubscriber.XPATH),
flags=rwdts.Flag.SUBSCRIBER,
handler=handler,
)
with self.dts.appconf_group_create(acg_handler) as acg:
self.reg = acg.register(
- xpath=NsInstanceConfigSubscriber.XPATH,
+ xpath=self.project.add_project(NsInstanceConfigSubscriber.XPATH),
flags=rwdts.Flag.SUBSCRIBER,
)
class CloudAccountDtsHandler(DtsHandler):
- def __init__(self, tasklet):
- super().__init__(tasklet)
+ def __init__(self, project):
+ super().__init__(project)
self._cloud_cfg_subscriber = None
def register(self):
self.log.debug("creating cloud account config handler")
self._cloud_cfg_subscriber = rift.mano.cloud.CloudAccountConfigSubscriber(
- self.dts, self.log, self.log_hdl,
+ self.dts, self.log, self.log_hdl, self.project,
rift.mano.cloud.CloudAccountConfigCallbacks(
on_add_apply=self.tasklet.on_cloud_account_create,
on_delete_apply=self.tasklet.on_cloud_account_delete,
from a single VDU.
"""
- XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id='{}']/vnfr:vdur[vnfr:id='{}']/rw-vnfr:nfvi-metrics"
+ XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id={}]/vnfr:vdur[vnfr:id={}]/rw-vnfr:nfvi-metrics"
# This timeout defines the length of time the publisher will wait for a
# request to a data source to complete. If the request cannot be completed
# before timing out, the current data will be published instead.
TIMEOUT = 2.0
- def __init__(self, tasklet, vnfr, vdur):
+ def __init__(self, project, vnfr, vdur):
"""Create an instance of VdurNvfiPublisher
Arguments:
vdur - the VDUR of the VDU whose metrics are published
"""
- super().__init__(tasklet)
+ super().__init__(project)
self._vnfr = vnfr
self._vdur = vdur
self._handle = None
- self._xpath = VdurNfviMetricsPublisher.XPATH.format(vnfr.id, vdur.id)
+ self._xpath = project.add_project(VdurNfviMetricsPublisher.XPATH.format(quoted_key(vnfr.id), quoted_key(vdur.id)))
self._deregistered = asyncio.Event(loop=self.loop)
with self.dts.appconf_group_create(acg_handler) as acg:
self.reg = acg.register(
- xpath="C,/rw-launchpad:launchpad-config",
+ xpath=self.project.add_project("C,/rw-launchpad:launchpad-config"),
flags=rwdts.Flag.SUBSCRIBER,
)
them on to the tasklet.
"""
- def __init__(self, tasklet):
- super().__init__(tasklet)
+ def __init__(self, project):
+ super().__init__(project)
self._handle = None
@asyncio.coroutine
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
try:
+
+ if not self.project.rpc_check(msg, xact_info=xact_info):
+ return
+
response = VnfrYang.YangOutput_Vnfr_CreateAlarm()
response.alarm_id = yield from self.tasklet.on_create_alarm(
msg.cloud_account,
them on to the tasklet.
"""
- def __init__(self, tasklet):
- super().__init__(tasklet)
+ def __init__(self, project):
+ super().__init__(project)
self._handle = None
@asyncio.coroutine
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
try:
+ if not self.project.rpc_check(msg, xact_info=xact_info):
+ return
+
yield from self.tasklet.on_destroy_alarm(
msg.cloud_account,
msg.alarm_id,
])
-class MonitorTasklet(rift.tasklets.Tasklet):
- """
- The MonitorTasklet provides a interface for DTS to interact with an
- instance of the Monitor class. This allows the Monitor class to remain
- independent of DTS.
- """
+class MonitorProject(ManoProject):
- DEFAULT_POLLING_PERIOD = 1.0
+ def __init__(self, name, tasklet, **kw):
+ super(MonitorProject, self).__init__(log, name)
+ self._tasklet = tasklet
+ self._log_hdl = tasklet.log_hdl
+ self._dts = tasklet.dts
+ self._loop = tasklet.loop
- def __init__(self, *args, **kwargs):
- try:
- super().__init__(*args, **kwargs)
- self.rwlog.set_category("rw-monitor-log")
+ self.vnfr_subscriber = VnfrCatalogSubscriber(self)
+ self.cloud_cfg_subscriber = CloudAccountDtsHandler(self)
+ self.ns_instance_config_subscriber = NsInstanceConfigSubscriber(self)
+ self.launchpad_cfg_subscriber = LaunchpadConfigDtsSubscriber(self)
- self.vnfr_subscriber = VnfrCatalogSubscriber(self)
- self.cloud_cfg_subscriber = CloudAccountDtsHandler(self)
- self.ns_instance_config_subscriber = NsInstanceConfigSubscriber(self)
- self.launchpad_cfg_subscriber = LaunchpadConfigDtsSubscriber(self)
+ self.config = core.InstanceConfiguration()
+ self.config.polling_period = MonitorTasklet.DEFAULT_POLLING_PERIOD
- self.config = core.InstanceConfiguration()
- self.config.polling_period = MonitorTasklet.DEFAULT_POLLING_PERIOD
+ self.monitor = core.Monitor(self.loop, self.log, self.config, self)
+ self.vdur_handlers = dict()
- self.monitor = core.Monitor(self.loop, self.log, self.config)
- self.vdur_handlers = dict()
-
- self.webhooks = None
- self.create_alarm_rpc = CreateAlarmRPC(self)
- self.destroy_alarm_rpc = DestroyAlarmRPC(self)
-
-
- except Exception as e:
- self.log.exception(e)
-
- @property
- def polling_period(self):
- return self.config.polling_period
-
- @property
- def public_ip(self):
- """The public IP of the launchpad"""
- return self.config.public_ip
-
- def start(self):
- super().start()
- self.log.info("Starting MonitoringTasklet")
-
- self.log.debug("Registering with dts")
- self.dts = rift.tasklets.DTS(
- self.tasklet_info,
- RwLaunchpadYang.get_schema(),
- self.loop,
- self.on_dts_state_change
- )
-
- self.log.debug("Created DTS Api GI Object: %s", self.dts)
-
- def stop(self):
- try:
- self.dts.deinit()
- except Exception as e:
- self.log.exception(e)
+ self.create_alarm_rpc = CreateAlarmRPC(self)
+ self.destroy_alarm_rpc = DestroyAlarmRPC(self)
@asyncio.coroutine
- def init(self):
+ def register (self):
self.log.debug("creating cloud account handler")
self.cloud_cfg_subscriber.register()
self.log.debug("creating destroy-alarm rpc handler")
yield from self.destroy_alarm_rpc.register()
- self.log.debug("creating webhook server")
- loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
- self.webhooks = WebhookApplication(self)
- self.server = tornado.httpserver.HTTPServer(
- self.webhooks,
- io_loop=loop,
- )
- @asyncio.coroutine
- def on_public_ip(self, ip):
- """Store the public IP of the launchpad
-
- Arguments:
- ip - a string containing the public IP address of the launchpad
+ @property
+ def polling_period(self):
+ return self.config.polling_period
- """
- self.config.public_ip = ip
+ @property
+ def public_ip(self):
+ """The public IP of the launchpad"""
+ return self.config.public_ip
def on_ns_instance_config_update(self, config):
"""Update configuration information
def on_cloud_account_delete(self, account_name):
self.monitor.remove_cloud_account(account_name)
- @asyncio.coroutine
- def run(self):
- self.webhooks.listen(WebhookApplication.DEFAULT_WEBHOOK_PORT)
-
- def on_instance_started(self):
- self.log.debug("Got instance started callback")
-
- @asyncio.coroutine
- def on_dts_state_change(self, state):
- """Handle DTS state change
-
- Take action according to current DTS state to transition application
- into the corresponding application state
-
- Arguments
- state - current dts state
-
- """
- switch = {
- rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
- rwdts.State.CONFIG: rwdts.State.RUN,
- }
-
- handlers = {
- rwdts.State.INIT: self.init,
- rwdts.State.RUN: self.run,
- }
-
- # Transition application to next state
- handler = handlers.get(state, None)
- if handler is not None:
- yield from handler()
-
- # Transition dts to next state
- next_state = switch.get(state, None)
- if next_state is not None:
- self.dts.handle.set_state(next_state)
-
def on_vnfr_create(self, vnfr):
- if not self.monitor.nfvi_metrics_available(vnfr.cloud_account):
+ try:
+ acc = vnfr.cloud_account
+ except AttributeError as e:
+ self.log.warning("NFVI metrics not supported")
+ return
+
+ if not self.monitor.nfvi_metrics_available(acc):
msg = "NFVI metrics unavailable for {}"
- self.log.warning(msg.format(vnfr.cloud_account))
+ self.log.warning(msg.format(acc))
return
self.monitor.add_vnfr(vnfr)
self.loop.create_task(coro)
def on_vnfr_update(self, vnfr):
+ try:
+ acc = vnfr.cloud_account
+ except AttributeError as e:
+ self.log.warning("NFVI metrics not supported")
+ return
+
if not self.monitor.nfvi_metrics_available(vnfr.cloud_account):
msg = "NFVI metrics unavailable for {}"
self.log.warning(msg.format(vnfr.cloud_account))
"""
yield from self.monitor.destroy_alarm(account, alarm_id)
+
+ @asyncio.coroutine
+ def delete_prepare(self):
+ # Check if any cloud accounts present
+ if self.cloud_cfg_subscriber and self.cloud_cfg_subscriber._cloud_cfg_subscriber.accounts:
+ return False
+ return True
+
+
+class MonitorTasklet(rift.tasklets.Tasklet):
+ """
+ The MonitorTasklet provides a interface for DTS to interact with an
+ instance of the Monitor class. This allows the Monitor class to remain
+ independent of DTS.
+ """
+
+ DEFAULT_POLLING_PERIOD = 1.0
+
+ def __init__(self, *args, **kwargs):
+ try:
+ super().__init__(*args, **kwargs)
+ self.rwlog.set_category("rw-monitor-log")
+
+ self._project_handler = None
+ self.projects = {}
+
+ self.webhooks = None
+
+ except Exception as e:
+ self.log.exception(e)
+
+ def start(self):
+ super().start()
+ self.log.info("Starting MonitoringTasklet")
+
+ self.log.debug("Registering with dts")
+ self.dts = rift.tasklets.DTS(
+ self.tasklet_info,
+ RwLaunchpadYang.get_schema(),
+ self.loop,
+ self.on_dts_state_change
+ )
+
+ self.log.debug("Created DTS Api GI Object: %s", self.dts)
+
+ def stop(self):
+ try:
+ self.dts.deinit()
+ except Exception as e:
+ self.log.exception(e)
+
+ @asyncio.coroutine
+ def init(self):
+ self.log.debug("creating webhook server")
+ loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
+ self.webhooks = WebhookApplication(self)
+ self.server = tornado.httpserver.HTTPServer(
+ self.webhooks,
+ io_loop=loop,
+ )
+
+ @asyncio.coroutine
+ def on_public_ip(self, ip):
+ """Store the public IP of the launchpad
+
+ Arguments:
+ ip - a string containing the public IP address of the launchpad
+
+ """
+ self.config.public_ip = ip
+
+ @asyncio.coroutine
+ def run(self):
+ address = rwlib.getenv("RWVM_INTERNAL_IPADDR")
+ if (address is None):
+ address=""
+ self.webhooks.listen(WebhookApplication.DEFAULT_WEBHOOK_PORT, address=address)
+
+ def on_instance_started(self):
+ self.log.debug("Got instance started callback")
+
+ @asyncio.coroutine
+ def on_dts_state_change(self, state):
+ """Handle DTS state change
+
+ Take action according to current DTS state to transition application
+ into the corresponding application state
+
+ Arguments
+ state - current dts state
+
+ """
+ switch = {
+ rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+ rwdts.State.CONFIG: rwdts.State.RUN,
+ }
+
+ handlers = {
+ rwdts.State.INIT: self.init,
+ rwdts.State.RUN: self.run,
+ }
+
+ # Transition application to next state
+ handler = handlers.get(state, None)
+ if handler is not None:
+ yield from handler()
+
+ # Transition dts to next state
+ next_state = switch.get(state, None)
+ if next_state is not None:
+ self.dts.handle.set_state(next_state)
+
--- /dev/null
+#!/usr/bin/env python3
+
+import argparse
+import asyncio
+import concurrent.futures
+import logging
+import os
+import sys
+import unittest
+import xmlrunner
+import time
+
+import gi
+gi.require_version('RwLog', '1.0')
+
+import rift.tasklets.rwmonitor.core as core
+import rift.mano.cloud as cloud
+
+from gi.repository import RwCloudYang, RwLog, RwVnfrYang
+import rw_peas
+
+@asyncio.coroutine
+def update(loop, log, executor, account, plugin, vim_id):
+ """Update the NFVI metrics for the associated VDUR
+
+ This coroutine will request new metrics from the data-source and update
+ the current metrics.
+
+ """
+ try:
+ # Make the request to the plugin in a separate thread and do
+ # not exceed the timeout
+ _, metrics = yield from asyncio.wait_for(
+ loop.run_in_executor(
+ executor,
+ plugin.nfvi_metrics,
+ account,
+ vim_id
+ ),
+ timeout=10,
+ loop=loop,
+ )
+
+ except asyncio.TimeoutError:
+ msg = "timeout on request for nfvi metrics (vim-id = {})"
+ log.warning(msg.format(vim_id))
+ return
+
+ except Exception as e:
+ log.exception(e)
+ return
+
+ try:
+ # Create uninitialized metric structure
+ vdu_metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
+
+ # VCPU
+ vdu_metrics.vcpu.total = 5
+ vdu_metrics.vcpu.utilization = metrics.vcpu.utilization
+
+ # Memory (in bytes)
+ vdu_metrics.memory.used = metrics.memory.used
+ vdu_metrics.memory.total = 5000
+ vdu_metrics.memory.utilization = 100 * vdu_metrics.memory.used / vdu_metrics.memory.total
+
+ # Storage
+ try:
+ vdu_metrics.storage.used = metrics.storage.used
+ utilization = 100 * vdu_metrics.storage.used / vdu_metrics.storage.total
+ if utilization > 100:
+ utilization = 100
+
+ vdu_metrics.storage.utilization = utilization
+
+ except ZeroDivisionError:
+ vdu_metrics.storage.utilization = 0
+
+ # Network (incoming)
+ vdu_metrics.network.incoming.packets = metrics.network.incoming.packets
+ vdu_metrics.network.incoming.packet_rate = metrics.network.incoming.packet_rate
+ vdu_metrics.network.incoming.bytes = metrics.network.incoming.bytes
+ vdu_metrics.network.incoming.byte_rate = metrics.network.incoming.byte_rate
+
+ # Network (outgoing)
+ vdu_metrics.network.outgoing.packets = metrics.network.outgoing.packets
+ vdu_metrics.network.outgoing.packet_rate = metrics.network.outgoing.packet_rate
+ vdu_metrics.network.outgoing.bytes = metrics.network.outgoing.bytes
+ vdu_metrics.network.outgoing.byte_rate = metrics.network.outgoing.byte_rate
+
+ # External ports
+ vdu_metrics.external_ports.total = 5
+
+ # Internal ports
+ vdu_metrics.internal_ports.total = 5
+
+ return vdu_metrics
+
+ except Exception as e:
+ log.exception(e)
+
+
+class TestUploadProgress(unittest.TestCase):
+ ACCOUNT_MSG = RwCloudYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList.from_dict({
+ "account_type": "openstack",
+ "openstack": {
+ "key": "admin",
+ "secret": "mypasswd",
+ "auth_url": 'http://10.66.4.18:5000/v3/',
+ "tenant": "demo",
+ "mgmt_network": "private"
+ }
+ })
+
+ def setUp(self):
+ self._loop = asyncio.get_event_loop()
+ self._log = logging.getLogger(__file__)
+ self._account = cloud.CloudAccount(
+ self._log,
+ RwLog.Ctx.new(__file__), TestUploadProgress.ACCOUNT_MSG
+ )
+
+ def test_many_updates(self):
+ vim_id = "a7f30def-0942-4425-8454-1ffe02b7db1e"
+ instances = 20
+
+ executor = concurrent.futures.ThreadPoolExecutor(10)
+ while True:
+ tasks = []
+ for _ in range(instances):
+ plugin = rw_peas.PeasPlugin("rwmon_ceilometer", 'RwMon-1.0')
+ impl = plugin.get_interface("Monitoring")
+ task = update(self._loop, self._log, executor, self._account.cal_account_msg, impl, vim_id)
+ tasks.append(task)
+ task = update(self._loop, self._log, executor, self._account.cal_account_msg, impl, vim_id)
+ tasks.append(task)
+ task = update(self._loop, self._log, executor, self._account.cal_account_msg, impl, vim_id)
+ tasks.append(task)
+ self._log.debug("Running %s update tasks", instances)
+ self._loop.run_until_complete(asyncio.wait(tasks, loop=self._loop, timeout=20))
+
+
+def main(argv=sys.argv[1:]):
+ logging.basicConfig(format='TEST %(message)s')
+
+ runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-v', '--verbose', action='store_true')
+ parser.add_argument('-n', '--no-runner', action='store_true')
+
+ args, unknown = parser.parse_known_args(argv)
+ if args.no_runner:
+ runner = None
+
+ # Set the global logging level
+ logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
+ # The unittest framework requires a program name, so use the name of this
+ # file instead (we do not want to have to pass a fake program name to main
+ # when this is called from the interpreter).
+ unittest.main(argv=[__file__] + unknown + ["-v"], testRunner=runner)
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+#!/usr/bin/env python3
+import argparse
+import asyncio
+import gi
+import logging
+import os
+import tempfile
+import unittest
+import xmlrunner
+
+# Add the current directory to the PLUGINDIR so we can use the plugin
+# file added here.
+os.environ["PLUGINDIR"] += (":" + os.path.dirname(os.path.realpath(__file__)))
+gi.require_version("RwDts", "1.0")
+gi.require_version("RwVnfrYang", "1.0")
+from gi.repository import (
+ RwDts,
+ RwVnfrYang,
+)
+
+import rift.tasklets
+import rift.test.dts
+
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+class RwLogTestCase(rift.test.dts.AbstractDTSTest):
+ # Disable the log_utest_mode so that log messages actually get logged
+ # using the rwlog handler since that is what we are testing here.
+ log_utest_mode = False
+
+ @classmethod
+ def configure_suite(cls, rwmain):
+ pass
+
+ @classmethod
+ def start_test_tasklet(cls):
+ cls.rwmain.add_tasklet(
+ os.path.join(
+ os.path.dirname(os.path.realpath(__file__)),
+ 'reprotesttasklet-python'
+ ),
+ 'reprotesttasklet-python'
+ )
+
+ @classmethod
+ def configure_schema(cls):
+ return RwVnfrYang.get_schema()
+
+ @classmethod
+ def configure_timeout(cls):
+ return 1000000
+
+ def configure_test(self, loop, test_id):
+ self.log.debug("STARTING - %s", self.id())
+ self.tinfo = self.new_tinfo(self.id())
+ self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+
+ @rift.test.dts.async_test
+ def test_tasklet_logging(self):
+ self.start_test_tasklet()
+
+ # The logtesttasklet signals being done, by moving into DTS Running state
+ yield from self.wait_for_tasklet_running("reprotesttasklet-python")
+ @asyncio.coroutine
+ def reader():
+ while True:
+ res_iter = yield from self.dts.query_read("D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id={}]/vnfr:vdur[vnfr:id={}]/rw-vnfr:nfvi-metrics".format(
+ quoted_key("a7f30def-0942-4425-8454-1ffe02b7db1e"), quoted_key("a7f30def-0942-4425-8454-1ffe02b7db1e"),
+ ))
+ for ent in res_iter:
+ res = yield from ent
+ metrics = res.result
+ self.log.debug("got metrics result: %s", metrics)
+
+ for _ in range(20):
+ self.loop.create_task(reader())
+
+ while True:
+ yield from asyncio.sleep(.001, loop=self.loop)
+
+
+def main():
+ runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-v', '--verbose', action='store_true')
+ args, _ = parser.parse_known_args()
+
+ RwLogTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+ unittest.main(testRunner=runner)
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+[Plugin]
+Module=reprotesttasklet-python
+Loader=python3
+Name=reprotesttasklet-python
--- /dev/null
+#!/usr/bin/env python3
+
+import argparse
+import asyncio
+import concurrent.futures
+import gi
+import logging
+import os
+import rwlogger
+import sys
+import time
+import unittest
+import xmlrunner
+
+gi.require_version("RwDts", "1.0")
+from gi.repository import (
+ RwDts as rwdts,
+ RwDtsYang,
+)
+import rift.tasklets
+import rift.test.dts
+
+gi.require_version('RwLog', '1.0')
+
+import rift.tasklets.rwmonitor.core as core
+import rift.mano.cloud as cloud
+
+from gi.repository import RwCloudYang, RwLog, RwVnfrYang
+import rw_peas
+
+from repro import update
+
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+
+class DtsHandler(object):
+ def __init__(self, tasklet):
+ self.reg = None
+ self.tasklet = tasklet
+
+ @property
+ def log(self):
+ return self.tasklet.log
+
+ @property
+ def log_hdl(self):
+ return self.tasklet.log_hdl
+
+ @property
+ def dts(self):
+ return self.tasklet.dts
+
+ @property
+ def loop(self):
+ return self.tasklet.loop
+
+ @property
+ def classname(self):
+ return self.__class__.__name__
+
+
+class VdurNfviMetricsPublisher(DtsHandler):
+ """
+ A VdurNfviMetricsPublisher is responsible for publishing the NFVI metrics
+ from a single VDU.
+ """
+
+ XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id={}]/vnfr:vdur[vnfr:id={}]/rw-vnfr:nfvi-metrics"
+
+ # This timeout defines the length of time the publisher will wait for a
+ # request to a data source to complete. If the request cannot be completed
+ # before timing out, the current data will be published instead.
+ TIMEOUT = 2.0
+
+ def __init__(self, tasklet, vnfr_id, vdur_id):
+ """Create an instance of VdurNvfiPublisher
+
+ Arguments:
+ tasklet - the tasklet
+ vdur - the VDUR of the VDU whose metrics are published
+
+ """
+ super().__init__(tasklet)
+ self._vnfr_id = vnfr_id
+ self._vdur_id = vdur_id
+
+ self._handle = None
+ self._xpath = VdurNfviMetricsPublisher.XPATH.format(quoted_key(vnfr_id), quoted_key(vdur_id))
+
+ self._deregistered = asyncio.Event(loop=self.loop)
+
+ @property
+ def xpath(self):
+ """The XPATH that the metrics are published on"""
+ return self._xpath
+
+ @asyncio.coroutine
+ def dts_on_prepare(self, xact_info, action, ks_path, msg):
+ """Handles the DTS on_prepare callback"""
+ self.log.debug("{}:dts_on_prepare".format(self.classname))
+
+ if action == rwdts.QueryAction.READ:
+ # If the publisher has been deregistered, the xpath element has
+ # been deleted. So we do not want to publish the metrics and
+ # re-created the element.
+ if not self._deregistered.is_set():
+ metrics = self.tasklet.on_retrieve_nfvi_metrics(self._vdur_id)
+ xact_info.respond_xpath(
+ rwdts.XactRspCode.MORE,
+ self.xpath,
+ metrics,
+ )
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK, self.xpath)
+
+ @asyncio.coroutine
+ def register(self):
+ """Register the publisher with DTS"""
+ self._handle = yield from self.dts.register(
+ xpath=self.xpath,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=self.dts_on_prepare,
+ ),
+ flags=rwdts.Flag.PUBLISHER,
+ )
+
+ def deregister(self):
+ """Deregister the publisher from DTS"""
+ # Mark the publisher for deregistration. This prevents the publisher
+ # from creating an element after it has been deleted.
+ self._deregistered.set()
+
+ # Now that we are done with the registration handle, delete the element
+ # and tell DTS to deregister it
+ self._handle.delete_element(self.xpath)
+ self._handle.deregister()
+ self._handle = None
+
+
+class RwLogTestTasklet(rift.tasklets.Tasklet):
+ """ A tasklet to test Python rwlog interactions """
+ def __init__(self, *args, **kwargs):
+ super(RwLogTestTasklet, self).__init__(*args, **kwargs)
+ self._dts = None
+ self.rwlog.set_category("rw-logtest-log")
+ self._metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
+
+ def start(self):
+ """ The task start callback """
+ super(RwLogTestTasklet, self).start()
+
+ self._dts = rift.tasklets.DTS(self.tasklet_info,
+ RwVnfrYang.get_schema(),
+ self.loop,
+ self.on_dts_state_change)
+ @property
+ def dts(self):
+ return self._dts
+
+ @asyncio.coroutine
+ def init(self):
+ pass
+
+ def on_retrieve_nfvi_metrics(self, vdur_id):
+ return self._metrics
+
+ @asyncio.coroutine
+ def run(self):
+ def go():
+ account_msg = RwCloudYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList.from_dict({
+ "account_type": "openstack",
+ "openstack": {
+ "key": "admin",
+ "secret": "mypasswd",
+ "auth_url": 'http://10.66.4.18:5000/v3/',
+ "tenant": "demo",
+ "mgmt_network": "private"
+ }
+ })
+
+ account = cloud.CloudAccount(
+ self.log,
+ RwLog.Ctx.new(__file__), account_msg
+ )
+
+ vim_id = "a7f30def-0942-4425-8454-1ffe02b7db1e"
+ instances = 20
+
+ executor = concurrent.futures.ThreadPoolExecutor(10)
+ plugin = rw_peas.PeasPlugin("rwmon_ceilometer", 'RwMon-1.0')
+ impl = plugin.get_interface("Monitoring")
+ while True:
+ tasks = []
+ for _ in range(instances):
+ task = update(self.loop, self.log, executor, account.cal_account_msg, impl, vim_id)
+ tasks.append(task)
+
+ self.log.debug("Running %s update tasks", instances)
+ #self.loop.run_until_complete(asyncio.wait(tasks, loop=self.loop, timeout=20))
+ done, pending = yield from asyncio.wait(tasks, loop=self.loop, timeout=20)
+ self._metrics = done.pop().result()
+
+ self._publisher = VdurNfviMetricsPublisher(self, "a7f30def-0942-4425-8454-1ffe02b7db1e", "a7f30def-0942-4425-8454-1ffe02b7db1e")
+ yield from self._publisher.register()
+ self.loop.create_task(go())
+
+ @asyncio.coroutine
+ def on_dts_state_change(self, state):
+ switch = {
+ rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+ rwdts.State.CONFIG: rwdts.State.RUN,
+ }
+
+ handlers = {
+ rwdts.State.INIT: self.init,
+ rwdts.State.RUN: self.run,
+ }
+
+ # Transition application to next state
+ handler = handlers.get(state, None)
+ if handler is not None:
+ yield from handler()
+
+ # Transition dts to next state
+ next_state = switch.get(state, None)
+ if next_state is not None:
+ self.log.debug("Changing state to %s", next_state)
+ self._dts.handle.set_state(next_state)
##
# This function creates an install target for the plugin artifacts
##
-rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+rift_install_gobject_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py COMPONENT ${INSTALL_COMPONENT})
# Workaround RIFT-6485 - rpmbuild defaults to python2 for
# anything not in a site-packages directory so we have to
rift/tasklets/${TASKLET_NAME}/nsr_core.py
rift/tasklets/${TASKLET_NAME}/vnfr_core.py
rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY)
@date 09-Jul-2016
"""
-
import asyncio
+import collections
import functools
+import gi
import uuid
+import rift.tasklets
+
from gi.repository import (RwDts as rwdts, NsrYang)
import rift.mano.dts as mano_dts
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
from . import aggregator as aggregator
class VnfrMonitoringParamSubscriber(mano_dts.AbstractOpdataSubscriber):
"""Registers for VNFR monitoring parameter changes.
-
+
Attributes:
monp_id (str): Monitoring Param ID
vnfr_id (str): VNFR ID
"""
- def __init__(self, log, dts, loop, vnfr_id, monp_id, callback=None):
- super().__init__(log, dts, loop, callback)
+ def __init__(self, log, dts, loop, project, vnfr_id, monp_id, callback=None):
+ super().__init__(log, dts, loop, project, callback)
self.vnfr_id = vnfr_id
self.monp_id = monp_id
def get_xpath(self):
- return("D,/vnfr:vnfr-catalog" +
- "/vnfr:vnfr[vnfr:id='{}']".format(self.vnfr_id) +
+ return self.project.add_project(("D,/vnfr:vnfr-catalog" +
+ "/vnfr:vnfr[vnfr:id={}]".format(quoted_key(self.vnfr_id)) +
"/vnfr:monitoring-param" +
- "[vnfr:id='{}']".format(self.monp_id))
+ "[vnfr:id={}]".format(quoted_key(self.monp_id))))
class NsrMonitoringParam():
"""Class that handles NS Mon-param data.
"""
- MonParamMsg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_MonitoringParam
+ MonParamMsg = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_MonitoringParam
MISSING = None
DEFAULT_AGGREGATION_TYPE = "AVERAGE"
@classmethod
- def create_nsr_mon_params(cls, nsd, constituent_vnfrs, store):
+ def create_nsr_mon_params(cls, nsd, constituent_vnfrs, mon_param_project):
"""Convenience class that constructs NSMonitoringParam objects
-
+
Args:
- nsd (RwNsdYang.YangData_Nsd_NsdCatalog_Nsd): Nsd object
+ nsd (RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd): Nsd object
constituent_vnfrs (list): List of constituent vnfr objects of NSR
- store (SubscriberStore): Store object instance
-
+ mon_param_project (MonParamProject): Store object instance
+
Returns:
list NsrMonitoringParam object.
Also handles legacy NSD descriptor which has no mon-param defines. In
such cases the mon-params are created from VNFD's mon-param config.
"""
- MonParamMsg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_MonitoringParam
-
mon_params = []
for mon_param_msg in nsd.monitoring_param:
mon_params.append(NsrMonitoringParam(
mon_param_msg,
- constituent_vnfrs
+ constituent_vnfrs,
+ mon_param_name=mon_param_msg.name
))
# Legacy Handling.
# This indicates that the NSD had no mon-param config.
if not nsd.monitoring_param:
for vnfr in constituent_vnfrs:
- vnfd = store.get_vnfd(vnfr.vnfd.id)
+ vnfd = mon_param_project.get_vnfd(vnfr.vnfd.id)
for monp in vnfd.monitoring_param:
mon_params.append(NsrMonitoringParam(
monp,
[vnfr],
- is_legacy=True))
+ is_legacy=True,
+ mon_param_name=monp.name))
return mon_params
- def __init__(self, monp_config, constituent_vnfrs, is_legacy=False):
+ def __init__(self, monp_config, constituent_vnfrs, is_legacy=False, mon_param_name=None):
"""
Args:
monp_config (GiObject): Config data to create the NSR mon-param msg
is_legacy (bool, optional): If set then the mon-param are created from
vnfd's config and not NSD's config.
"""
+ self._nsd_mon_param_msg = monp_config
self._constituent_vnfr_map = {vnfr.id:vnfr for vnfr in constituent_vnfrs}
# An internal store to hold the data
# create_nsr_mon_params() is already validating for 'is_legacy' by checking if
# nsd is having 'monitoring_param'. So removing 'self.aggregation_type is None' check for is_legacy.
self.is_legacy = is_legacy
+ self.mon_param_name = mon_param_name
if not is_legacy:
- self._msg = self._convert_nsd_msg(monp_config)
+ self._msg = self._convert_nsd_msg()
else:
+ # TODO remove arg for consistency
self._msg = self._convert_vnfd_msg(monp_config)
+ def add_vnfr(self, vnfr):
+ # If already added ignore
+ if vnfr.id in self._constituent_vnfr_map:
+ return
+
+ # Update the map
+ self._constituent_vnfr_map[vnfr.id] = vnfr
+
+ if not self.is_legacy:
+ self._msg = self._convert_nsd_msg()
+
+ def delete_vnfr(self, vnfr):
+ # Update the map
+ if vnfr.id in self._constituent_vnfr_map:
+ del self._constituent_vnfr_map[vnfr.id]
+
+ # Delete the value stores.
+ for vnfr_id, monp_id in list(self.vnfr_monparams.keys()):
+ if vnfr_id == vnfr.id:
+ del self.vnfr_monparams[(vnfr_id, monp_id)]
+
+ if not self.is_legacy:
+ self._msg = self._convert_nsd_msg()
+
+ @property
+ def nsd_mon_param_msg(self):
+ return self._nsd_mon_param_msg
+
@property
def nsr_mon_param_msg(self):
"""Gi object msg"""
return None
- def _constituent_vnfrs(self, constituent_vnfr_ids):
- # Fetch the VNFRs
- vnfr_map = {}
- for constituent_vnfr in constituent_vnfr_ids:
- vnfr_id = constituent_vnfr.vnfr_id
- vnfr_map[vnfr_id] = self._store.get_vnfr(vnfr_id)
-
- return vnfr_map
def _extract_ui_elements(self, monp):
ui_fields = ["group_tag", "description", "widget_type", "units", "value_type"]
return dict(zip(ui_fields, ui_data))
- def _convert_nsd_msg(self, nsd_monp):
- """Create initial msg without values"""
- vnfd_to_vnfr = {vnfr.vnfd.id: vnfr_id
- for vnfr_id, vnfr in self._constituent_vnfr_map.items()}
+ def _convert_nsd_msg(self):
+ """Create/update msg. This is also called when a new VNFR is added."""
+
+ # For a single VNFD there might be multiple vnfrs
+ vnfd_to_vnfr = collections.defaultdict(list)
+ for vnfr_id, vnfr in self._constituent_vnfr_map.items():
+ vnfd_to_vnfr[vnfr.vnfd.id].append(vnfr_id)
# First, convert the monp param ref from vnfd to vnfr terms.
vnfr_mon_param_ref = []
- for vnfd_mon in nsd_monp.vnfd_monitoring_param:
- vnfr_id = vnfd_to_vnfr[vnfd_mon.vnfd_id_ref]
+ for vnfd_mon in self.nsd_mon_param_msg.vnfd_monitoring_param:
+ vnfr_ids = vnfd_to_vnfr[vnfd_mon.vnfd_id_ref]
monp_id = vnfd_mon.vnfd_monitoring_param_ref
- self.vnfr_monparams[(vnfr_id, monp_id)] = self.MISSING
+ for vnfr_id in vnfr_ids:
+ key = (vnfr_id, monp_id)
+ if key not in self.vnfr_monparams:
+ self.vnfr_monparams[key] = self.MISSING
- vnfr_mon_param_ref.append({
- 'vnfr_id_ref': vnfr_id,
- 'vnfr_mon_param_ref': monp_id
- })
+ vnfr_mon_param_ref.append({
+ 'vnfr_id_ref': vnfr_id,
+ 'vnfr_mon_param_ref': monp_id
+ })
monp_fields = {
# For now both the NSD and NSR's monp ID are same.
- 'id': nsd_monp.id,
- 'name': nsd_monp.name,
- 'nsd_mon_param_ref': nsd_monp.id,
+ 'id': self.nsd_mon_param_msg.id,
+ 'name': self.nsd_mon_param_msg.name,
+ 'nsd_mon_param_ref': self.nsd_mon_param_msg.id,
'vnfr_mon_param_ref': vnfr_mon_param_ref,
- 'aggregation_type': nsd_monp.aggregation_type
+ 'aggregation_type': self.nsd_mon_param_msg.aggregation_type
}
- ui_fields = self._extract_ui_elements(nsd_monp)
+ ui_fields = self._extract_ui_elements(self.nsd_mon_param_msg)
monp_fields.update(ui_fields)
monp = self.MonParamMsg.from_dict(monp_fields)
value (Tuple): (value_type, value)
"""
self.vnfr_monparams[key] = value
+
def update_ns_value(self, value_field, value):
"""Updates the NS mon-param data with the aggregated value.
def from_handler(cls, handler, monp, callback):
"""Convenience class to build NsrMonitoringParamPoller object.
"""
- return cls(handler.log, handler.dts, handler.loop, monp, callback)
+ return cls(handler.log, handler.dts, handler.loop, handler.project,
+ monp, callback)
- def __init__(self, log, dts, loop, monp, callback=None):
+ def __init__(self, log, dts, loop, project, monp, callback=None):
"""
Args:
monp (NsrMonitoringParam): Param object
callback (None, optional): Callback to be triggered after value has
been aggregated.
"""
- super().__init__(log, dts, loop)
+ super().__init__(log, dts, loop, project)
self.monp = monp
- self.subscribers = []
+ self.subscribers = {}
self.callback = callback
self._agg = None
"""
key = (vnfr_id, monp.id)
value = NsrMonitoringParam.extract_value(monp)
-
if not value:
return
if self.callback:
self.callback(self.monp.nsr_mon_param_msg)
+ @asyncio.coroutine
+ def create_pollers(self, create=False, register=False):
+ if (create):
+ for vnfr_id, monp_id in self.monp.vnfr_ids:
+ key = (vnfr_id, monp_id)
+ callback = functools.partial(self.update_value, vnfr_id=vnfr_id)
+
+ # if the poller is already created, ignore
+ if key in self.subscribers:
+ continue
+
+ self.subscribers[key] = VnfrMonitoringParamSubscriber(
+ self.loop,
+ self.dts,
+ self.loop,
+ self.project,
+ vnfr_id,
+ monp_id,
+ callback=callback)
+
+ if register:
+ yield from self.subscribers[key].register()
+
+ @asyncio.coroutine
+ def update(self, vnfr):
+ self.monp.add_vnfr(vnfr)
+ yield from self.create_pollers(create=False, register=True)
+
+ @asyncio.coroutine
+ def delete(self, vnfr):
+ self.monp.delete_vnfr(vnfr)
+ for vnfr_id, monp_id in list(self.subscribers.keys()):
+ if vnfr_id != vnfr.id:
+ continue
+
+ key = (vnfr_id, monp_id)
+ sub = self.subscribers.pop(key)
+ sub.deregister()
+
+
@asyncio.coroutine
def register(self):
- for vnfr_id, monp_id in self.monp.vnfr_ids:
- callback = functools.partial(self.update_value, vnfr_id=vnfr_id)
- self.subscribers.append(VnfrMonitoringParamSubscriber(
- self.loop, self.dts, self.loop, vnfr_id, monp_id, callback=callback))
+ yield from self.create_pollers()
@asyncio.coroutine
def start(self):
- for sub in self.subscribers:
+ for sub in self.subscribers.values():
yield from sub.register()
def stop(self):
- for sub in self.subscribers:
+ for sub in self.subscribers.values():
sub.deregister()
-
+
+ def retrieve_data(self):
+ return self.monp.nsr_mon_param_msg
class NsrMonitorDtsHandler(mano_dts.DtsHandler):
""" NSR monitoring class """
- def __init__(self, log, dts, loop, nsr, constituent_vnfrs, store):
+ def __init__(self, log, dts, loop, project, nsr, constituent_vnfrs):
"""
Args:
- nsr (RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr): NSR object
+ nsr (RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr): NSR object
constituent_vnfrs (list): list of VNFRs in NSR
- store (SubscriberStore): Store instance
"""
- super().__init__(log, dts, loop)
+ super().__init__(log, dts, loop, project)
self.nsr = nsr
- self.store = store
self.constituent_vnfrs = constituent_vnfrs
+ self.dts_updates = dict()
+ self.dts_update_task = None
self.mon_params_pollers = []
-
+
+ def nsr_xpath(self):
+ return self.project.add_project("D,/nsr:ns-instance-opdata/nsr:nsr" +
+ "[nsr:ns-instance-config-ref={}]".format(quoted_key(self.nsr.ns_instance_config_ref)))
+
def xpath(self, param_id=None):
- return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
- "[nsr:ns-instance-config-ref='{}']".format(self.nsr.ns_instance_config_ref) +
+ return self.project.add_project("D,/nsr:ns-instance-opdata/nsr:nsr" +
+ "[nsr:ns-instance-config-ref={}]".format(quoted_key(self.nsr.ns_instance_config_ref)) +
"/nsr:monitoring-param" +
- ("[nsr:id='{}']".format(param_id) if param_id else ""))
-
+ ("[nsr:id={}]".format(quoted_key(param_id)) if param_id else ""))
+
@asyncio.coroutine
def register(self):
+ @asyncio.coroutine
+ def on_prepare(xact_info, query_action, ks_path, msg):
+ nsrmsg =None
+ xpath=None
+ if (self.reg_ready):
+ if (query_action == rwdts.QueryAction.READ):
+ if (len(self.mon_params_pollers)):
+ nsr_dict = {"ns_instance_config_ref": self.nsr.ns_instance_config_ref}
+ nsrmsg = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr. \
+ from_dict(nsr_dict)
+ xpath = self.nsr_xpath()
+
+ for poller in self.mon_params_pollers:
+ mp_dict = \
+ NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_MonitoringParam. \
+ from_dict(poller.retrieve_data().as_dict())
+ nsrmsg.monitoring_param.append(mp_dict)
+
+ try:
+ xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK,
+ xpath=self.nsr_xpath(),
+ msg=nsrmsg)
+ except rift.tasklets.dts.ResponseError:
+ pass
+
+ @asyncio.coroutine
+ def on_ready(regh, status):
+ self.reg_ready = 1
+
+ handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare, on_ready=on_ready)
+ self.reg_ready = 0
+
self.reg = yield from self.dts.register(xpath=self.xpath(),
- flags=rwdts.Flag.PUBLISHER|rwdts.Flag.CACHE|rwdts.Flag.NO_PREP_READ)
+ flags=rwdts.Flag.PUBLISHER,
+ handler=handler)
assert self.reg is not None
+ @asyncio.coroutine
+ def nsr_monparam_update(self):
+ #check if the earlier xact is done or there is an xact
+ try:
+ if (len(self.dts_updates) == 0):
+ self.dts_update_task = None
+ return
+ nsr_dict = {"ns_instance_config_ref": self.nsr.ns_instance_config_ref}
+ nsrmsg = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr.from_dict(nsr_dict)
+
+ for k,v in self.dts_updates.items():
+ mp_dict = NsrYang. \
+ YangData_RwProject_Project_NsInstanceOpdata_Nsr_MonitoringParam. \
+ from_dict(v.as_dict())
+ nsrmsg.monitoring_param.append(mp_dict)
+ self.dts_updates.clear()
+
+ yield from self.dts.query_update(self.nsr_xpath(), rwdts.XactFlag.ADVISE,
+ nsrmsg)
+
+ self.dts_update_task = None
+ if (len(self.dts_updates) == 0):
+ #schedule a DTS task to update the NSR again
+ self.add_dtsupdate_task()
+
+ except Exception as e:
+ self.log.exception("Exception updating NSR mon-param: %s", str(e))
+
+ def add_dtsupdate_task(self):
+ if (self.dts_update_task is None):
+ self.dts_update_task = asyncio.ensure_future(self.nsr_monparam_update(), loop=self.loop)
+
def callback(self, nsr_mon_param_msg):
"""Callback that triggers update.
"""
- self.reg.update_element(
- self.xpath(param_id=nsr_mon_param_msg.id),
- nsr_mon_param_msg)
-
+ self.dts_updates[nsr_mon_param_msg.id] = nsr_mon_param_msg
+ #schedule a DTS task to update the NSR if one does not exist
+ self.add_dtsupdate_task()
+
@asyncio.coroutine
def start(self):
- nsd = self.store.get_nsd(self.nsr.nsd_ref)
+ nsd = self.project.get_nsd(self.nsr.nsd_ref)
+
mon_params = NsrMonitoringParam.create_nsr_mon_params(
nsd,
self.constituent_vnfrs,
- self.store)
+ self.project)
for monp in mon_params:
poller = NsrMonitoringParamPoller.from_handler(
yield from poller.register()
yield from poller.start()
+ @asyncio.coroutine
+ def update(self, additional_vnfrs):
+ for vnfr in additional_vnfrs:
+ for poller in self.mon_params_pollers:
+ yield from poller.update(vnfr)
+
+ @asyncio.coroutine
+ def delete(self, deleted_vnfrs):
+ for vnfr in deleted_vnfrs:
+ for poller in self.mon_params_pollers:
+ yield from poller.delete(vnfr)
+
def stop(self):
self.deregister()
for poller in self.mon_params_pollers:
if self.reg is not None:
self.reg.deregister()
self.reg = None
+
+ def apply_vnfr_mon(self, msg, vnfr_id):
+ """ Change in vnfr mon to ne applied"""
+ for poller in self.mon_params_pollers:
+ if (poller.monp.mon_param_name == msg.name):
+ poller.update_value(msg, rwdts.QueryAction.UPDATE, vnfr_id)
"""
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
from gi.repository import (
RwDts as rwdts,
RwLaunchpadYang,
+ NsrYang,
ProtobufC)
import rift.mano.cloud
import rift.mano.dts as subscriber
import rift.tasklets
-
+import concurrent.futures
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ )
from . import vnfr_core
from . import nsr_core
-class MonitoringParameterTasklet(rift.tasklets.Tasklet):
- """The main task of this Tasklet is to listen for VNFR changes and once the
- VNFR hits the running state, triggers the monitor.
- """
- def __init__(self, *args, **kwargs):
- try:
- super().__init__(*args, **kwargs)
- self.rwlog.set_category("rw-monitor-log")
- except Exception as e:
- self.log.exception(e)
+class MonParamProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(MonParamProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
self.vnfr_subscriber = None
- self.store = None
self.vnfr_monitors = {}
self.nsr_monitors = {}
+ self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
# Needs to be moved to store once the DTS bug is resolved
+ # Gather all VNFRs
self.vnfrs = {}
- def start(self):
- super().start()
-
- self.log.info("Starting MonitoringParameterTasklet")
- self.log.debug("Registering with dts")
-
- self.dts = rift.tasklets.DTS(
- self.tasklet_info,
- RwLaunchpadYang.get_schema(),
- self.loop,
- self.on_dts_state_change
- )
-
- self.vnfr_subscriber = subscriber.VnfrCatalogSubscriber.from_tasklet(
+ self.vnfr_subscriber = subscriber.VnfrCatalogSubscriber.from_project(
self,
callback=self.handle_vnfr)
- self.nsr_subsriber = subscriber.NsrCatalogSubscriber.from_tasklet(
+ self.nsr_subsriber = subscriber.NsrCatalogSubscriber.from_project(
self,
callback=self.handle_nsr)
- self.store = subscriber.SubscriberStore.from_tasklet(self)
+ self._nsd_subscriber = subscriber.NsdCatalogSubscriber.from_project(self)
+ self._vnfd_subscriber = subscriber.VnfdCatalogSubscriber.from_project(self)
self.log.debug("Created DTS Api GI Object: %s", self.dts)
- def stop(self):
- try:
- self.dts.deinit()
- except Exception as e:
- self.log.exception(e)
-
@asyncio.coroutine
- def init(self):
+ def register (self):
self.log.debug("creating vnfr subscriber")
- yield from self.store.register()
+ yield from self._nsd_subscriber.register()
+ yield from self._vnfd_subscriber.register()
yield from self.vnfr_subscriber.register()
yield from self.nsr_subsriber.register()
- @asyncio.coroutine
- def run(self):
- pass
- @asyncio.coroutine
- def on_dts_state_change(self, state):
- """Handle DTS state change
+ def deregister(self):
+ self.log.debug("De-register vnfr project {}".format(self.name))
+ self._nsd_subscriber.deregister()
+ self._vnfd_subscriber.deregister()
+ self.vnfr_subscriber.deregister()
+ self.nsr_subsriber.deregister()
- Take action according to current DTS state to transition application
- into the corresponding application state
-
- Arguments
- state - current dts state
+ def _unwrap(self, values, id_name):
+ try:
+ return values[0]
+ except KeyError:
+ self.log.exception("Unable to find the object with the given "
+ "ID {}".format(id_name))
- """
- switch = {
- rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
- rwdts.State.CONFIG: rwdts.State.RUN,
- }
+ def get_vnfd(self, vnfd_id):
+ values = [vnfd for vnfd in list(self._vnfd_subscriber.reg.get_xact_elements()) if vnfd.id == vnfd_id]
+ return self._unwrap(values, vnfd_id)
- handlers = {
- rwdts.State.INIT: self.init,
- rwdts.State.RUN: self.run,
- }
+ def get_nsd(self, nsd_id):
+ values = [nsd for nsd in list(self._nsd_subscriber.reg.get_xact_elements()) if nsd.id == nsd_id]
+ return self._unwrap(values, nsd_id)
- # Transition application to next state
- handler = handlers.get(state, None)
- if handler is not None:
- yield from handler()
-
- # Transition dts to next state
- next_state = switch.get(state, None)
- if next_state is not None:
- self.dts.handle.set_state(next_state)
def handle_vnfr(self, vnfr, action):
"""Starts a monitoring parameter job for every VNFR that reaches
def vnfr_create():
# if vnfr.operational_status == "running" and vnfr.id not in self.vnfr_monitors:
- if vnfr.config_status == "configured" and vnfr.id not in self.vnfr_monitors:
+ vnfr_status = (vnfr.operational_status == "running" and
+ vnfr.config_status in ["configured", "config_not_needed"])
+
+ if vnfr_status and vnfr.id not in self.vnfr_monitors:
vnf_mon = vnfr_core.VnfMonitorDtsHandler.from_vnf_data(
self,
vnfr,
- self.store.get_vnfd(vnfr.vnfd.id))
+ self.get_vnfd(vnfr.vnfd.id))
self.vnfr_monitors[vnfr.id] = vnf_mon
self.vnfrs[vnfr.id] = vnfr
@asyncio.coroutine
def task():
yield from vnf_mon.register()
+ if vnfr.nsr_id_ref in self.nsr_monitors:
+ vnf_mon.update_nsr_mon(self.nsr_monitors[vnfr.nsr_id_ref])
vnf_mon.start()
+ #self.update_nsrs(vnfr, action)
self.loop.create_task(task())
vnf_mon = self.vnfr_monitors.pop(vnfr.id)
vnf_mon.stop()
self.vnfrs.pop(vnfr.id)
+ #self.update_nsrs(vnfr, action)
if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
vnfr_create()
elif action == rwdts.QueryAction.DELETE:
vnfr_delete()
+ def update_nsrs(self, vnfr, action):
+ if vnfr.nsr_id_ref not in self.nsr_monitors:
+ return
+
+ monitor = self.nsr_monitors[vnfr.nsr_id_ref]
+
+ if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+ @asyncio.coroutine
+ def update_vnfr():
+ yield from monitor.update([vnfr])
+
+ self.loop.create_task(update_vnfr())
+ elif action == rwdts.QueryAction.DELETE:
+ @asyncio.coroutine
+ def delete_vnfr():
+ try:
+ yield from monitor.delete([vnfr])
+ except Exception as e:
+ self.log.exception(str(e))
+
+ self.loop.create_task(delete_vnfr())
+
+
def handle_nsr(self, nsr, action):
"""Callback for NSR opdata changes. Creates a publisher for every
NS that moves to config state.
Args:
- nsr (RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr): Ns Opdata
+ nsr (RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr): Ns Opdata
action (rwdts.QueryAction): Action type of the change.
"""
+
def nsr_create():
- # if nsr.operational_status == "running" and nsr.ns_instance_config_ref not in self.nsr_monitors:
- if nsr.config_status == "configured" and nsr.ns_instance_config_ref not in self.nsr_monitors:
- nsr_mon = nsr_core.NsrMonitorDtsHandler(
- self.log,
- self.dts,
- self.loop,
- nsr,
- list(self.vnfrs.values()),
- self.store
- )
-
- self.nsr_monitors[nsr.ns_instance_config_ref] = nsr_mon
+ # TODO clean up the if-else mess, exception
- @asyncio.coroutine
- def task():
- yield from nsr_mon.register()
- yield from nsr_mon.start()
+ success_state = (nsr.operational_status == "running" and
+ nsr.config_status == "configured")
- self.loop.create_task(task())
+ if not success_state:
+ return
+
+ if nsr.ns_instance_config_ref in self.nsr_monitors:
+ return
+ constituent_vnfrs = []
+ for vnfr_id in nsr.constituent_vnfr_ref:
+ if (vnfr_id.vnfr_id in self.vnfrs):
+ vnfr_obj = self.vnfrs[vnfr_id.vnfr_id]
+ constituent_vnfrs.append(vnfr_obj)
+ else:
+ pass
+
+ nsr_mon = nsr_core.NsrMonitorDtsHandler(
+ self.log,
+ self.dts,
+ self.loop,
+ self,
+ nsr,
+ constituent_vnfrs
+ )
+ for vnfr_id in nsr.constituent_vnfr_ref:
+ if vnfr_id.vnfr_id in self.vnfr_monitors:
+ self.vnfr_monitors[vnfr_id.vnfr_id].update_nsr_mon(nsr_mon)
+
+ self.nsr_monitors[nsr.ns_instance_config_ref] = nsr_mon
+
+
+ @asyncio.coroutine
+ def task():
+ try:
+ yield from nsr_mon.register()
+ yield from nsr_mon.start()
+ except Exception as e:
+ self.log.exception(e)
+
+ self.loop.create_task(task())
def nsr_delete():
if nsr.ns_instance_config_ref in self.nsr_monitors:
- # if vnfr.operational_status == "running" and vnfr.id in self.vnfr_monitors:
nsr_mon = self.nsr_monitors.pop(nsr.ns_instance_config_ref)
nsr_mon.stop()
nsr_create()
elif action == rwdts.QueryAction.DELETE:
nsr_delete()
+
+
+class MonitoringParameterTasklet(rift.tasklets.Tasklet):
+ """The main task of this Tasklet is to listen for VNFR changes and once the
+ VNFR hits the running state, triggers the monitor.
+ """
+ def __init__(self, *args, **kwargs):
+ try:
+ super().__init__(*args, **kwargs)
+ self.rwlog.set_category("rw-monitor-log")
+ except Exception as e:
+ self.log.exception(e)
+
+ self._project_handler = None
+ self.projects = {}
+
+ def start(self):
+ super().start()
+
+ self.log.info("Starting MonitoringParameterTasklet")
+ self.log.debug("Registering with dts")
+
+ self.dts = rift.tasklets.DTS(
+ self.tasklet_info,
+ NsrYang.get_schema(),
+ self.loop,
+ self.on_dts_state_change
+ )
+
+ def stop(self):
+ try:
+ self.dts.deinit()
+ except Exception as e:
+ self.log.exception(e)
+
+ @asyncio.coroutine
+ def init(self):
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, MonParamProject)
+ self.project_handler.register()
+
+ @asyncio.coroutine
+ def run(self):
+ pass
+
+ @asyncio.coroutine
+ def on_dts_state_change(self, state):
+ """Handle DTS state change
+
+ Take action according to current DTS state to transition application
+ into the corresponding application state
+
+ Arguments
+ state - current dts state
+
+ """
+ switch = {
+ rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+ rwdts.State.CONFIG: rwdts.State.RUN,
+ }
+
+ handlers = {
+ rwdts.State.INIT: self.init,
+ rwdts.State.RUN: self.run,
+ }
+
+ # Transition application to next state
+ handler = handlers.get(state, None)
+ if handler is not None:
+ yield from handler()
+
+ # Transition dts to next state
+ next_state = switch.get(state, None)
+ if next_state is not None:
+ self.dts.handle.set_state(next_state)
#
import asyncio
-import logging
import collections
import concurrent
-import types
-
+import gi
+import logging
import requests
import requests.auth
import tornado.escape
+import types
from requests.packages.urllib3.exceptions import InsecureRequestWarning
-import gi
gi.require_version('RwDts', '1.0')
import rift.tasklets
from gi.repository import (
import rift.mano.dts as mano_dts
import rwlogger
import xmltodict, json
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
class MonitoringParamError(Exception):
"""Monitoring Parameter error"""
class HTTPEndpoint(object):
- def __init__(self, log, loop, ip_address, ep_msg):
+ def __init__(self, log, loop, ip_address, ep_msg, executor=None):
self._log = log
self._loop = loop
self._ip_address = ip_address
self._ep_msg = ep_msg
-
+ self._executor = executor
+
# This is to suppress HTTPS related warning as we do not support
# certificate verification yet
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
return self._ep_msg.method
return "GET"
+ @property
+ def query_data(self):
+ if self._ep_msg.has_field("data"):
+ return self._ep_msg.data
+ return None
+
@property
def username(self):
if self._ep_msg.has_field("username"):
def _poll(self):
try:
resp = self._session.request(
- self.method, self.url, timeout=10, auth=self.auth,
- headers=self.headers, verify=False
- )
+ self.method, self.url, timeout=10, auth=self.auth,
+ headers=self.headers, verify=False, data=self.query_data
+ )
+
resp.raise_for_status()
except requests.exceptions.RequestException as e:
msg = "Got HTTP error when request monitoring method {} from url {}: {}".format(
@asyncio.coroutine
def poll(self):
try:
- with concurrent.futures.ThreadPoolExecutor(1) as executor:
- resp = yield from self._loop.run_in_executor(
+ if (self._executor is None):
+ with concurrent.futures.ThreadPoolExecutor(1) as executor:
+ resp = yield from self._loop.run_in_executor(
executor,
self._poll,
- )
+ )
+ else:
+ resp = yield from self._loop.run_in_executor(
+ self._executor,
+ self._poll,
+ )
except MonitoringParamError as e:
msg = "Caught exception when polling http endpoint: %s" % str(e)
self._on_update_cb = on_update_cb
self._poll_task = None
-
+
@property
def poll_interval(self):
return self._endpoint.poll_interval
def _apply_response_to_mon_params(self, response_msg):
for mon_param in self._mon_params:
mon_param.extract_value_from_response(response_msg)
-
+
self._notify_subscriber()
-
+
@asyncio.coroutine
def _poll_loop(self):
self._log.debug("Starting http endpoint %s poll loop", self._endpoint.url)
try:
response = yield from self._endpoint.poll()
self._apply_response_to_mon_params(response)
+ except MonitoringParamError as e:
+ pass
except concurrent.futures.CancelledError as e:
return
self._poll_task = None
+ def retrieve(self, xact_info, ks_path, send_handler):
+ send_handler(xact_info, self._get_mon_param_msgs())
+
class VnfMonitoringParamsController(object):
def __init__(self, log, loop, vnfr_id, management_ip,
http_endpoint_msgs, monitoring_param_msgs,
- on_update_cb=None):
+ on_update_cb=None, executor=None):
self._log = log
self._loop = loop
self._vnfr_id = vnfr_id
+ self._executor = executor
self._management_ip = management_ip
self._http_endpoint_msgs = http_endpoint_msgs
self._monitoring_param_msgs = monitoring_param_msgs
self._endpoints, self._mon_params
)
self._endpoint_pollers = self._create_endpoint_pollers(self._endpoint_mon_param_map)
-
+
def _create_endpoints(self):
path_endpoint_map = {}
for ep_msg in self._http_endpoint_msgs:
- endpoint = HTTPEndpoint(
- self._log,
- self._loop,
- self._management_ip,
- ep_msg,
- )
+ endpoint = HTTPEndpoint(self._log,
+ self._loop,
+ self._management_ip,
+ ep_msg,self._executor)
+
path_endpoint_map[endpoint.path] = endpoint
return path_endpoint_map
mon_params,
self._on_update_cb
)
-
pollers.append(poller)
-
+
return pollers
@property
for poller in self._endpoint_pollers:
poller.stop()
-
+ def retrieve(self, xact_info, ks_path, send_handler):
+ """Retrieve Monitoring params information """
+ for poller in self._endpoint_pollers:
+ poller.retrieve(xact_info, ks_path, send_handler)
+
class VnfMonitorDtsHandler(mano_dts.DtsHandler):
""" VNF monitoring class """
# List of list: So we need to register for the list in the deepest level
XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr/vnfr:monitoring-param"
@classmethod
- def from_vnf_data(cls, tasklet, vnfr_msg, vnfd_msg):
- handler = cls(tasklet.log, tasklet.dts, tasklet.loop,
+ def from_vnf_data(cls, project, vnfr_msg, vnfd_msg):
+ handler = cls(project.log, project.dts, project.loop, project,
vnfr_msg.id, vnfr_msg.mgmt_interface.ip_address,
- vnfd_msg.monitoring_param, vnfd_msg.http_endpoint)
+ vnfd_msg.monitoring_param, vnfd_msg.http_endpoint)
return handler
- def __init__(self, log, dts, loop, vnfr_id, mgmt_ip, params, endpoints):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project, vnfr_id, mgmt_ip, params, endpoints, executor=None):
+ super().__init__(log, dts, loop, project)
self._mgmt_ip = mgmt_ip
self._vnfr_id = vnfr_id
-
+ self._executor = executor
+
mon_params = []
for mon_param in params:
- param = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict(
+ param = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MonitoringParam.from_dict(
mon_param.as_dict()
)
mon_params.append(param)
http_endpoints = []
for endpoint in endpoints:
- endpoint = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint.from_dict(
+ endpoint = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_HttpEndpoint.from_dict(
endpoint.as_dict()
)
http_endpoints.append(endpoint)
self.log.debug(" - Monitoring Params: %s", mon_params)
self._mon_param_controller = VnfMonitoringParamsController(
- self.log,
- self.loop,
- self._vnfr_id,
- self._mgmt_ip,
- http_endpoints,
- mon_params,
- self.on_update_mon_params
- )
+ self.log,
+ self.loop,
+ self._vnfr_id,
+ self._mgmt_ip,
+ http_endpoints,
+ mon_params,
+ on_update_cb = self.on_update_mon_params,
+ executor=self._executor,
+ )
+ self._nsr_mon = None
def on_update_mon_params(self, mon_param_msgs):
for param_msg in mon_param_msgs:
- self.reg.update_element(
- self.xpath(param_msg.id),
- param_msg,
- rwdts.XactFlag.ADVISE
- )
-
+ #self.reg.update_element(
+ # self.xpath(param_msg.id),
+ # param_msg,
+ # rwdts.XactFlag.ADVISE
+ # )
+ if (self._nsr_mon is not None):
+ self._nsr_mon.apply_vnfr_mon(param_msg, self._vnfr_id)
+
+ def update_dts_read(self, xact_info, mon_param_msgs):
+ for param_msg in mon_param_msgs:
+ xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.MORE,
+ xpath=self.xpath(param_msg.id),
+ msg=param_msg)
+
def start(self):
self._mon_param_controller.start()
def xpath(self, param_id=None):
""" Monitoring params xpath """
- return("D,/vnfr:vnfr-catalog" +
- "/vnfr:vnfr[vnfr:id='{}']".format(self._vnfr_id) +
+ return self.project.add_project(("D,/vnfr:vnfr-catalog" +
+ "/vnfr:vnfr[vnfr:id={}]".format(quoted_key(self._vnfr_id)) +
"/vnfr:monitoring-param" +
- ("[vnfr:id='{}']".format(param_id) if param_id else ""))
+ ("[vnfr:id={}]".format(quoted_key(param_id)) if param_id else "")))
@property
def msg(self):
def __del__(self):
self.stop()
-
+
@asyncio.coroutine
def register(self):
""" Register with dts """
-
+ @asyncio.coroutine
+ def on_prepare(xact_info, query_action, ks_path, msg):
+ if (self.reg_ready):
+ if (query_action == rwdts.QueryAction.READ):
+ self._mon_param_controller.retrieve(xact_info, ks_path, self.update_dts_read)
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ @asyncio.coroutine
+ def on_ready(regh, status):
+ self.reg_ready = 1
+
+ handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare, on_ready=on_ready)
+ self.reg_ready = 0
self.reg = yield from self.dts.register(xpath=self.xpath(),
- flags=rwdts.Flag.PUBLISHER|rwdts.Flag.CACHE|rwdts.Flag.NO_PREP_READ)
+ flags=rwdts.Flag.PUBLISHER,
+ handler=handler)
assert self.reg is not None
self.reg.deregister()
self.reg = None
self._vnfr = None
+
+ def update_nsr_mon(self, nsr_mon):
+ """ update nsr mon """
+ self._nsr_mon = nsr_mon
+
'ping-response-rx-count': 10
}
- mon_param_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam()
+ mon_param_msg = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MonitoringParam()
mon_param_msg.from_dict({
'id': '1',
'name': 'ping-request-tx-count',
'units': 'packets'
})
- endpoint_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint()
+ endpoint_msg = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_HttpEndpoint()
endpoint_msg.from_dict({
'path': ping_path,
'polling_interval_secs': 1,
'ping-response-rx-count': 10
}
- mon_param_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam()
+ mon_param_msg = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MonitoringParam()
mon_param_msg.from_dict({
'id': '1',
'name': 'ping-request-tx-count',
'units': 'packets'
})
- endpoint_msg = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint()
+ endpoint_msg = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_HttpEndpoint()
endpoint_msg.from_dict({
'path': ping_path,
'https': 'true',
self.assertEqual(value, 12112)
+class vCPEStatTest(unittest.TestCase):
+ system_response = {"timestamp":1473455051,
+ "applicationLoad":[
+{ "service":"RIF", "instance":1, "gtpMessagesPerSec":0},
+{"service":"CPE", "instance":1, "tps":0},
+{"service":"DPE", "instance":1, "uplinkThroughput4G":0, "downlinkThroughput4G":0, "numDefaultBearers":0, "numDedicatedBearers":0 },
+{"service":"VEM", "instance":1 },
+{"service":"CDF", "instance":1, "tps":0},
+{"service":"S6A", "instance":1, "tps":0},
+{"service":"SDB", "instance":1, "queriesPerSec":0 }],
+ "resourceLoad":[
+{ "service":"RIF", "instance":1, "cpu":0, "mem":18, "compCpu":0 },
+{ "service":"CPE", "instance":1, "cpu":0, "mem":26, "compCpu":0 },
+{ "service":"DPE", "instance":1, "cpu":0, "mem":31, "compCpu":0 },
+{ "service":"VEM", "instance":1, "cpu":1, "mem":34, "compCpu":0 },
+{ "service":"CDF", "instance":1, "cpu":0, "mem":18, "compCpu":0 },
+{ "service":"S6A", "instance":1, "cpu":1, "mem":21, "compCpu":0 },
+{ "service":"SDB", "instance":1, "memUsedByData":255543560, "swapUtil":0, "swapTotal":3689934848, "swapUsed":0,"memUtil":0, "memTotal":12490944512, "memFree":10986942464, "cpu":2}] }
+
+
+ def test_object_path_value_querier(self):
+ kv_querier = mon_params.ObjectPathValueQuerier(logger, "$.applicationLoad[@.service is 'DPE'].uplinkThroughput4G")
+ value = kv_querier.query(tornado.escape.json_encode(self.system_response))
+ self.assertEqual(value, 0)
+ kv_querier = mon_params.ObjectPathValueQuerier(logger, "$.resourceLoad[@.service is 'DPE'].mem")
+ value = kv_querier.query(tornado.escape.json_encode(self.system_response))
+ self.assertEqual(value, 31)
+
class XMLReponseTest(unittest.TestCase):
xml_response = "<response status='success'><result> <entry> <current>2</current> <vsys>1</vsys> <maximum>0</maximum> <throttled>0</throttled> </entry> </result></response>"
RwLaunchpadYang as launchpadyang,
RwDts as rwdts,
RwVnfrYang,
- RwVnfdYang,
- RwNsdYang
+ RwProjectVnfdYang as RwVnfdYang,
+ RwProjectNsdYang as RwNsdYang,
)
import utest_mon_params
class MonParamMsgGenerator(object):
def __init__(self, num_messages=1):
ping_path = r"/api/v1/ping/stats"
- self._endpoint_msg = vnfryang.YangData_Vnfr_VnfrCatalog_Vnfr_HttpEndpoint.from_dict({
+ self._endpoint_msg = vnfryang.YangData_RwProject_Project_VnfrCatalog_Vnfr_HttpEndpoint.from_dict({
'path': ping_path,
'https': 'true',
'polling_interval_secs': 1,
self._mon_param_msgs = []
for i in range(1, num_messages):
- self._mon_param_msgs.append(vnfryang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict({
+ self._mon_param_msgs.append(vnfryang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MonitoringParam.from_dict({
'id': '%s' % i,
'name': 'param_num_%s' % i,
'json_query_method': "NAMEKEY",
@classmethod
def configure_timeout(cls):
- return 240
+ return 480
def configure_test(self, loop, test_id):
self.log.debug("STARTING - %s", test_id)
def setup_mock_store(self, aggregation_type, monps, legacy=False):
store = mock.MagicMock()
- mock_vnfd = RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd.from_dict({
+ mock_vnfd = RwVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd.from_dict({
'id': "1",
'monitoring_param': [
{'description': 'no of ping requests',
})
store.get_vnfd = mock.MagicMock(return_value=mock_vnfd)
- mock_vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict({
+ mock_vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.from_dict({
'id': '1',
'monitoring_param': ([monp.as_dict() for monp in monps] if not legacy else [])
})
- mock_vnfr.vnfd = vnfryang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict({'id': '1'})
+ mock_vnfr.vnfd = vnfryang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vnfd.from_dict({'id': '1'})
store.get_vnfr = mock.MagicMock(return_value=mock_vnfr)
- mock_nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict({
+ mock_nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr.from_dict({
'ns_instance_config_ref': "1",
'name_ref': "Foo",
'constituent_vnfr_ref': [{'vnfr_id': mock_vnfr.id}],
'vnfd_monitoring_param_ref': '2'}]
}]
- mock_nsd = RwNsdYang.YangData_Nsd_NsdCatalog_Nsd.from_dict({
+ mock_nsd = RwNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd.from_dict({
'id': str(uuid.uuid1()),
'monitoring_param': (monp if not legacy else [])
})
def register_vnf_publisher(self):
yield from self.vnf_handler.register()
- def add_param_to_publisher(self):
+ def add_param_to_publisher(self, publisher):
msg = self.msg_gen.next_message()
- self.vnf_handler.on_update_mon_params([msg])
+ publisher.on_update_mon_params([msg])
return msg
@asyncio.coroutine
@rift.test.dts.async_test
def _test_add_vnf_mon_params(self):
yield from self.register_vnf_publisher()
- self.add_param_to_publisher()
+ self.add_param_to_publisher(self.vnf_handler)
yield from self.register_vnf_test_subscriber()
- self.add_param_to_publisher()
+ self.add_param_to_publisher(self.vnf_handler)
# RIFT-12888: Elements do not go immediately into cache after on_prepare.
# Because of this, we can't guarantee that the second param will actually be
def _test_publish(self, aggregation_type, expected_value, legacy=False):
- self.msg_gen = MonParamMsgGenerator(4)
+ self.msg_gen = MonParamMsgGenerator(5)
store = self.setup_mock_store(aggregation_type=aggregation_type,
monps=self.msg_gen.mon_param_msgs,
legacy=legacy)
published_xpaths = yield from self.get_published_xpaths()
yield from self.register_vnf_publisher()
- self.add_param_to_publisher()
- self.add_param_to_publisher()
+ self.add_param_to_publisher(self.vnf_handler)
+ self.add_param_to_publisher(self.vnf_handler)
nsr_id = store.get_nsr().ns_instance_config_ref
- yield from asyncio.sleep(5, loop=self.loop)
+ yield from asyncio.sleep(2, loop=self.loop)
itr = yield from self.dts.query_read(self.nsr_handler.xpath(),
rwdts.XactFlag.MERGE)
def test_legacy_nsr_monitor_publish_avg(self):
yield from self._test_publish("AVERAGE", 1, legacy=True)
+ @rift.test.dts.async_test
+ def test_vnfr_add_delete(self):
+ yield from self._test_publish("SUM", 3)
+
+ self.msg_gen = MonParamMsgGenerator(5)
+ store = self.setup_mock_store(aggregation_type="SUM",
+ monps=self.msg_gen.mon_param_msgs)
+ new_vnf_handler = vnf_mon_params.VnfMonitorDtsHandler(
+ self.log, self.dts, self.loop, 2, "2.2.2.1",
+ self.msg_gen.mon_param_msgs, self.msg_gen.endpoint_msgs
+ )
+ yield from new_vnf_handler.register()
+
+ # add a new vnfr
+ new_vnfr = store.get_vnfr()
+ new_vnfr.id = '2'
+ yield from self.nsr_handler.update([new_vnfr])
+
+ # check if the newly created one has been added in the model
+ poller = self.nsr_handler.mon_params_pollers[0]
+ assert len(poller.monp.nsr_mon_param_msg.vnfr_mon_param_ref) == 4
+ assert len(poller.subscribers) == 4
+ assert len(poller.monp.vnfr_monparams) == 4
+
+ # publish new values
+ yield from asyncio.sleep(2, loop=self.loop)
+ self.add_param_to_publisher(new_vnf_handler)
+ self.add_param_to_publisher(new_vnf_handler)
+ yield from asyncio.sleep(3, loop=self.loop)
+
+ itr = yield from self.dts.query_read(self.nsr_handler.xpath(),
+ rwdts.XactFlag.MERGE)
+
+ values = []
+ for res in itr:
+ result = yield from res
+ nsr_monp = result.result
+ values.append(nsr_monp.value_integer)
+
+ assert values[0] == 6
+
+ # delete the VNFR
+ yield from self.nsr_handler.delete([new_vnfr])
+
+ # check if the newly created one has been added in the model
+ poller = self.nsr_handler.mon_params_pollers[0]
+ assert len(poller.monp.vnfr_monparams) == 2
+ assert len(poller.monp.nsr_mon_param_msg.vnfr_mon_param_ref) == 2
+ assert len(poller.subscribers) == 2
+
+ self.msg_gen = MonParamMsgGenerator(5)
+ self.add_param_to_publisher(self.vnf_handler)
+ self.add_param_to_publisher(self.vnf_handler)
+ yield from asyncio.sleep(2, loop=self.loop)
+
+ itr = yield from self.dts.query_read(self.nsr_handler.xpath(),
+ rwdts.XactFlag.MERGE)
+ values = []
+ for res in itr:
+ result = yield from res
+ nsr_monp = result.result
+ values.append(nsr_monp.value_integer)
+
+ assert values[0] == 3
+
def main():
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
##
# This function creates an install target for the plugin artifacts
##
-rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+rift_install_gobject_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py COMPONENT ${INSTALL_COMPONENT})
# Workaround RIFT-6485 - rpmbuild defaults to python2 for
# anything not in a site-packages directory so we have to
rift/tasklets/${TASKLET_NAME}/__init__.py
rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
rift/tasklets/${TASKLET_NAME}/rwnsm_conman.py
+ rift/tasklets/${TASKLET_NAME}/nsmpluginbase.py
rift/tasklets/${TASKLET_NAME}/rwnsmplugin.py
rift/tasklets/${TASKLET_NAME}/openmano_nsm.py
rift/tasklets/${TASKLET_NAME}/cloud.py
rift/tasklets/${TASKLET_NAME}/xpath.py
rift/tasklets/${TASKLET_NAME}/rwvnffgmgr.py
rift/tasklets/${TASKLET_NAME}/scale_group.py
- COMPONENT ${PKG_LONG_NAME}
+ rift/tasklets/${TASKLET_NAME}/subscriber.py
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY)
-
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
)
import rift.mano.cloud
+import rift.mano.ro_account
import rift.mano.dts as mano_dts
import rift.tasklets
-from . import openmano_nsm
from . import rwnsmplugin
-
-class RwNsPlugin(rwnsmplugin.NsmPluginBase):
- """
- RW Implentation of the NsmPluginBase
- """
- def __init__(self, dts, log, loop, publisher, ro_account):
- self._dts = dts
- self._log = log
- self._loop = loop
-
- def set_state(self, nsr_id, state):
- pass
-
- def create_nsr(self, nsr_msg, nsd,key_pairs=None):
- """
- Create Network service record
- """
- pass
-
- @asyncio.coroutine
- def deploy(self, nsr):
- pass
-
- @asyncio.coroutine
- def instantiate_ns(self, nsr, config_xact):
- """
- Instantiate NSR with the passed nsr id
- """
- yield from nsr.instantiate(config_xact)
-
- @asyncio.coroutine
- def instantiate_vnf(self, nsr, vnfr, scaleout=False):
- """
- Instantiate NSR with the passed nsr id
- """
- yield from vnfr.instantiate(nsr)
-
- @asyncio.coroutine
- def instantiate_vl(self, nsr, vlr):
- """
- Instantiate NSR with the passed nsr id
- """
- yield from vlr.instantiate()
-
- @asyncio.coroutine
- def terminate_ns(self, nsr):
- """
- Terminate the network service
- """
- pass
-
- @asyncio.coroutine
- def terminate_vnf(self, vnfr):
- """
- Terminate the network service
- """
- yield from vnfr.terminate()
-
- @asyncio.coroutine
- def terminate_vl(self, vlr):
- """
- Terminate the virtual link
- """
- yield from vlr.terminate()
-
-
-class NsmPlugins(object):
- """ NSM Plugins """
- def __init__(self):
- self._plugin_classes = {
- "openmano": openmano_nsm.OpenmanoNsPlugin,
- }
-
- @property
- def plugins(self):
- """ Plugin info """
- return self._plugin_classes
-
- def __getitem__(self, name):
- """ Get item """
- print("%s", self._plugin_classes)
- return self._plugin_classes[name]
-
- def register(self, plugin_name, plugin_class, *args):
- """ Register a plugin to this Nsm"""
- self._plugin_classes[plugin_name] = plugin_class
-
- def deregister(self, plugin_name, plugin_class, *args):
- """ Deregister a plugin to this Nsm"""
- if plugin_name in self._plugin_classes:
- del self._plugin_classes[plugin_name]
-
- def class_by_plugin_name(self, name):
- """ Get class by plugin name """
- return self._plugin_classes[name]
-
-
class CloudAccountConfigSubscriber:
- def __init__(self, log, dts, log_hdl):
+ def __init__(self, log, dts, log_hdl, project):
self._dts = dts
self._log = log
self._log_hdl = log_hdl
-
+ self._project = project
+
self._cloud_sub = rift.mano.cloud.CloudAccountConfigSubscriber(
self._dts,
self._log,
self._log_hdl,
+ self._project,
rift.mano.cloud.CloudAccountConfigCallbacks())
def get_cloud_account_sdn_name(self, account_name):
self._log.debug("No SDN Account associated with Cloud name %s", account_name)
return None
+ def get_cloud_account_msg(self,account_name):
+ if account_name in self._cloud_sub.accounts:
+ self._log.debug("Cloud accnt msg is %s",self._cloud_sub.accounts[account_name].account_msg)
+ return self._cloud_sub.accounts[account_name].account_msg
+
@asyncio.coroutine
def register(self):
- self._cloud_sub.register()
+ yield from self._cloud_sub.register()
+ def deregister(self):
+ self._cloud_sub.deregister()
-class ROAccountPluginSelector(object):
- """
- Select the RO based on the config.
-
- If no RO account is specified, then default to rift-ro.
-
- Note:
- Currently only one RO can be used (one-time global config.)
- """
- DEFAULT_PLUGIN = RwNsPlugin
-
- def __init__(self, dts, log, loop, records_publisher):
+class ROAccountConfigSubscriber:
+ def __init__(self, dts, log, loop, project, records_publisher):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._records_publisher = records_publisher
- self._nsm_plugins = NsmPlugins()
-
- self._ro_sub = mano_dts.ROAccountConfigSubscriber(
- self._log,
+ self._log.debug("Inside cloud - RO Account Config Subscriber init")
+
+ self._ro_sub = rift.mano.ro_account.ROAccountConfigSubscriber(
self._dts,
- self._loop,
- callback=self.on_ro_account_change
- )
- self._nsr_sub = mano_dts.NsrCatalogSubscriber(
self._log,
- self._dts,
self._loop,
- self.handle_nsr)
-
- # The default plugin will be RwNsPlugin
- self._ro_plugin = self._create_plugin(self.DEFAULT_PLUGIN, None)
- self.live_instances = 0
-
- @property
- def ro_plugin(self):
- return self._ro_plugin
-
- def handle_nsr(self, nsr, action):
- if action == rwdts.QueryAction.CREATE:
- self.live_instances += 1
- elif action == rwdts.QueryAction.DELETE:
- self.live_instances -= 1
-
- def on_ro_account_change(self, ro_account, action):
- if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
- self._on_ro_account_change(ro_account)
- elif action == rwdts.QueryAction.DELETE:
- self._on_ro_account_deleted(ro_account)
-
- def _on_ro_account_change(self, ro_account):
- self._log.debug("Got nsm plugin RO account: %s", ro_account)
- try:
- nsm_cls = self._nsm_plugins.class_by_plugin_name(
- ro_account.account_type
- )
- except KeyError as e:
- self._log.debug(
- "RO account nsm plugin not found: %s. Using standard rift nsm.",
- ro_account.name
- )
- nsm_cls = self.DEFAULT_PLUGIN
-
- ro_plugin = self._create_plugin(nsm_cls, ro_account)
- if self.live_instances == 0:
- self._ro_plugin = ro_plugin
- else:
- raise ValueError("Unable to change the plugin when live NS instances exists!")
-
- def _on_ro_account_deleted(self, ro_account):
- self._ro_plugin = None
-
- def _create_plugin(self, nsm_cls, ro_account):
-
- self._log.debug("Instantiating new RO account using class: %s", nsm_cls)
- nsm_instance = nsm_cls(self._dts, self._log, self._loop,
- self._records_publisher, ro_account)
-
- return nsm_instance
-
+ self._project,
+ self._records_publisher,
+ rift.mano.ro_account.ROAccountConfigCallbacks())
+
+ def get_ro_plugin(self, account_name):
+ if (account_name is not None) and (account_name in self._ro_sub.accounts):
+ ro_account = self._ro_sub.accounts[account_name]
+ self._log.debug("RO Account associated with name %s is %s", account_name, ro_account)
+ return ro_account.ro_plugin
+
+ self._log.debug("RO Account associated with name %s using default plugin", account_name)
+ return rwnsmplugin.RwNsPlugin(self._dts, self._log, self._loop, self._records_publisher, None, self._project)
+
@asyncio.coroutine
def register(self):
- yield from self._ro_sub.register()
- yield from self._nsr_sub.register()
+ self._log.debug("Registering ROAccount Config Subscriber")
+ yield from self._ro_sub.register()
+
+ def deregister(self):
+ self._ro_sub.deregister()
\ No newline at end of file
--- /dev/null
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import asyncio
+import abc
+
+class NsmPluginBase(object):
+ """
+ Abstract base class for the NSM plugin.
+ There will be single instance of this plugin for each plugin type.
+ """
+
+ def __init__(self, dts, log, loop, nsm, plugin_name, dts_publisher):
+ self._dts = dts
+ self._log = log
+ self._loop = loop
+ self._nsm = nsm
+ self._plugin_name = plugin_name
+ self._dts_publisher = dts_publisher
+
+ @property
+ def dts(self):
+ return self._dts
+
+ @property
+ def log(self):
+ return self._log
+
+ @property
+ def loop(self):
+ return self._loop
+
+ @property
+ def nsm(self):
+ return self._nsm
+
+ @abc.abstractmethod
+ def set_state(self, nsr_id, state):
+ pass
+
+ @abc.abstractmethod
+ def create_nsr(self, nsr, nsd, key_pairs=None, ssh_key=None):
+ """ Create an NSR """
+ pass
+
+ @abc.abstractmethod
+ @asyncio.coroutine
+ def deploy(self, nsr_msg):
+ pass
+
+ @abc.abstractmethod
+ @asyncio.coroutine
+ def instantiate_ns(self, nsr, xact):
+ """ Instantiate the network service """
+ pass
+
+ @abc.abstractmethod
+ @asyncio.coroutine
+ def instantiate_vnf(self, nsr, vnfr, scaleout=False):
+ """ Instantiate the virtual network function """
+ pass
+
+ @abc.abstractmethod
+ @asyncio.coroutine
+ def instantiate_vl(self, nsr, vl):
+ """ Instantiate the virtual link"""
+ pass
+
+ @abc.abstractmethod
+ @asyncio.coroutine
+ def update_vnfr(self, vnfr):
+ """ Update the virtual network function record """
+ pass
+
+ @abc.abstractmethod
+ @asyncio.coroutine
+ def get_nsr(self, nsr_path):
+ """ Get the NSR """
+ pass
+
+ @abc.abstractmethod
+ @asyncio.coroutine
+ def get_vnfr(self, vnfr_path):
+ """ Get the VNFR """
+ pass
+
+ @abc.abstractmethod
+ @asyncio.coroutine
+ def get_vlr(self, vlr_path):
+ """ Get the VLR """
+ pass
+
+ @abc.abstractmethod
+ @asyncio.coroutine
+ def terminate_ns(self, nsr):
+ """Terminate the network service """
+ pass
+
+ @abc.abstractmethod
+ @asyncio.coroutine
+ def terminate_vnf(self, nsr, vnfr, scalein=False):
+ """Terminate the VNF """
+ pass
+
+ @abc.abstractmethod
+ @asyncio.coroutine
+ def terminate_vl(self, vlr):
+ """Terminate the Virtual Link Record"""
+ pass
#
import asyncio
+import gi
import os
import sys
import time
import yaml
-import gi
gi.require_version('RwDts', '1.0')
gi.require_version('RwVnfrYang', '1.0')
from gi.repository import (
RwDts as rwdts,
RwVnfrYang,
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
import rift.openmano.rift2openmano as rift2openmano
import rift.openmano.openmano_client as openmano_client
-from . import rwnsmplugin
+from . import nsmpluginbase
from enum import Enum
-
+import ipaddress
import rift.tasklets
if sys.version_info < (3, 4, 4):
DUMP_OPENMANO_DIR = os.path.join(
- os.environ["RIFT_ARTIFACTS"],
+ os.environ["RIFT_VAR_ROOT"],
"openmano_descriptors"
)
@property
def vnfr_vdu_console_xpath(self):
""" path for resource-mgr"""
- return ("D,/rw-vnfr:vnfr-console/rw-vnfr:vnfr[rw-vnfr:id='{}']/rw-vnfr:vdur[vnfr:id='{}']".format(self._vnfr_id,self._vdur_id))
+ return self._project.add_project(
+ "D,/rw-vnfr:vnfr-console/rw-vnfr:vnfr[rw-vnfr:id={}]/rw-vnfr:vdur[vnfr:id={}]".format(
+ quoted_key(self._vnfr_id), quoted_key(self._vdur_id)))
- def __init__(self, dts, log, loop, nsr, vnfr_id, vdur_id, vdu_id):
+ def __init__(self, project, dts, log, loop, nsr, vnfr_id, vdur_id, vdu_id):
+ self._project = project
self._dts = dts
self._log = log
self._loop = loop
)
if action == rwdts.QueryAction.READ:
- schema = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur.schema()
+ schema = RwVnfrYang.YangData_RwProject_Project_VnfrConsole_Vnfr_Vdur.schema()
path_entry = schema.keyspec_to_entry(ks_path)
try:
)
self._log.debug("Got console response: %s for NSR ID %s vdur ID %s",
- console_url,
- self._nsr._nsr_uuid,
- self._vdur_id
- )
- vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
+ console_url,
+ self._nsr._nsr_uuid,
+ self._vdur_id
+ )
+ vdur_console = RwVnfrYang.YangData_RwProject_Project_VnfrConsole_Vnfr_Vdur()
vdur_console.id = self._vdur_id
if console_url:
vdur_console.console_url = console_url
self._log.debug("Recevied console URL for vdu {} is {}".format(self._vdu_id,vdur_console))
except openmano_client.InstanceStatusError as e:
self._log.error("Could not get NS instance console URL: %s",
- str(e))
- vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
+ str(e))
+ vdur_console = RwVnfrYang.YangData_RwProject_Project_VnfrConsole_Vnfr_Vdur()
vdur_console.id = self._vdur_id
vdur_console.console_url = 'none'
class OpenmanoVnfr(object):
- def __init__(self, log, loop, cli_api, vnfr, nsd):
+ def __init__(self, log, loop, cli_api, http_api, vnfr, nsd, ssh_key=None):
self._log = log
self._loop = loop
self._cli_api = cli_api
+ self._http_api = http_api
self._vnfr = vnfr
self._vnfd_id = vnfr.vnfd.id
self._created = False
self.nsd = nsd
+ self._ssh_key = ssh_key
@property
def vnfd(self):
@property
def openmano_vnfd(self):
self._log.debug("Converting vnfd %s from rift to openmano", self.vnfd.id)
- openmano_vnfd = rift2openmano.rift2openmano_vnfd(self.vnfd, self.nsd)
+ openmano_vnfd = rift2openmano.rift2openmano_vnfd(self.vnfd, self.nsd, self._http_api)
return openmano_vnfd
@property
@asyncio.coroutine
def create(self):
- self._log.debug("Creating openmano vnfd")
- openmano_vnfd = self.openmano_vnfd
- name = openmano_vnfd["vnf"]["name"]
-
- # If the name already exists, get the openmano vnfd id
- name_uuid_map = yield from self._loop.run_in_executor(
- None,
- self._cli_api.vnf_list,
- )
-
- if name in name_uuid_map:
- vnf_id = name_uuid_map[name]
- self._log.debug("Vnf already created. Got existing openmano vnfd id: %s", vnf_id)
- self._vnf_id = vnf_id
- return
-
- self._vnf_id, _ = yield from self._loop.run_in_executor(
- None,
- self._cli_api.vnf_create,
- self.openmano_vnfd_yaml,
- )
-
- fpath = dump_openmano_descriptor(
- "{}_vnf".format(name),
- self.openmano_vnfd_yaml
- )
+ try:
+ self._log.debug("Created openmano vnfd")
+ # The self.openmano_vnfd_yaml internally creates the vnf if not found.
+ # Assigning the yaml to a variable so that the api is not fired unnecessarily.
+ openmano_vnfd = self.openmano_vnfd
+ name = openmano_vnfd["name"]
- self._log.debug("Dumped Openmano VNF descriptor to: %s", fpath)
+ self._vnf_id = openmano_vnfd['uuid']
- self._created = True
+ self._created = True
+ except Exception as e:
+ self._log.error("Failed to create vnf on Openmano RO : %s", e)
+ raise e
def delete(self):
if not self._created:
TIMEOUT_SECS = 300
INSTANCE_TERMINATE_TIMEOUT = 60
- def __init__(self, dts, log, loop, publisher, cli_api, http_api, nsd_msg, nsr_config_msg,key_pairs,rift_vnfd_id=None ):
+ def __init__(self, project, dts, log, loop, publisher, cli_api, http_api, nsd_msg,
+ nsr_config_msg, key_pairs, ssh_key, rift_vnfd_id=None ):
+ self._project = project
self._log = log
self._dts = dts
self._loop = loop
self._nsrs = {}
self._vdur_console_handler = {}
self._key_pairs = key_pairs
+ self._ssh_key = ssh_key
self._nsd_uuid = None
self._nsr_uuid = None
self._rift_vnfd_id = rift_vnfd_id
self._state = OpenmanoNSRecordState.INIT
+ self._active_vms = 0
+ self._active_nets = 0
+
@property
def nsd(self):
return rift2openmano.RiftNSD(self._nsd_msg)
def vlrs(self):
return self._vlrs
+ @property
+ def http_api(self):
+ return self._http_api
+
@property
def openmano_nsd_yaml(self):
self._log.debug("Converting nsd %s from rift to openmano", self.nsd.id)
- openmano_nsd = rift2openmano.rift2openmano_nsd(self.nsd, self.vnfds,self.vnfr_ids)
+ openmano_nsd = rift2openmano.rift2openmano_nsd(self.nsd, self.vnfds,self.vnfr_ids, self.http_api)
return yaml.safe_dump(openmano_nsd, default_flow_style=False)
@property
def openmano_scaling_yaml(self):
self._log.debug("Creating Openmano Scaling Descriptor %s")
try:
- openmano_vnfd_nsd = rift2openmano.rift2openmano_vnfd_nsd(self.nsd, self.vnfds, self.vnfr_ids, self._rift_vnfd_id)
+ openmano_vnfd_nsd = rift2openmano.rift2openmano_vnfd_nsd(self.nsd, self.vnfds, self.vnfr_ids, self.http_api, self._rift_vnfd_id)
return yaml.safe_dump(openmano_vnfd_nsd, default_flow_style=False)
except Exception as e:
self._log.exception("Scaling Descriptor Exception: %s", str(e))
self._log.debug("Key pair NSD is %s",authorized_key)
key_pairs.append(authorized_key.key)
+ if self._ssh_key['public_key']:
+ self._log.debug("Pub key NSD is %s", self._ssh_key['public_key'])
+ key_pairs.append(self._ssh_key['public_key'])
+
if key_pairs:
cloud_config["key-pairs"] = key_pairs
cloud_config = self.get_ssh_key_pairs()
if cloud_config:
openmano_instance_create["cloud-config"] = cloud_config
- if self._nsr_config_msg.has_field("om_datacenter"):
- openmano_instance_create["datacenter"] = self._nsr_config_msg.om_datacenter
+ if self._nsr_config_msg.has_field("datacenter"):
+ openmano_instance_create["datacenter"] = self._nsr_config_msg.datacenter
openmano_instance_create["vnfs"] = {}
for vnfr in self._vnfrs:
- if "om_datacenter" in vnfr.vnfr.vnfr_msg:
- vnfr_name = vnfr.vnfr.vnfd.name + "__" + str(vnfr.vnfr.vnfr_msg.member_vnf_index_ref)
- openmano_instance_create["vnfs"][vnfr_name] = {"datacenter": vnfr.vnfr.vnfr_msg.om_datacenter}
+ if "datacenter" in vnfr.vnfr.vnfr_msg:
+ vnfr_name = vnfr.vnfr.vnfd.name + "." + str(vnfr.vnfr.vnfr_msg.member_vnf_index_ref)
+ openmano_instance_create["vnfs"][vnfr_name] = {"datacenter": vnfr.vnfr.vnfr_msg.datacenter}
openmano_instance_create["networks"] = {}
for vld_msg in self._nsd_msg.vld:
openmano_instance_create["networks"][vld_msg.name] = {}
for vlr in self._vlrs:
if vlr.vld_msg.name == vld_msg.name:
self._log.debug("Received VLR name %s, VLR DC: %s for VLD: %s",vlr.vld_msg.name,
- vlr.om_datacenter_name,vld_msg.name)
+ vlr.datacenter_name,vld_msg.name)
#network["vim-network-name"] = vld_msg.name
network = {}
ip_profile = {}
ip_profile['dhcp']['enabled'] = ip_profile_params.dhcp_params.enabled
ip_profile['dhcp']['start-address'] = ip_profile_params.dhcp_params.start_address
ip_profile['dhcp']['count'] = ip_profile_params.dhcp_params.count
+ if ip_profile['dhcp']['enabled'] is True and ip_profile['dhcp']['start-address'] is None:
+ addr_pool = list(ipaddress.ip_network(ip_profile['subnet-address']).hosts())
+ gateway_ip_addr = ip_profile.get('gateway-address', None)
+ if gateway_ip_addr is None:
+ gateway_ip_addr = str(next(iter(addr_pool)))
+ ip_profile['gateway-address'] = gateway_ip_addr
+
+ self._log.debug("Gateway Address {}".format(gateway_ip_addr))
+
+ if ipaddress.ip_address(gateway_ip_addr) in addr_pool:
+ addr_pool.remove(ipaddress.ip_address(gateway_ip_addr))
+ if len(addr_pool) > 0:
+ ip_profile['dhcp']['start-address'] = str(next(iter(addr_pool)))
+ #DHCP count more than 200 is not instantiating any instances using OPENMANO RO
+ #So restricting it to a feasible count of 100.
+ dhcp_count = ip_profile['dhcp']['count']
+ if dhcp_count is None or dhcp_count == 0 or dhcp_count > len(addr_pool):
+ ip_profile['dhcp']['count'] = min(len(addr_pool), 100)
+ self._log.debug("DHCP start Address {} DHCP count {}".
+ format(ip_profile['dhcp']['start-address'], ip_profile['dhcp']['count']))
else:
network["netmap-create"] = vlr.name
- if vlr.om_datacenter_name:
- network["datacenter"] = vlr.om_datacenter_name
- elif vld_msg.has_field("om_datacenter"):
- network["datacenter"] = vld_msg.om_datacenter
+ if vlr.datacenter_name:
+ network["datacenter"] = vlr.datacenter_name
+ elif vld_msg.has_field("datacenter"):
+ network["datacenter"] = vld_msg.datacenter
elif "datacenter" in openmano_instance_create:
network["datacenter"] = openmano_instance_create["datacenter"]
if network:
scaling_instance_create["description"] = self._nsr_config_msg.description
- if self._nsr_config_msg.has_field("om_datacenter"):
- scaling_instance_create["datacenter"] = self._nsr_config_msg.om_datacenter
+ if self._nsr_config_msg.has_field("datacenter"):
+ scaling_instance_create["datacenter"] = self._nsr_config_msg.datacenter
scaling_instance_create["vnfs"] = {}
for vnfr in self._vnfrs:
- if "om_datacenter" in vnfr.vnfr.vnfr_msg:
+ if "datacenter" in vnfr.vnfr.vnfr_msg:
vnfr_name = vnfr.vnfr.vnfd.name + "__" + str(vnfr.vnfr.vnfr_msg.member_vnf_index_ref)
- scaling_instance_create["vnfs"][vnfr_name] = {"datacenter": vnfr.vnfr.vnfr_msg.om_datacenter}
+ scaling_instance_create["vnfs"][vnfr_name] = {"datacenter": vnfr.vnfr.vnfr_msg.datacenter}
scaling_instance_create["networks"] = {}
for vld_msg in self._nsd_msg.vld:
scaling_instance_create["networks"][vld_msg.name] = {}
for vlr in self._vlrs:
if vlr.vld_msg.name == vld_msg.name:
self._log.debug("Received VLR name %s, VLR DC: %s for VLD: %s",vlr.vld_msg.name,
- vlr.om_datacenter_name,vld_msg.name)
+ vlr.datacenter_name,vld_msg.name)
#network["vim-network-name"] = vld_msg.name
network = {}
ip_profile = {}
network["netmap-use"] = vld_msg.vim_network_name
#else:
# network["netmap-create"] = vlr.name
- if vlr.om_datacenter_name:
- network["datacenter"] = vlr.om_datacenter_name
- elif vld_msg.has_field("om_datacenter"):
- network["datacenter"] = vld_msg.om_datacenter
+ if vlr.datacenter_name:
+ network["datacenter"] = vlr.datacenter_name
+ elif vld_msg.has_field("datacenter"):
+ network["datacenter"] = vld_msg.datacenter
elif "datacenter" in scaling_instance_create:
network["datacenter"] = scaling_instance_create["datacenter"]
if network:
None,
self._cli_api.ns_vim_network_delete,
vlr.name,
- vlr.om_datacenter_name)
+ vlr.datacenter_name)
yield from self._publisher.unpublish_vlr(None, vlr.vlr_msg)
yield from asyncio.sleep(1, loop=self._loop)
@asyncio.coroutine
def add_vnfr(self, vnfr):
- vnfr = OpenmanoVnfr(self._log, self._loop, self._cli_api, vnfr, nsd=self.nsd)
+ vnfr = OpenmanoVnfr(self._log, self._loop, self._cli_api, self.http_api,
+ vnfr, nsd=self.nsd, ssh_key=self._ssh_key)
yield from vnfr.create()
self._vnfrs.append(vnfr)
@asyncio.coroutine
def create(self):
- self._log.debug("Creating openmano scenario")
- name_uuid_map = yield from self._loop.run_in_executor(
- None,
- self._cli_api.ns_list,
- )
-
- if self._nsd_msg.name in name_uuid_map:
- self._log.debug("Found existing openmano scenario")
- self._nsd_uuid = name_uuid_map[self._nsd_msg.name]
- return
-
+ try:
+ self._log.debug("Created openmano scenario")
+ # The self.openmano_nsd_yaml internally creates the scenario if not found.
+ # Assigning the yaml to a variable so that the api is not fired unnecessarily.
+ nsd_yaml = self.openmano_nsd_yaml
- # Use the nsd uuid as the scenario name to rebind to existing
- # scenario on reload or to support muliple instances of the name
- # nsd
- self._nsd_uuid, _ = yield from self._loop.run_in_executor(
- None,
- self._cli_api.ns_create,
- self.openmano_nsd_yaml,
- self._nsd_msg.name
- )
- fpath = dump_openmano_descriptor(
- "{}_nsd".format(self._nsd_msg.name),
- self.openmano_nsd_yaml,
- )
+ self._nsd_uuid = yaml.load(nsd_yaml)['uuid']
+ fpath = dump_openmano_descriptor(
+ "{}_nsd".format(self._nsd_msg.name),
+ nsd_yaml,
+ )
- self._log.debug("Dumped Openmano NS descriptor to: %s", fpath)
+ self._log.debug("Dumped Openmano NS descriptor to: %s", fpath)
- self._created = True
+ self._created = True
+ except Exception as e:
+ self._log.error("Failed to create scenario on Openmano RO : %s", e)
+ raise e
@asyncio.coroutine
def scaling_scenario_create(self):
self._log.debug("Creating scaling openmano scenario")
- self._nsd_uuid, _ = yield from self._loop.run_in_executor(
- None,
- self._cli_api.ns_create,
- self.openmano_scaling_yaml,
- )
+ # The self.openmano_nsd_yaml internally creates the scenario if not found.
+ # Assigning the yaml to a variable so that the api is not fired unnecessarily.
+ nsd_yaml = self.openmano_scaling_yaml
+
+ self._nsd_uuid = yaml.load(nsd_yaml)['uuid']
+
fpath = dump_openmano_descriptor(
"{}_sgd".format(self._nsd_msg.name),
self.scaling_instance_create_yaml,
)
+
+ @asyncio.coroutine
+ def get_nsr_opdata(self):
+ """ NSR opdata associated with this VNFR """
+ xpath = self._project.add_project(
+ "D,/nsr:ns-instance-opdata/nsr:nsr" \
+ "[nsr:ns-instance-config-ref={}]". \
+ format(quoted_key(self.nsr_config_msg.id)))
+
+ results = yield from self._dts.query_read(xpath, rwdts.XactFlag.MERGE)
+
+ for result in results:
+ entry = yield from result
+ nsr_op = entry.result
+ return nsr_op
+
+ return None
+
+
@asyncio.coroutine
def instance_monitor_task(self):
self._log.debug("Starting Instance monitoring task")
start_time = time.time()
active_vnfs = []
-
+ nsr = yield from self.get_nsr_opdata()
while True:
+ active_vms = 0
+ active_nets = 0
+
yield from asyncio.sleep(1, loop=self._loop)
try:
instance_resp_json,
self._nsr_uuid)
+ for vnf in instance_resp_json['vnfs']:
+ for vm in vnf['vms']:
+ if vm['status'] == 'ACTIVE':
+ active_vms += 1
+ for net in instance_resp_json['nets']:
+ if net['status'] == 'ACTIVE':
+ active_nets += 1
+
+ nsr.orchestration_progress.vms.active = active_vms
+ nsr.orchestration_progress.networks.active = active_nets
+
+ # This is for accesibility of the status from nsm when the control goes back.
+ self._active_vms = active_vms
+ self._active_nets = active_nets
+
+ yield from self._publisher.publish_nsr_opdata(None, nsr)
+
except openmano_client.InstanceStatusError as e:
self._log.error("Could not get NS instance status: %s", str(e))
continue
+
def all_vms_active(vnf):
for vm in vnf["vms"]:
vm_status = vm["status"]
def get_vnf_ip_address(vnf):
if "ip_address" in vnf:
return vnf["ip_address"].strip()
+
+ else:
+ cp_info_list = get_ext_cp_info(vnf)
+
+ for cp_name, ip, mac in cp_info_list:
+ for vld in self.nsd.vlds:
+ if not vld.mgmt_network:
+ continue
+
+ for vld_cp in vld.vnfd_connection_point_ref:
+ if vld_cp.vnfd_connection_point_ref == cp_name:
+ return ip
return None
def get_vnf_mac_address(vnf):
return cp_info_list
def get_vnf_status(vnfr):
- # When we create an openmano descriptor we use <name>__<idx>
+ # When we create an openmano descriptor we use <name>.<idx>
# to come up with openmano constituent VNF name. Use this
# knowledge to map the vnfr back.
- openmano_vnfr_suffix = "__{}".format(
+ openmano_vnfr_suffix = ".{}".format(
vnfr.vnfr.vnfr_msg.member_vnf_index_ref
)
for vnf in instance_resp_json["vnfs"]:
if vnf["vnf_name"].endswith(openmano_vnfr_suffix):
return vnf
-
+
self._log.warning("Could not find vnf status with name that ends with: %s",
openmano_vnfr_suffix)
return None
# If there was a VNF that has a errored VM, then just fail the VNF and stop monitoring.
if any_vms_error(vnf_status):
- self._log.debug("VM was found to be in error state. Marking as failed.")
+ self._log.error("VM was found to be in error state. Marking as failed.")
self._state = OpenmanoNSRecordState.FAILED
vnfr_msg.operational_status = "failed"
yield from self._publisher.publish_vnfr(None, vnfr_msg)
vnf_mac_address = get_vnf_mac_address(vnf_status)
if vnf_ip_address is None:
- self._log.warning("No IP address obtained "
+ self._log.error("No IP address obtained "
"for VNF: {}, will retry.".format(
vnf_status['vnf_name']))
continue
self._log.debug("All VMs in VNF are active. Marking as running.")
vnfr_msg.operational_status = "running"
- self._log.debug("Got VNF ip address: %s, mac-address: %s", vnf_ip_address, vnf_mac_address)
+ self._log.debug("Got VNF ip address: %s, mac-address: %s",
+ vnf_ip_address, vnf_mac_address)
vnfr_msg.mgmt_interface.ip_address = vnf_ip_address
- vnfr_msg.vnf_configuration.config_access.mgmt_ip_address = vnf_ip_address
-
+ vnfr_msg.mgmt_interface.ssh_key.public_key = \
+ vnfr._ssh_key['public_key']
+ vnfr_msg.mgmt_interface.ssh_key.private_key_file = \
+ vnfr._ssh_key['private_key']
for vm in vnf_status["vms"]:
if vm["uuid"] not in self._vdur_console_handler:
- vdur_console_handler = VnfrConsoleOperdataDtsHandler(self._dts, self._log, self._loop,
+ vdur_console_handler = VnfrConsoleOperdataDtsHandler(self._project, self._dts, self._log, self._loop,
self, vnfr_msg.id,vm["uuid"],vm["name"])
yield from vdur_console_handler.register()
self._vdur_console_handler[vm["uuid"]] = vdur_console_handler
if len(active_vnfs) == len(self._vnfrs):
self._state = OpenmanoNSRecordState.RUNNING
- self._log.info("All VNF's are active. Exiting NSR monitoring task")
+ self._log.debug("All VNF's are active. Exiting NSR monitoring task")
return
@asyncio.coroutine
@asyncio.coroutine
def create_vlr(self,vlr):
- self._log.debug("Creating openmano vim network VLR name %s, VLR DC: %s",vlr.vld_msg.name,
- vlr.om_datacenter_name)
+ self._log.error("Creating openmano vim network VLR name %s, VLR DC: %s",vlr.vld_msg.name,
+ vlr.datacenter_name)
net_create = {}
net = {}
net['name'] = vlr.name
net['shared'] = True
net['type'] = 'bridge'
- self._log.debug("Received ip profile is %s",vlr._ip_profile)
+ self._log.error("Received ip profile is %s",vlr._ip_profile)
if vlr._ip_profile and vlr._ip_profile.has_field("ip_profile_params"):
ip_profile_params = vlr._ip_profile.ip_profile_params
ip_profile = {}
fpath = dump_openmano_descriptor(
"{}_vim_net_create_{}".format(self._nsr_config_msg.name,vlr.name),
net_create_msg)
- self._log.debug("Dumped Openmano VIM Net create to: %s", fpath)
+ self._log.error("Dumped Openmano VIM Net create to: %s", fpath)
vim_network_uuid = yield from self._loop.run_in_executor(
None,
self._cli_api.ns_vim_network_create,
net_create_msg,
- vlr.om_datacenter_name)
+ vlr.datacenter_name)
self._vlrs.append(vlr)
-class OpenmanoNsPlugin(rwnsmplugin.NsmPluginBase):
+class OpenmanoNsPlugin(nsmpluginbase.NsmPluginBase):
"""
RW Implentation of the NsmPluginBase
"""
- def __init__(self, dts, log, loop, publisher, ro_account):
+ def __init__(self, dts, log, loop, publisher, ro_account, project):
self._dts = dts
self._log = log
self._loop = loop
self._publisher = publisher
-
+ self._project = project
+
self._cli_api = None
self._http_api = None
self._openmano_nsrs = {}
OpenmanoNSRecordState.__members__.items() \
if member.value == state.value]
- def create_nsr(self, nsr_config_msg, nsd_msg, key_pairs=None):
+ def create_nsr(self, nsr_config_msg, nsd_msg, key_pairs=None, ssh_key=None):
"""
Create Network service record
"""
openmano_nsr = OpenmanoNsr(
- self._dts,
- self._log,
- self._loop,
- self._publisher,
- self._cli_api,
- self._http_api,
- nsd_msg,
- nsr_config_msg,
- key_pairs
- )
+ self._project,
+ self._dts,
+ self._log,
+ self._loop,
+ self._publisher,
+ self._cli_api,
+ self._http_api,
+ nsd_msg,
+ nsr_config_msg,
+ key_pairs,
+ ssh_key,
+ )
+ self.log.debug("NSR created in openmano nsm %s", openmano_nsr)
self._openmano_nsrs[nsr_config_msg.id] = openmano_nsr
@asyncio.coroutine
openmano_nsr = self._openmano_nsrs[nsr.id]
if scaleout:
openmano_vnf_nsr = OpenmanoNsr(
+ self._project,
self._dts,
self._log,
self._loop,
openmano_nsr.nsd_msg,
openmano_nsr.nsr_config_msg,
openmano_nsr.key_pairs,
- vnfr.vnfd.id
+ None,
+ rift_vnfd_id=vnfr.vnfd.id,
)
self._openmano_nsr_by_vnfr_id[nsr.id] = openmano_nsr
if vnfr.id in self._openmano_nsr_by_vnfr_id:
vnfr_msg.operational_status = "init"
self._log.debug("Attempting to publish openmano vnf: %s", vnfr_msg)
- with self._dts.transaction() as xact:
- yield from self._publisher.publish_vnfr(xact, vnfr_msg)
+ yield from self._publisher.publish_vnfr(None, vnfr_msg)
+
+ def update_vnfr(self, vnfr):
+ vnfr_msg = vnfr.vnfr_msg.deep_copy()
+ self._log.debug("Attempting to publish openmano vnf: %s", vnfr_msg)
+ yield from self._publisher.publish_vnfr(None, vnfr_msg)
@asyncio.coroutine
def instantiate_vl(self, nsr, vlr):
openmano_nsr,
)
- with self._dts.transaction() as xact:
- for vnfr in openmano_nsr.vnfrs:
- self._log.debug("Unpublishing VNFR: %s", vnfr.vnfr.vnfr_msg)
- yield from self._publisher.unpublish_vnfr(xact, vnfr.vnfr.vnfr_msg)
+ for vnfr in openmano_nsr.vnfrs:
+ self._log.debug("Unpublishing VNFR: %s", vnfr.vnfr.vnfr_msg)
+ yield from self._publisher.unpublish_vnfr(None, vnfr.vnfr.vnfr_msg)
del self._openmano_nsrs[nsr_id]
from gi.repository import (
RwDts as rwdts,
RwTypes,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
RwYang
)
import rift.tasklets
""" The network service op data DTS handler """
XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr"
- def __init__(self, dts, log, loop):
+ def __init__(self, dts, log, loop, project):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._regh = None
@property
@asyncio.coroutine
def register(self):
""" Register for Nsr op data publisher registration"""
- self._log.debug("Registering Nsr op data path %s as publisher",
- NsrOpDataDtsHandler.XPATH)
+ if self._regh:
+ return
+
+ xpath = self._project.add_project(NsrOpDataDtsHandler.XPATH)
+ self._log.debug("Registering Nsr op data path {} as publisher".
+ format(xpath))
hdl = rift.tasklets.DTS.RegistrationHandler()
with self._dts.group_create() as group:
- self._regh = group.register(xpath=NsrOpDataDtsHandler.XPATH,
+ self._regh = group.register(xpath=xpath,
handler=hdl,
flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ)
@asyncio.coroutine
- def create(self, xact, path, msg):
+ def create(self, xact, xpath, msg):
"""
Create an NS record in DTS with the path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Creating NSR xact = %s, %s:%s", xact, path, msg)
self.regh.create_element(path, msg, xact=xact)
self._log.debug("Created NSR xact = %s, %s:%s", xact, path, msg)
@asyncio.coroutine
- def update(self, xact, path, msg, flags=rwdts.XactFlag.REPLACE):
+ def update(self, xact, xpath, msg, flags=rwdts.XactFlag.REPLACE):
"""
Update an NS record in DTS with the path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Updating NSR xact = %s, %s:%s regh = %s", xact, path, msg, self.regh)
self.regh.update_element(path, msg, flags, xact=xact)
self._log.debug("Updated NSR xact = %s, %s:%s", xact, path, msg)
@asyncio.coroutine
- def delete(self, xact, path):
+ def delete(self, xact, xpath):
"""
Update an NS record in DTS with the path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Deleting NSR xact:%s, path:%s", xact, path)
self.regh.delete_element(path, xact=xact)
self._log.debug("Deleted NSR xact:%s, path:%s", xact, path)
+ def deregister(self):
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
class VnfrPublisherDtsHandler(object):
- """ Registers 'D,/vnfr:vnfr-catalog/vnfr:vnfr' DTS"""
+ """ Registers 'D,/rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr' DTS"""
XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
- def __init__(self, dts, log, loop):
+ def __init__(self, dts, log, loop, project):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._regh = None
@asyncio.coroutine
def register(self):
""" Register for Vvnfr create/update/delete/read requests from dts """
+ if self._regh:
+ return
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
"%s action on VirtualNetworkFunctionRecord not supported",
action)
- self._log.debug("Registering for VNFR using xpath: %s",
- VnfrPublisherDtsHandler.XPATH,)
+ xpath = self._project.add_project(VnfrPublisherDtsHandler.XPATH)
+ self._log.debug("Registering for VNFR using xpath: {}".
+ format(xpath))
hdl = rift.tasklets.DTS.RegistrationHandler()
with self._dts.group_create() as group:
- self._regh = group.register(xpath=VnfrPublisherDtsHandler.XPATH,
+ self._regh = group.register(xpath=xpath,
handler=hdl,
flags=(rwdts.Flag.PUBLISHER |
+ rwdts.Flag.SHARED |
rwdts.Flag.NO_PREP_READ |
rwdts.Flag.CACHE),)
+ def deregister(self):
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def create(self, xact, path, msg):
"""
class VlrPublisherDtsHandler(object):
- """ registers 'D,/vlr:vlr-catalog/vlr:vlr """
+ """ registers 'D,/rw-project:project/vlr:vlr-catalog/vlr:vlr """
XPATH = "D,/vlr:vlr-catalog/vlr:vlr"
- def __init__(self, dts, log, loop):
+ def __init__(self, dts, log, loop, project):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._regh = None
def register(self):
""" Register for vlr create/update/delete/read requests from dts """
+ if self._regh:
+ return
+
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
""" prepare callback from dts """
"%s action on VirtualLinkRecord not supported",
action)
- self._log.debug("Registering for VLR using xpath: %s",
- VlrPublisherDtsHandler.XPATH,)
+ xpath = self._project.add_project(VlrPublisherDtsHandler.XPATH)
+ self._log.debug("Registering for VLR using xpath: {}".
+ format(xpath))
hdl = rift.tasklets.DTS.RegistrationHandler()
with self._dts.group_create() as group:
- self._regh = group.register(xpath=VlrPublisherDtsHandler.XPATH,
+ self._regh = group.register(xpath=xpath,
handler=hdl,
flags=(rwdts.Flag.PUBLISHER |
rwdts.Flag.NO_PREP_READ |
rwdts.Flag.CACHE),)
+ def deregister(self):
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def create(self, xact, path, msg):
"""
class VnfdPublisher(object):
- AUTH = ('admin', 'admin')
+ AUTH = ('@rift', 'rift')
HEADERS = {"content-type": "application/vnd.yang.data+json"}
- def __init__(self, use_ssl, ssl_cert, ssl_key, loop):
+ def __init__(self, use_ssl, ssl_cert, ssl_key, loop, project):
self.use_ssl = use_ssl
self.ssl_cert = ssl_cert
self.ssl_key = ssl_key
+ self._project = project
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
self.loop = loop
scheme = "https" if self.use_ssl else "http"
- url = "{}://127.0.0.1:8008/api/config/vnfd-catalog/vnfd/{}"
+ url = "{}://127.0.0.1:8008/api/config/project/{}/vnfd-catalog/vnfd/{}"
- model = RwYang.Model.create_libncx()
- model.load_module("rw-vnfd")
- model.load_module("vnfd")
+ model = RwYang.Model.create_libyang()
+ model.load_module("rw-project-vnfd")
+ model.load_module("project-vnfd")
data = vnfd.to_json(model)
- key = "vnfd:vnfd-catalog"
+ key = "project-vnfd:vnfd-catalog"
newdict = json.loads(data)
if key in newdict:
data = json.dumps(newdict[key])
options["cert"] = (self.ssl_cert, self.ssl_key)
response = requests.put(
- url.format(scheme, vnfd.id),
+ url.format(scheme, self._project.name, vnfd.id),
**options
)
self._loop = loop
self._dts = dts
self.nsm = parent
+ self.project = parent._project
self._log.debug("Initialized ROConfigManager")
def is_ready(self):
@property
def cm_state_xpath(self):
- return ("/rw-conman:cm-state/rw-conman:cm-nsr")
+ return self.project.add_project("/rw-conman:cm-state/rw-conman:cm-nsr")
@classmethod
def map_config_status(cls, status):
'cfg_failed': nsrY.ConfigStates.FAILED,
'ready_no_cfg': nsrY.ConfigStates.CONFIG_NOT_NEEDED,
'ready': nsrY.ConfigStates.CONFIGURED,
+ 'terminate': nsrY.ConfigStates.TERMINATE,
}
return cfg_map[status]
return
try:
- nsrid = cm_nsr['id']
+ nsrid = cm_nsr.id
# Update the VNFRs' config status
- gen = []
- if 'cm_vnfr' in cm_nsr:
- gen = (vnfr for vnfr in cm_nsr['cm_vnfr']
- if vnfr['id'] in self.nsm._vnfrs)
+ gen = (vnfr for vnfr in cm_nsr.cm_vnfr
+ if vnfr.id in self.nsm._vnfrs)
for vnfr in gen:
- vnfrid = vnfr['id']
- new_status = ROConfigManager.map_config_status(vnfr['state'])
+ vnfrid = vnfr.id
+ new_status = ROConfigManager.map_config_status(vnfr.state)
self._log.debug("Updating config status of VNFR {} " \
"in NSR {} to {}({})".
format(vnfrid, nsrid, new_status,
- vnfr['state']))
+ vnfr.state))
yield from \
self.nsm.vnfrs[vnfrid].set_config_status(new_status)
+ yield from \
+ self.nsm.vnfrs[vnfrid].update_config_primitives(
+ vnfr.vnf_configuration,
+ self.nsm.nsrs[nsrid])
+
# Update the NSR's config status
- new_status = ROConfigManager.map_config_status(cm_nsr['state'])
- self._log.info("Updating config status of NSR {} to {}({})".
- format(nsrid, new_status, cm_nsr['state']))
+ new_status = ROConfigManager.map_config_status(cm_nsr.state)
+ self._log.debug("Updating config status of NSR {} to {}({})".
+ format(nsrid, new_status, cm_nsr.state))
- # If terminate nsr request comes when NS instantiation is in 'Configuring state'; self.nsm.nsrs dict
- # is already empty when self.nsm.nsrs[nsrid].set_config_status gets executed. So adding a check here.
+ # If terminate nsr request comes when NS instantiation is in
+ # 'Configuring state'; self.nsm.nsrs dict is already empty when
+ # self.nsm.nsrs[nsrid].set_config_status gets executed. So adding a check here.
if nsrid in self.nsm.nsrs:
- yield from self.nsm.nsrs[nsrid].set_config_status(new_status, cm_nsr.get('state_details'))
+ yield from self.nsm.nsrs[nsrid].set_config_status(
+ new_status,
+ cm_nsr.state_details)
except Exception as e:
self._log.error("Failed to process cm-state for nsr {}: {}".
@asyncio.coroutine
def register(self):
""" Register for cm-state changes """
-
+
@asyncio.coroutine
def on_prepare(xact_info, query_action, ks_path, msg):
""" cm-state changed """
- #print("###>>> cm-state change ({}), msg_dict = {}".format(query_action, msg_dict))
self._log.debug("Received cm-state on_prepare (%s:%s:%s)",
query_action,
ks_path,
if (query_action == rwdts.QueryAction.UPDATE or
query_action == rwdts.QueryAction.CREATE):
# Update Each NSR/VNFR state
- msg_dict = msg.as_dict()
- yield from self.update_ns_cfg_state(msg_dict)
+ # msg_dict = msg.as_dict()
+ yield from self.update_ns_cfg_state(msg)
elif query_action == rwdts.QueryAction.DELETE:
- self._log.debug("DELETE action in on_prepare for cm-state, ignoring")
+ self._log.debug("DELETE action in on_prepare for cm-state, "
+ "ignoring")
else:
raise NotImplementedError(
"%s on cm-state is not supported",
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
try:
- handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
- self.dts_reg_hdl = yield from self._dts.register(self.cm_state_xpath,
- flags=rwdts.Flag.SUBSCRIBER,
- handler=handler)
+ handler = rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_prepare)
+ self.dts_reg_hdl = yield from self._dts.register(
+ self.cm_state_xpath,
+ flags=rwdts.Flag.SUBSCRIBER,
+ handler=handler)
+
except Exception as e:
self._log.error("Failed to register for cm-state changes as %s", str(e))
-
+
+
+ def deregister(self):
+ if self.dts_reg_hdl:
+ self.dts_reg_hdl.deregister()
+ self.dts_reg_hdl = None
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
+from . import nsmpluginbase
+from . import openmano_nsm
import asyncio
-import abc
-
-class NsmPluginBase(object):
+class RwNsPlugin(nsmpluginbase.NsmPluginBase):
"""
- Abstract base class for the NSM plugin.
- There will be single instance of this plugin for each plugin type.
+ RW Implentation of the NsmPluginBase
"""
-
- def __init__(self, dts, log, loop, nsm, plugin_name, dts_publisher):
+ def __init__(self, dts, log, loop, publisher, ro_account, project):
self._dts = dts
self._log = log
self._loop = loop
- self._nsm = nsm
- self._plugin_name = plugin_name
- self._dts_publisher = dts_publisher
-
- @property
- def dts(self):
- return self._dts
-
- @property
- def log(self):
- return self._log
+ self._project = project
- @property
- def loop(self):
- return self._loop
-
- @property
- def nsm(self):
- return self._nsm
-
- @abc.abstractmethod
def set_state(self, nsr_id, state):
pass
- @abc.abstractmethod
- def create_nsr(self, nsr):
- """ Create an NSR """
+ def create_nsr(self, nsr_msg, nsd, key_pairs=None, ssh_key=None):
+ """
+ Create Network service record
+ """
pass
- @abc.abstractmethod
@asyncio.coroutine
- def deploy(self, nsr_msg):
+ def deploy(self, nsr):
pass
- @abc.abstractmethod
@asyncio.coroutine
- def instantiate_ns(self, nsr, xact):
- """ Instantiate the network service """
- pass
+ def instantiate_ns(self, nsr, config_xact):
+ """
+ Instantiate NSR with the passed nsr id
+ """
+ yield from nsr.instantiate(config_xact)
- @abc.abstractmethod
@asyncio.coroutine
def instantiate_vnf(self, nsr, vnfr, scaleout=False):
- """ Instantiate the virtual network function """
- pass
+ """
+ Instantiate NSR with the passed nsr id
+ """
+ yield from vnfr.instantiate(nsr)
- @abc.abstractmethod
@asyncio.coroutine
- def instantiate_vl(self, nsr, vl):
- """ Instantiate the virtual link"""
- pass
+ def instantiate_vl(self, nsr, vlr):
+ """
+ Instantiate NSR with the passed nsr id
+ """
+ yield from vlr.instantiate()
- @abc.abstractmethod
@asyncio.coroutine
- def get_nsr(self, nsr_path):
- """ Get the NSR """
+ def terminate_ns(self, nsr):
+ """
+ Terminate the network service
+ """
pass
- @abc.abstractmethod
@asyncio.coroutine
- def get_vnfr(self, vnfr_path):
- """ Get the VNFR """
- pass
+ def terminate_vnf(self, nsr, vnfr, scalein=False):
+ """
+ Terminate the VNF
+ """
+ yield from vnfr.terminate()
- @abc.abstractmethod
@asyncio.coroutine
- def get_vlr(self, vlr_path):
- """ Get the VLR """
- pass
+ def terminate_vl(self, vlr):
+ """
+ Terminate the virtual link
+ """
+ yield from vlr.terminate()
- @abc.abstractmethod
@asyncio.coroutine
- def terminate_ns(self, nsr):
- """Terminate the network service """
- pass
+ def update_vnfr(self, vnfr):
+ """ Update the virtual network function record """
+ yield from vnfr.update_vnfm()
- @abc.abstractmethod
- @asyncio.coroutine
- def terminate_vnf(self, vnfr):
- """Terminate the VNF """
- pass
+class NsmPlugins(object):
+ """ NSM Plugins """
+ def __init__(self):
+ self._plugin_classes = {
+ "openmano": openmano_nsm.OpenmanoNsPlugin,
+ }
- @abc.abstractmethod
- @asyncio.coroutine
- def terminate_vl(self, vlr):
- """Terminate the Virtual Link Record"""
- pass
+ @property
+ def plugins(self):
+ """ Plugin info """
+ return self._plugin_classes
+
+ def __getitem__(self, name):
+ """ Get item """
+ return self._plugin_classes[name]
+
+ def register(self, plugin_name, plugin_class, *args):
+ """ Register a plugin to this Nsm"""
+ self._plugin_classes[plugin_name] = plugin_class
+
+ def deregister(self, plugin_name, plugin_class, *args):
+ """ Deregister a plugin to this Nsm"""
+ if plugin_name in self._plugin_classes:
+ del self._plugin_classes[plugin_name]
+
+ def class_by_plugin_name(self, name):
+ """ Get class by plugin name """
+ return self._plugin_classes[name]
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import asyncio
+import gi
+import json
import ncclient
import ncclient.asyncio_manager
import os
+import requests
import shutil
import sys
import tempfile
import time
import uuid
import yaml
-import requests
-import json
-
-from collections import deque
from collections import defaultdict
+from collections import deque
from enum import Enum
+from urllib.parse import urlparse
+
+# disable unsigned certificate warning
+from requests.packages.urllib3.exceptions import InsecureRequestWarning
+requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
-import gi
gi.require_version('RwYang', '1.0')
-gi.require_version('RwNsdYang', '1.0')
+gi.require_version('NsdBaseYang', '1.0')
+gi.require_version('ProjectNsdYang', '1.0')
gi.require_version('RwDts', '1.0')
gi.require_version('RwNsmYang', '1.0')
gi.require_version('RwNsrYang', '1.0')
+gi.require_version('NsrYang', '1.0')
gi.require_version('RwTypes', '1.0')
gi.require_version('RwVlrYang', '1.0')
gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('VnfrYang', '1.0')
+gi.require_version('ProjectVnfdYang', '1.0')
from gi.repository import (
RwYang,
RwNsrYang,
NsrYang,
- NsdYang,
+ NsdBaseYang,
+ ProjectNsdYang as NsdYang,
RwVlrYang,
VnfrYang,
RwVnfrYang,
RwsdnalYang,
RwDts as rwdts,
RwTypes,
+ ProjectVnfdYang,
ProtobufC,
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
-import rift.tasklets
+from rift.mano.utils.ssh_keys import ManoSshKey
import rift.mano.ncclient
import rift.mano.config_data.config
import rift.mano.dts as mano_dts
+import rift.tasklets
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ get_add_delete_update_cfgs,
+ DEFAULT_PROJECT,
+ )
from . import rwnsm_conman as conman
from . import cloud
from . import publisher
+from . import subscriber
from . import xpath
from . import config_value_pool
from . import rwvnffgmgr
from . import scale_group
-
+from . import rwnsmplugin
+from . import openmano_nsm
+import functools
+import collections
class NetworkServiceRecordState(Enum):
""" Network Service Record State """
class NsrVlUpdateError(NsrNsdUpdateError):
pass
+class VirtualLinkRecordError(Exception):
+ """ Virtual Links Record Error """
+ pass
+
class VlRecordState(Enum):
""" VL Record State """
""" Vnffg Records class"""
SFF_DP_PORT = 4790
SFF_MGMT_PORT = 5000
- def __init__(self, dts, log, loop, vnffgmgr, nsr, nsr_name, vnffgd_msg, sdn_account_name):
+ def __init__(self, dts, log, loop, vnffgmgr, nsr, nsr_name, vnffgd_msg, sdn_account_name,cloud_account_name):
self._dts = dts
self._log = log
self._nsr = nsr
self._nsr_name = nsr_name
self._vnffgd_msg = vnffgd_msg
+ self._cloud_account_name = cloud_account_name
if sdn_account_name is None:
self._sdn_account_name = ''
else:
"sdn_account": self._sdn_account_name,
"operational_status": 'init',
}
- vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
+ vnffgr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
elif self._vnffgr_state == VnffgRecordState.TERMINATED:
vnffgr_dict = {"id": self._vnffgr_id,
"vnffgd_id_ref": self._vnffgd_msg.id,
"sdn_account": self._sdn_account_name,
"operational_status": 'terminated',
}
- vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
+ vnffgr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
else:
try:
vnffgr = self._vnffgmgr.fetch_vnffgr(self._vnffgr_id)
"sdn_account": self._sdn_account_name,
"operational_status": 'failed',
}
- vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
+ vnffgr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
return vnffgr
"vnffgd_id_ref": self._vnffgd_msg.id,
"vnffgd_name_ref": self._vnffgd_msg.name,
"sdn_account": self._sdn_account_name,
+ "cloud_account": self._cloud_account_name,
}
- vnffgr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
+ vnffgr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_Vnffgr.from_dict(vnffgr_dict)
for rsp in self._vnffgd_msg.rsp:
vnffgr_rsp = vnffgr.rsp.add()
vnffgr_rsp.id = str(uuid.uuid4())
vnfd = [vnfr.vnfd for vnfr in self._nsr.vnfrs.values() if vnfr.vnfd.id == rsp_cp_ref.vnfd_id_ref]
self._log.debug("VNFD message during VNFFG instantiation is %s",vnfd)
if len(vnfd) > 0 and vnfd[0].has_field('service_function_type'):
- self._log.debug("Service Function Type for VNFD ID %s is %s",rsp_cp_ref.vnfd_id_ref, vnfd[0].service_function_type)
+ self._log.debug("Service Function Type for VNFD ID %s is %s",
+ rsp_cp_ref.vnfd_id_ref, vnfd[0].service_function_type)
else:
- self._log.error("Service Function Type not available for VNFD ID %s; Skipping in chain",rsp_cp_ref.vnfd_id_ref)
+ self._log.error("Service Function Type not available for VNFD ID %s; Skipping in chain",
+ rsp_cp_ref.vnfd_id_ref)
continue
vnfr_cp_ref = vnffgr_rsp.vnfr_connection_point_ref.add()
self._log.info("Received vnf op status is %s; retrying",vnfr.operational_status)
if vnfr.operational_status == 'failed':
self._log.error("Fetching VNFR for %s failed", vnfr.id)
- raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" % (self.id, vnfr.id))
+ raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" %
+ (self.id, vnfr.id))
yield from asyncio.sleep(2, loop=self._loop)
vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
self._log.debug("Received VNFR is %s", vnfr)
vnfr_cp_ref.connection_point_params.port_id = cp.connection_point_id
vnfr_cp_ref.connection_point_params.name = self._nsr.name + '.' + cp.name
for vdu in vnfr.vdur:
- for ext_intf in vdu.external_interface:
- if ext_intf.name == vnfr_cp_ref.vnfr_connection_point_ref:
+ for intf in vdu.interface:
+ if intf.type_yang == "EXTERNAL" and intf.external_connection_point_ref == vnfr_cp_ref.vnfr_connection_point_ref:
vnfr_cp_ref.connection_point_params.vm_id = vdu.vim_id
self._log.debug("VIM ID for CP %s in VNFR %s is %s",cp.name,nsr_vnfr.id,
vnfr_cp_ref.connection_point_params.vm_id)
rsp_id_ref = _rsp[0].id
rsp_name = _rsp[0].name
else:
- self._log.error("RSP with ID %s not found during classifier creation for classifier id %s",vnffgd_classifier.rsp_id_ref,vnffgd_classifier.id)
+ self._log.error("RSP with ID %s not found during classifier creation for classifier id %s",
+ vnffgd_classifier.rsp_id_ref,vnffgd_classifier.id)
continue
vnffgr_classifier = vnffgr.classifier.add()
vnffgr_classifier.id = vnffgd_classifier.id
self._log.info("Received vnf op status is %s; retrying",vnfr.operational_status)
if vnfr.operational_status == 'failed':
self._log.error("Fetching VNFR for %s failed", vnfr.id)
- raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" % (self.id, vnfr.id))
+ raise NsrInstantiationFailed("Failed NS %s instantiation due to VNFR %s failure" %
+ (self.id, vnfr.id))
yield from asyncio.sleep(2, loop=self._loop)
vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
self._log.debug("Received VNFR is %s", vnfr)
vnffgr_classifier.port_id = cp.connection_point_id
vnffgr_classifier.ip_address = cp.ip_address
for vdu in vnfr.vdur:
- for ext_intf in vdu.external_interface:
- if ext_intf.name == vnffgr_classifier.vnfr_connection_point_ref:
+ for intf in vdu.interface:
+ if intf.type_yang == "EXTERNAL" and intf.external_connection_point_ref == vnffgr_classifier.vnfr_connection_point_ref:
vnffgr_classifier.vm_id = vdu.vim_id
- self._log.debug("VIM ID for CP %s in VNFR %s is %s",cp.name,nsr_vnfr.id,
- vnfr_cp_ref.connection_point_params.vm_id)
+ self._log.debug("VIM ID for CP %s in VNFR %s is %s",
+ cp.name,nsr_vnfr.id,
+ vnfr_cp_ref.connection_point_params.vm_id)
break
self._log.info("VNFFGR msg to be sent is %s", vnffgr)
vnfr = yield from self._nsr.fetch_vnfr(nsr_vnfr.xpath)
self._log.debug("Received VNFR is %s", vnfr)
- sff = RwsdnalYang.VNFFGSff()
+ sff = RwsdnalYang.YangData_RwProject_Project_Vnffgs_VnffgChain_Sff()
sff_list[nsr_vnfr.vnfd.id] = sff
sff.name = nsr_vnfr.name
sff.function_type = nsr_vnfr.vnfd.service_function_chain
XPATH = "D,/vlr:vlr-catalog/vlr:vlr"
@staticmethod
@asyncio.coroutine
- def create_record(dts, log, loop, nsr_name, vld_msg, cloud_account_name, om_datacenter, ip_profile, nsr_id, restart_mode=False):
+ def create_record(dts, log, loop, project, nsr_name, vld_msg,
+ datacenter, ip_profile, nsr_id, restart_mode=False):
"""Creates a new VLR object based on the given data.
If restart mode is enabled, then we look for existing records in the
dts,
log,
loop,
+ project,
nsr_name,
vld_msg,
- cloud_account_name,
- om_datacenter,
+ datacenter,
ip_profile,
nsr_id,
)
if restart_mode:
res_iter = yield from dts.query_read(
- "D,/vlr:vlr-catalog/vlr:vlr",
+ project.add_project("D,/vlr:vlr-catalog/vlr:vlr"),
rwdts.XactFlag.MERGE)
for fut in res_iter:
return vlr_obj
- def __init__(self, dts, log, loop, nsr_name, vld_msg, cloud_account_name, om_datacenter, ip_profile, nsr_id):
+ def __init__(self, dts, log, loop, project, nsr_name, vld_msg,
+ datacenter, ip_profile, nsr_id):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._nsr_name = nsr_name
self._vld_msg = vld_msg
- self._cloud_account_name = cloud_account_name
- self._om_datacenter_name = om_datacenter
+ self._datacenter_name = datacenter
self._assigned_subnet = None
self._nsr_id = nsr_id
self._ip_profile = ip_profile
self._state = VlRecordState.INIT
self._prev_state = None
self._create_time = int(time.time())
+ self.state_failed_reason = None
@property
def xpath(self):
""" path for this object """
- return "D,/vlr:vlr-catalog/vlr:vlr[vlr:id = '{}']".format(self._vlr_id)
+ return self._project.add_project("D,/vlr:vlr-catalog/vlr:vlr[vlr:id={}]".
+ format(quoted_key(self._vlr_id)))
@property
def id(self):
# This is a temporary hack to identify manually provisioned inter-site network
return self.vld_msg.name
else:
- return self._nsr_name + "." + self.vld_msg.name
+ return self._project.name + "." +self._nsr_name + "." + self.vld_msg.name
@property
- def cloud_account_name(self):
- """ Cloud account that this VLR should be created in """
- return self._cloud_account_name
-
- @property
- def om_datacenter_name(self):
+ def datacenter_name(self):
""" Datacenter that this VLR should be created in """
- return self._om_datacenter_name
+ return self._datacenter_name
@staticmethod
def vlr_xpath(vlr):
""" Get the VLR path from VLR """
- return (VirtualLinkRecord.XPATH + "[vlr:id = '{}']").format(vlr.id)
+ return (VirtualLinkRecord.XPATH + "[vlr:id={}]").format(quoted_key(vlr.id))
@property
def state(self):
"vld_ref": self.vld_msg.id,
"name": self.name,
"create_time": self._create_time,
- "cloud_account": self.cloud_account_name,
- "om_datacenter": self.om_datacenter_name,
+ "datacenter": self._datacenter_name,
}
if self._ip_profile and self._ip_profile.has_field('ip_profile_params'):
vlr_dict['ip_profile_params' ] = self._ip_profile.ip_profile_params.as_dict()
+
vlr_dict.update(vld_copy_dict)
- vlr = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.from_dict(vlr_dict)
+ vlr = RwVlrYang.YangData_RwProject_Project_VlrCatalog_Vlr.from_dict(vlr_dict)
+
+ if self.vld_msg.has_field('virtual_connection_points'):
+ for cp in self.vld_msg.virtual_connection_points:
+ vcp = vlr.virtual_connection_points.add()
+ vcp.from_dict(cp.as_dict())
return vlr
def reset_id(self, vlr_id):
def create_nsr_vlr_msg(self, vnfrs):
""" The VLR message"""
- nsr_vlr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_Vlr()
+ nsr_vlr = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_Vlr()
nsr_vlr.vlr_ref = self._vlr_id
nsr_vlr.assigned_subnet = self.assigned_subnet
- nsr_vlr.cloud_account = self.cloud_account_name
- nsr_vlr.om_datacenter = self.om_datacenter_name
+ nsr_vlr.datacenter = self._datacenter_name
for conn in self.vld_msg.vnfd_connection_point_ref:
for vnfr in vnfrs:
if (vnfr.vnfd.id == conn.vnfd_id_ref and
vnfr.member_vnf_index == conn.member_vnf_index_ref and
- self.cloud_account_name == vnfr.cloud_account_name and
- self.om_datacenter_name == vnfr.om_datacenter_name):
+ self._datacenter_name == vnfr._datacenter_name):
cp_entry = nsr_vlr.vnfr_connection_point_ref.add()
cp_entry.vnfr_id = vnfr.id
cp_entry.connection_point = conn.vnfd_connection_point_ref
self._log.info("Instantiated VL with xpath %s and vlr:%s",
self.xpath, vlr)
- self._state = VlRecordState.ACTIVE
self._assigned_subnet = vlr.assigned_subnet
def vlr_in_vns(self):
self._state = VlRecordState.TERMINATED
self._log.debug("Terminated VL id:%s", self.id)
+ def set_state_from_op_status(self, operational_status):
+ """ Set the state of this VL based on operational_status"""
+
+ self._log.debug("set_state_from_op_status called for vlr id %s with value %s", self.id, operational_status)
+ if operational_status == 'running':
+ self._state = VlRecordState.ACTIVE
+ elif operational_status == 'failed':
+ self._state = VlRecordState.FAILED
+ elif operational_status == 'vl_alloc_pending':
+ self._state = VlRecordState.INSTANTIATION_PENDING
+ else:
+ raise VirtualLinkRecordError("Unknown operational_status %s" % (operational_status))
class VnfRecordState(Enum):
""" Vnf Record State """
@staticmethod
@asyncio.coroutine
- def create_record(dts, log, loop, vnfd, const_vnfd_msg, nsd_id, nsr_name,
- cloud_account_name, om_datacenter_name, nsr_id, group_name, group_instance_id,
- placement_groups, restart_mode=False):
+ def create_record(dts, log, loop, project, vnfd, nsr_config, const_vnfd_msg, nsd_id, nsr_name,
+ datacenter_name, nsr_id, group_name, group_instance_id,
+ placement_groups, cloud_config, restart_mode=False):
"""Creates a new VNFR object based on the given data.
If restart mode is enabled, then we look for existing records in the
Returns:
VirtualNetworkFunctionRecord
"""
+
vnfr_obj = VirtualNetworkFunctionRecord(
dts,
log,
loop,
+ project,
vnfd,
+ nsr_config,
const_vnfd_msg,
nsd_id,
nsr_name,
- cloud_account_name,
- om_datacenter_name,
+ datacenter_name,
nsr_id,
group_name,
group_instance_id,
placement_groups,
+ cloud_config,
restart_mode=restart_mode)
if restart_mode:
res_iter = yield from dts.query_read(
- "D,/vnfr:vnfr-catalog/vnfr:vnfr",
+ project.add_project("D,/vnfr:vnfr-catalog/vnfr:vnfr"),
rwdts.XactFlag.MERGE)
for fut in res_iter:
dts,
log,
loop,
+ project,
vnfd,
+ nsr_config,
const_vnfd_msg,
nsd_id,
nsr_name,
- cloud_account_name,
- om_datacenter_name,
+ datacenter_name,
nsr_id,
group_name=None,
group_instance_id=None,
placement_groups = [],
+ cloud_config = None,
restart_mode = False):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._vnfd = vnfd
+ self._nsr_config = nsr_config
self._const_vnfd_msg = const_vnfd_msg
self._nsd_id = nsd_id
self._nsr_name = nsr_name
self._nsr_id = nsr_id
- self._cloud_account_name = cloud_account_name
- self._om_datacenter_name = om_datacenter_name
+ self._datacenter_name = datacenter_name
self._group_name = group_name
self._group_instance_id = group_instance_id
self._placement_groups = placement_groups
+ self._cloud_config = cloud_config
+ self.restart_mode = restart_mode
+
self._config_status = NsrYang.ConfigStates.INIT
self._create_time = int(time.time())
self._state = VnfRecordState.INIT
self._state_failed_reason = None
+ self._active_vdus = 0
+
self.config_store = rift.mano.config_data.config.ConfigStore(self._log)
self.configure()
self._vnfr_id = str(uuid.uuid4())
self._name = None
+
+ self.substitute_vnf_input_parameters = VnfInputParameterSubstitution(self._log,
+ self._const_vnfd_msg,
+ self._project)
self._vnfr_msg = self.create_vnfr_msg()
self._log.debug("Set VNFR {} config type to {}".
format(self.name, self.config_type))
- self.restart_mode = restart_mode
if group_name is None and group_instance_id is not None:
@property
def xpath(self):
""" VNFR xpath """
- return "D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id = '{}']".format(self.id)
+ return self._project.add_project("D,/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id={}]"
+ .format(quoted_key(self.id)))
@property
def vnfr_msg(self):
@property
def const_vnfr_msg(self):
""" VNFR message """
- return RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ConstituentVnfrRef(vnfr_id=self.id,cloud_account=self.cloud_account_name,om_datacenter=self._om_datacenter_name)
+ return RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ConstituentVnfrRef(
+ vnfr_id=self.id, datacenter=self._datacenter_name)
@property
def vnfd(self):
return self._vnfd
@property
- def cloud_account_name(self):
- """ Cloud account that this VNF should be created in """
- return self._cloud_account_name
-
- @property
- def om_datacenter_name(self):
+ def datacenter_name(self):
""" Datacenter that this VNF should be created in """
- return self._om_datacenter_name
+ return self._datacenter_name
@property
if self._name is not None:
return self._name
- name_tags = [self._nsr_name]
+ name_tags = [self._project.name, self._nsr_name]
if self._group_name is not None:
name_tags.append(self._group_name)
@staticmethod
def vnfr_xpath(vnfr):
""" Get the VNFR path from VNFR """
- return (VirtualNetworkFunctionRecord.XPATH + "[vnfr:id = '{}']").format(vnfr.id)
+ return (VirtualNetworkFunctionRecord.XPATH +
+ "[vnfr:id={}]").format(quoted_key(vnfr.id))
@property
def config_type(self):
def configure(self):
self.config_store.merge_vnfd_config(
+ self._project.name,
self._nsd_id,
self._vnfd,
self.member_vnf_index,
"id": self.id,
"nsr_id_ref": self._nsr_id,
"name": self.name,
- "cloud_account": self._cloud_account_name,
- "om_datacenter": self._om_datacenter_name,
+ "datacenter": self._datacenter_name,
"config_status": self.config_status
}
vnfr_dict.update(vnfd_copy_dict)
- vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
- vnfr.vnfd = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict(self.vnfd.as_dict(),
- ignore_missing_keys=True)
+ vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
+ vnfr.vnfd = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vnfd. \
+ from_dict(self.vnfd.as_dict())
vnfr.member_vnf_index_ref = self.member_vnf_index
vnfr.vnf_configuration.from_dict(self._vnfd.vnf_configuration.as_dict())
group = vnfr.placement_groups_info.add()
group.from_dict(group_info.as_dict())
+ if self._cloud_config and len(self._cloud_config.as_dict()):
+ self._log.debug("Cloud config during vnfr create is {}".format(self._cloud_config))
+ vnfr.cloud_config = self._cloud_config
+
# UI expects the monitoring param field to exist
vnfr.monitoring_param = []
self._log.debug("Get vnfr_msg for VNFR {} : {}".format(self.name, vnfr))
+
+ if self.restart_mode:
+ vnfr.operational_status = 'init'
+ else:
+ # Set Operational Status as pre-init for Input Param Substitution
+ vnfr.operational_status = 'pre_init'
+
return vnfr
@asyncio.coroutine
format(self.name, self.vnfr_msg))
yield from self._dts.query_update(
self.xpath,
- rwdts.XactFlag.TRACE,
+ rwdts.XactFlag.REPLACE,
self.vnfr_msg
)
format(self.name, self._config_status,
self.config_type, status))
if self._config_status == NsrYang.ConfigStates.CONFIGURED:
- self._log.error("Updating already configured VNFR {}".
- format(self.name))
+ self._log.warning("Updating already configured VNFR {}".
+ format(self.name))
return
if self._config_status != status:
# But not sure whats the use of this variable?
self.vnfr_msg.config_status = status_to_string(status)
except Exception as e:
- self._log.error("Exception=%s", str(e))
- pass
+ self._log.exception("Exception=%s", str(e))
self._log.debug("Updated VNFR {} status to {}".format(self.name, status))
return False
+ @asyncio.coroutine
+ def update_config_primitives(self, vnf_config, nsr):
+ # Update only after we are configured
+ if self._config_status == NsrYang.ConfigStates.INIT:
+ return
+
+ if not vnf_config.as_dict():
+ return
+
+ self._log.debug("Update VNFR {} config: {}".
+ format(self.name, vnf_config.as_dict()))
+
+ # Update config primitive
+ updated = False
+ for prim in self._vnfd.vnf_configuration.config_primitive:
+ for p in vnf_config.config_primitive:
+ if prim.name == p.name:
+ for param in prim.parameter:
+ for pa in p.parameter:
+ if pa.name == param.name:
+ if pa.default_value and \
+ (pa.default_value != param.default_value):
+ param.default_value = pa.default_value
+ param.read_only = pa.read_only
+ updated = True
+ break
+ self._log.debug("Prim: {}".format(prim.as_dict()))
+ break
+
+ if updated:
+ self._log.debug("Updated VNFD {} config: {}".
+ format(self._vnfd.name,
+ self._vnfd.vnf_configuration))
+ self._vnfr_msg = self.create_vnfr_msg()
+
+ try:
+ yield from nsr.nsm_plugin.update_vnfr(self)
+ except Exception as e:
+ self._log.error("Exception updating VNFM with new config "
+ "primitive for VNFR {}: {}".
+ format(self.name, e))
+ self._log.exception(e)
+
@asyncio.coroutine
def instantiate(self, nsr):
""" Instantiate this VNFR"""
def find_vlr_for_cp(conn):
""" Find VLR for the given connection point """
- for vlr in nsr.vlrs:
+ for vlr_id, vlr in nsr.vlrs.items():
for vnfd_cp in vlr.vld_msg.vnfd_connection_point_ref:
if (vnfd_cp.vnfd_id_ref == self._vnfd.id and
vnfd_cp.vnfd_connection_point_ref == conn.name and
vnfd_cp.member_vnf_index_ref == self.member_vnf_index and
- vlr.cloud_account_name == self.cloud_account_name):
+ vlr._datacenter_name == self._datacenter_name):
self._log.debug("Found VLR for cp_name:%s and vnf-index:%d",
conn.name, self.member_vnf_index)
return vlr
return None
# For every connection point in the VNFD fill in the identifier
+ self._log.debug("Add connection point for VNF %s: %s",
+ self.vnfr_msg.name, self._vnfd.connection_point)
for conn_p in self._vnfd.connection_point:
- cpr = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint()
+ cpr = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_ConnectionPoint()
cpr.name = conn_p.name
cpr.type_yang = conn_p.type_yang
if conn_p.has_field('port_security_enabled'):
continue
cpr.vlr_ref = vlr_ref.id
+
self.vnfr_msg.connection_point.append(cpr)
self._log.debug("Connection point [%s] added, vnf id=%s vnfd id=%s",
cpr, self.vnfr_msg.id, self.vnfr_msg.vnfd.id)
+ self._log.debug("VNFR {} restart mode {}".
+ format(self.vnfr_msg.id, self.restart_mode))
if not self.restart_mode:
- yield from self._dts.query_create(self.xpath,
- 0, # this is sub
- self.vnfr_msg)
+ # Checking for NS Terminate.
+ if nsr._ns_terminate_received == False:
+ # Create with pre-init operational state publishes the vnfr for substitution.
+ yield from self._dts.query_create(self.xpath, 0, self.vnfr_msg)
+ # Call to substitute VNF Input Parameter
+ self.substitute_vnf_input_parameters(self.vnfr_msg, self._nsr_config)
+ # Calling Update with pre-init operational data after Param substitution to instatntiate vnfr
+ yield from self._dts.query_update(self.xpath, 0, self.vnfr_msg)
+
else:
yield from self._dts.query_update(self.xpath,
0,
self.vnfr_msg)
self._log.info("Created VNF with xpath %s and vnfr %s",
- self.xpath, self.vnfr_msg)
-
- self._log.info("Instantiated VNFR with xpath %s and vnfd %s, vnfr %s",
- self.xpath, self._vnfd, self.vnfr_msg)
+ self.xpath, self.vnfr_msg)
@asyncio.coroutine
def update_state(self, vnfr_msg):
@asyncio.coroutine
def instantiation_failed(self, failed_reason=None):
""" This VNFR instantiation failed"""
- self._log.error("VNFR %s instantiation failed", self._vnfr_id)
+ self._log.debug("VNFR %s instantiation failed", self._vnfr_id)
self.set_state(VnfRecordState.FAILED)
self._state_failed_reason = failed_reason
event_list = []
idx = 1
for entry in self._events:
- event = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_OperationalEvents()
+ event = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_OperationalEvents()
event.id = idx
idx += 1
event.timestamp, event.event, event.description, event.details = entry
""" Network service record """
XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr"
- def __init__(self, dts, log, loop, nsm, nsm_plugin, nsr_cfg_msg, sdn_account_name, key_pairs, restart_mode=False,
+ def __init__(self, dts, log, loop, nsm, nsm_plugin, nsr_cfg_msg,
+ sdn_account_name, key_pairs, project, restart_mode=False,
vlr_handler=None):
self._dts = dts
self._log = log
self._nsm_plugin = nsm_plugin
self._sdn_account_name = sdn_account_name
self._vlr_handler = vlr_handler
+ self._project = project
self._nsd = None
self._nsr_msg = None
self._nsr_regh = None
self._key_pairs = key_pairs
- self._vlrs = []
+ self._ssh_key_file = None
+ self._ssh_pub_key = None
+ self._vlrs = {}
self._vnfrs = {}
self._vnfds = {}
self._vnffgrs = {}
self._is_active = False
self._vl_phase_completed = False
self._vnf_phase_completed = False
+ self.instantiated = set()
+
+ # Used for orchestration_progress
+ self._active_vms = 0
+ self._active_networks = 0
+
+ # A flag to indicate if the NS has failed, currently it is recorded in
+ # operational status, but at the time of termination this field is
+ # over-written making it difficult to identify the failure.
+ self._is_failed = False
# Initalise the state to init
# The NSR moves through the following transitions
self.set_state(NetworkServiceRecordState.INIT)
- self.substitute_input_parameters = InputParameterSubstitution(self._log)
+ self.substitute_input_parameters = InputParameterSubstitution(self._log, self._project)
+
+ # Create an asyncio loop to know when the virtual links are ready
+ self._vls_ready = asyncio.Event(loop=self._loop)
+
+ # This variable stores all the terminate events received per NS. This is then used to prevent any
+ # further nsr non-terminate updates received in case of terminate being called bedore ns in in running state.
+ self._ns_terminate_received = False
@property
def nsm_plugin(self):
def set_state(self, state):
""" Set state for this NSR"""
- self._log.debug("Setting state to %s", state)
# We are in init phase and is moving to the next state
# The new state could be a FAILED state or VNF_INIIT_PHASE
if self.state == NetworkServiceRecordState.VL_INIT_PHASE:
self._vnf_phase_completed = True
self._op_status.set_state(state)
+
self._nsm_plugin.set_state(self.id, state)
@property
return self._nsr_cfg_msg.name
@property
- def cloud_account_name(self):
- return self._nsr_cfg_msg.cloud_account
-
- @property
- def om_datacenter_name(self):
- if self._nsr_cfg_msg.has_field('om_datacenter'):
- return self._nsr_cfg_msg.om_datacenter
+ def _datacenter_name(self):
+ if self._nsr_cfg_msg.has_field('datacenter'):
+ return self._nsr_cfg_msg.datacenter
return None
@property
""" Config status for NSR """
return self._config_status
+ @property
+ def nsm(self):
+ """NS Manager"""
+ return self._nsm
+
+ @property
+ def is_failed(self):
+ return self._is_failed
+
+ @property
+ def public_key(self):
+ return self._ssh_pub_key
+
+ @property
+ def private_key(self):
+ return self._ssh_key_file
+
def resolve_placement_group_cloud_construct(self, input_group):
"""
Returns the cloud specific construct for placement group
for group_info in self._nsr_cfg_msg.nsd_placement_group_maps:
if group_info.placement_group_ref == input_group.name:
- group = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_PlacementGroupsInfo()
+ group = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_PlacementGroupsInfo()
group_dict = {k:v for k,v in
group_info.as_dict().items() if k != 'placement_group_ref'}
for param in copy_dict:
def __str__(self):
- return "NSR(name={}, nsd_id={}, cloud_account={})".format(
- self.name, self.nsd_id, self.cloud_account_name
+ return "NSR(name={}, nsd_id={}, data center={})".format(
+ self.name, self.nsd_id, self._datacenter_name
)
def _get_vnfd(self, vnfd_id, config_xact):
""" Fetch vnfd msg for the passed vnfd id """
return self._nsm.get_vnfd(vnfd_id, config_xact)
- def _get_vnfd_cloud_account(self, vnfd_member_index):
- """ Fetch Cloud Account for the passed vnfd id """
- if self._nsr_cfg_msg.vnf_cloud_account_map:
- vim_accounts = [(vnf.cloud_account,vnf.om_datacenter) for vnf in self._nsr_cfg_msg.vnf_cloud_account_map \
- if vnfd_member_index == vnf.member_vnf_index_ref]
+ def _get_vnfd_datacenter(self, vnfd_member_index):
+ """ Fetch datacenter for the passed vnfd id """
+ if self._nsr_cfg_msg.vnf_datacenter_map:
+ vim_accounts = [vnf.datacenter for vnf in self._nsr_cfg_msg.vnf_datacenter_map \
+ if str(vnfd_member_index) == str(vnf.member_vnf_index_ref)]
if vim_accounts and vim_accounts[0]:
return vim_accounts[0]
- return (self.cloud_account_name,self.om_datacenter_name)
+ return self._datacenter_name
def _get_constituent_vnfd_msg(self, vnf_index):
for const_vnfd in self.nsd_msg.constituent_vnfd:
def scaling_trigger_str(self, trigger):
SCALING_TRIGGER_STRS = {
- NsdYang.ScalingTrigger.PRE_SCALE_IN : 'pre-scale-in',
- NsdYang.ScalingTrigger.POST_SCALE_IN : 'post-scale-in',
- NsdYang.ScalingTrigger.PRE_SCALE_OUT : 'pre-scale-out',
- NsdYang.ScalingTrigger.POST_SCALE_OUT : 'post-scale-out',
+ NsdBaseYang.ScalingTrigger.PRE_SCALE_IN : 'pre-scale-in',
+ NsdBaseYang.ScalingTrigger.POST_SCALE_IN : 'post-scale-in',
+ NsdBaseYang.ScalingTrigger.PRE_SCALE_OUT : 'pre-scale-out',
+ NsdBaseYang.ScalingTrigger.POST_SCALE_OUT : 'post-scale-out',
}
try:
return SCALING_TRIGGER_STRS[trigger]
self._log.exception(e)
return "Unknown trigger"
+ def generate_ssh_key_pair(self, config_xact):
+ '''Generate a ssh key pair if required'''
+ if self._ssh_key_file:
+ self._log.debug("Key pair already generated")
+ return
+
+ gen_key = False
+ for cv in self.nsd_msg.constituent_vnfd:
+ vnfd = self._get_vnfd(cv.vnfd_id_ref, config_xact)
+ if vnfd and vnfd.mgmt_interface.ssh_key:
+ gen_key = True
+ break
+
+ if not gen_key:
+ return
+
+ try:
+ key = ManoSshKey(self._log)
+ path = tempfile.mkdtemp()
+ key.write_to_disk(name=self.id, directory=path)
+ self._ssh_key_file = "file://{}".format(key.private_key_file)
+ self._ssh_pub_key = key.public_key
+ except Exception as e:
+ self._log.exception("Error generating ssh key for {}: {}".
+ format(self.nsr_cfg_msg.name, e))
+
@asyncio.coroutine
def instantiate_vls(self):
"""
"""
self._log.debug("Instantiating %d VLs in NSD id %s", len(self._vlrs),
self.id)
- for vlr in self._vlrs:
+ for vlr_id, vlr in self._vlrs.items():
yield from self.nsm_plugin.instantiate_vl(self, vlr)
- vlr.state = VlRecordState.ACTIVE
+ if not isinstance(self.nsm_plugin, rwnsmplugin.RwNsPlugin):
+ self._vls_ready.set()
+
+ # Wait for the VLs to be ready before yielding control out
+ self._log.debug("Waitng for %d VLs in NSR id %s to be active",
+ len(self._vlrs), self.id)
+ if self._vlrs:
+ self._log.debug("NSR id:%s, name:%s - Waiting for %d VLs to be ready",
+ self.id, self.name, len(self._vlrs))
+ yield from self._vls_ready.wait()
+ else:
+ self._log.debug("NSR id:%s, name:%s, No virtual links found",
+ self.id, self.name)
+ self._vls_ready.set()
+ self._log.info("All %d VLs in NSR id %s are active, start the VNFs",
+ len(self._vlrs), self.id)
@asyncio.coroutine
def create(self, config_xact):
""" Create this network service"""
+ self._log.debug("Create NS {} for {}".format(self.name, self._project.name))
# Create virtual links for all the external vnf
# connection points in this NS
yield from self.create_vls()
@asyncio.coroutine
def apply_scale_group_config_script(self, script, group, scale_instance, trigger, vnfrs=None):
""" Apply config based on script for scale group """
+ rift_var_root_dir = os.environ['RIFT_VAR_ROOT']
@asyncio.coroutine
def add_vnfrs_data(vnfrs_list):
""" Add as a dict each of the VNFRs data """
vnfrs_data = []
+
for vnfr in vnfrs_list:
self._log.debug("Add VNFR {} data".format(vnfr))
vnfr_data = dict()
vnfr_data['name'] = vnfr.name
- if trigger in [NsdYang.ScalingTrigger.PRE_SCALE_IN, NsdYang.ScalingTrigger.POST_SCALE_OUT]:
+ if trigger in [NsdBaseYang.ScalingTrigger.PRE_SCALE_IN,
+ NsdBaseYang.ScalingTrigger.POST_SCALE_OUT]:
# Get VNF management and other IPs, etc
opdata = yield from self.fetch_vnfr(vnfr.xpath)
self._log.debug("VNFR {} op data: {}".format(vnfr.name, opdata))
try:
vnfr_data['rw_mgmt_ip'] = opdata.mgmt_interface.ip_address
vnfr_data['rw_mgmt_port'] = opdata.mgmt_interface.port
+ vnfr_data['member_vnf_index_ref'] = opdata.member_vnf_index_ref
+ vnfr_data['vdur_data'] = []
+ for vdur in opdata.vdur:
+ vdur_data = dict()
+ vdur_data['vm_name'] = vdur.name
+ vdur_data['vm_mgmt_ip'] = vdur.vm_management_ip
+ vnfr_data['vdur_data'].append(vdur_data)
except Exception as e:
self._log.error("Unable to get management IP for vnfr {}:{}".
format(vnfr.name, e))
if script[0] == '/':
path = script
else:
- path = os.path.join(os.environ['RIFT_INSTALL'], "usr/bin", script)
+ path = os.path.join(rift_var_root_dir,
+ 'launchpad/packages/nsd',
+ self._project.name,
+ self.nsd_id, 'scripts',
+ script)
+
if not os.path.exists(path):
- self._log.error("Config faled for scale group {}: Script does not exist at {}".
+ self._log.error("Config failed for scale group {}: Script does not exist at {}".
format(group.name, path))
return False
@asyncio.coroutine
def update_config_status(success=True, err_msg=None):
- self._log.debug("Update %s config status to %r : %s",
+ """ This is ugly!!!
+ We are trying to determine the scaling instance's config status
+ as a collation of the config status associated with 4 different triggers
+ """
+ self._log.debug("Update %s scaling config status to %r : %s",
scale_instance, success, err_msg)
if (scale_instance.config_status == "failed"):
# Do not update the config status if it is already in failed state
else:
# We are in configuring state
# Only after post scale out mark instance as configured
- if trigger == NsdYang.ScalingTrigger.POST_SCALE_OUT:
+ if trigger == NsdBaseYang.ScalingTrigger.POST_SCALE_OUT:
if success:
scale_instance.config_status = "configured"
+ for vnfr in scale_instance.vnfrs:
+ if vnfr.config_status == "configuring":
+ vnfr.vnfr_msg.config_status = "configured"
+ yield from vnfr.update_vnfm()
else:
scale_instance.config_status = "failed"
scale_instance.config_err_msg = err_msg
+
yield from self.update_state()
+ # Publish config state as update_state seems to care only operational status
+ yield from self.publish()
config = group.trigger_config(trigger)
if config is None:
+ if trigger == NsdBaseYang.ScalingTrigger.POST_SCALE_OUT:
+ self._log.debug("No config needed, update %s scaling config status to configured",
+ scale_instance)
+ scale_instance.config_status = "configured"
return True
self._log.debug("Scaling group {} config: {}".format(group.name, config))
- if config.has_field("ns_config_primitive_name_ref"):
- config_name = config.ns_config_primitive_name_ref
+ if config.has_field("ns_service_primitive_name_ref"):
+ config_name = config.ns_service_primitive_name_ref
nsd_msg = self.nsd_msg
config_primitive = None
for ns_cfg_prim in nsd_msg.service_primitive:
self._log.debug("Scaling group {} config primitive: {}".format(group.name, config_primitive))
if config_primitive.has_field("user_defined_script"):
- rc = yield from self.apply_scale_group_config_script(config_primitive.user_defined_script,
+ script_path = '/'.join(["launchpad/packages/nsd", self._project.name, nsd_msg.id, "scripts", config_primitive.user_defined_script])
+ rc = yield from self.apply_scale_group_config_script(script_path,
group, scale_instance, trigger, vnfrs)
err_msg = None
if not rc:
const_vnfd_msg = self._get_constituent_vnfd_msg(vnf_index)
vnfd_msg = self._get_vnfd(const_vnfd_msg.vnfd_id_ref, config_xact)
- cloud_account_name, om_datacenter_name = self._get_vnfd_cloud_account(const_vnfd_msg.member_vnf_index)
- if cloud_account_name is None:
- cloud_account_name = self.cloud_account_name
+ datacenter_name = self._get_vnfd_datacenter(const_vnfd_msg.member_vnf_index)
+ if datacenter_name is None:
+ datacenter_name = self._datacenter_name
for _ in range(count):
- vnfr = yield from self.create_vnf_record(vnfd_msg, const_vnfd_msg, cloud_account_name, om_datacenter_name, group_name, index)
+ vnfr = yield from self.create_vnf_record(vnfd_msg, const_vnfd_msg, datacenter_name, group_name, index)
scale_instance.add_vnfr(vnfr)
vnfrs.append(vnfr)
return vnfrs
yield from self.update_state()
try:
- rc = yield from self.apply_scaling_group_config(NsdYang.ScalingTrigger.PRE_SCALE_OUT,
+ rc = yield from self.apply_scaling_group_config(NsdBaseYang.ScalingTrigger.PRE_SCALE_OUT,
group, scale_instance, vnfrs)
if not rc:
self._log.error("Pre scale out config for scale group {} ({}) failed".
@asyncio.coroutine
def terminate_instance():
- self._log.debug("Terminating %s VNFRS" % scale_instance)
- rc = yield from self.apply_scaling_group_config(NsdYang.ScalingTrigger.PRE_SCALE_IN,
+ self._log.debug("Terminating scaling instance %s VNFRS" % scale_instance)
+ rc = yield from self.apply_scaling_group_config(NsdBaseYang.ScalingTrigger.PRE_SCALE_IN,
group, scale_instance)
if not rc:
self._log.error("Pre scale in config for scale group {} ({}) failed".
@asyncio.coroutine
def post_scale_out_task(group, instance):
# Apply post scale out config once all VNFRs are active
- rc = yield from self.apply_scaling_group_config(NsdYang.ScalingTrigger.POST_SCALE_OUT,
+ rc = yield from self.apply_scaling_group_config(NsdBaseYang.ScalingTrigger.POST_SCALE_OUT,
group, instance)
instance.operational_status = "running"
if rc:
elif instance.operational_status == "vnf_terminate_phase":
if all([state == VnfRecordState.TERMINATED for state in instance_vnf_state_list]):
instance.operational_status = "terminated"
- rc = yield from self.apply_scaling_group_config(NsdYang.ScalingTrigger.POST_SCALE_IN,
+ rc = yield from self.apply_scaling_group_config(NsdBaseYang.ScalingTrigger.POST_SCALE_IN,
group, instance)
if rc:
self._log.debug("Scale in for group {} and instance {} succeeded".
self,
self.name,
vnffgd,
- self._sdn_account_name
+ self._sdn_account_name,
+ self._datacenter_name
)
self._vnffgrs[vnffgr.id] = vnffgr
return profile[0] if profile else None
@asyncio.coroutine
- def _create_vls(self, vld, cloud_account,om_datacenter):
+ def _create_vls(self, vld, datacenter):
"""Create a VLR in the cloud account specified using the given VLD
Args:
vld : VLD yang obj
- cloud_account : Cloud account name
+ datacenter : Cloud account name
Returns:
VirtualLinkRecord
self._dts,
self._log,
self._loop,
+ self._project,
self.name,
vld,
- cloud_account,
- om_datacenter,
+ datacenter,
self.resolve_vld_ip_profile(self.nsd_msg, vld),
self.id,
restart_mode=self.restart_mode)
return vlr
- def _extract_cloud_accounts_for_vl(self, vld):
+ def _extract_datacenters_for_vl(self, vld):
"""
Extracts the list of cloud accounts from the NS Config obj
Rules:
- 1. Cloud accounts based connection point (vnf_cloud_account_map)
+ 1. Cloud accounts based connection point (vnf_datacenter_map)
Args:
vld : VLD yang object
Returns:
TYPE: Description
"""
- cloud_account_list = []
+ datacenter_list = []
- if self._nsr_cfg_msg.vnf_cloud_account_map:
- # Handle case where cloud_account is None
- vnf_cloud_map = {}
- for vnf in self._nsr_cfg_msg.vnf_cloud_account_map:
- if vnf.cloud_account is not None or vnf.om_datacenter is not None:
- vnf_cloud_map[vnf.member_vnf_index_ref] = (vnf.cloud_account,vnf.om_datacenter)
+ if self._nsr_cfg_msg.vnf_datacenter_map:
+ # Handle case where datacenter is None
+ vnf_datacenter_map = {}
+ for vnf in self._nsr_cfg_msg.vnf_datacenter_map:
+ if vnf.datacenter is not None or vnf.datacenter is not None:
+ vnf_datacenter_map[vnf.member_vnf_index_ref] = \
+ vnf.datacenter
for vnfc in vld.vnfd_connection_point_ref:
- cloud_account = vnf_cloud_map.get(
- vnfc.member_vnf_index_ref,
- (self.cloud_account_name,self.om_datacenter_name))
+ datacenter = vnf_datacenter_map.get(
+ vnfc.member_vnf_index_ref, self._datacenter_name)
- cloud_account_list.append(cloud_account)
+ datacenter_list.append(datacenter)
- if self._nsr_cfg_msg.vl_cloud_account_map:
- for vld_map in self._nsr_cfg_msg.vl_cloud_account_map:
+ if self._nsr_cfg_msg.vl_datacenter_map:
+ for vld_map in self._nsr_cfg_msg.vl_datacenter_map:
if vld_map.vld_id_ref == vld.id:
- for cloud_account in vld_map.cloud_accounts:
- cloud_account_list.extend((cloud_account,None))
- for om_datacenter in vld_map.om_datacenters:
- cloud_account_list.extend((None,om_datacenter))
+ for datacenter in vld_map.datacenters:
+ datacenter_list.append(datacenter)
# If no config has been provided then fall-back to the default
# account
- if not cloud_account_list:
- cloud_account_list = [(self.cloud_account_name,self.om_datacenter_name)]
+ if not datacenter_list:
+ datacenter_list.append(self._datacenter_name)
- self._log.debug("VL {} cloud accounts: {}".
- format(vld.name, cloud_account_list))
- return set(cloud_account_list)
+ self._log.debug("VL {} data center list: {}".
+ format(vld.name, datacenter_list))
+ return set(datacenter_list)
@asyncio.coroutine
def create_vls(self):
for vld in self.nsd_msg.vld:
self._log.debug("Found vld %s in nsr id %s", vld, self.id)
- cloud_account_list = self._extract_cloud_accounts_for_vl(vld)
- for cloud_account,om_datacenter in cloud_account_list:
- vlr = yield from self._create_vls(vld, cloud_account,om_datacenter)
- self._vlrs.append(vlr)
-
+ datacenter_list = self._extract_datacenters_for_vl(vld)
+ for datacenter in datacenter_list:
+ vlr = yield from self._create_vls(vld, datacenter)
+ self._vlrs[vlr.id] = vlr
+ self._nsm.add_vlr_id_nsr_map(vlr.id, self)
@asyncio.coroutine
def create_vl_instance(self, vld):
- self._log.debug("Create VL for {}: {}".format(self.id, vld.as_dict()))
+ self._log.error("Create VL for {}: {}".format(self.id, vld.as_dict()))
# Check if the VL is already present
vlr = None
- for vl in self._vlrs:
+ for vl_id, vl in self._vlrs.items():
if vl.vld_msg.id == vld.id:
- self._log.debug("The VLD %s already in NSR %s as VLR %s with status %s",
+ self._log.error("The VLD %s already in NSR %s as VLR %s with status %s",
vld.id, self.id, vl.id, vl.state)
vlr = vl
if vlr.state != VlRecordState.TERMINATED:
- err_msg = "VLR for VL %s in NSR %s already instantiated", \
- vld, self.id
+ err_msg = "VLR for VL {} in NSR {} already instantiated". \
+ format(vld, self.id)
self._log.error(err_msg)
raise NsrVlUpdateError(err_msg)
break
if vlr is None:
- cloud_account_list = self._extract_cloud_accounts_for_vl(vld)
- for account,om_datacenter in cloud_account_list:
- vlr = yield from self._create_vls(vld, account,om_datacenter)
- self._vlrs.append(vlr)
+ datacenter_list = self._extract_datacenters_for_vl(vld)
+ for datacenter in datacenter_list:
+ vlr = yield from self._create_vls(vld, account, datacenter)
+ self._vlrs[vlr.id] = vlr
+ self._nsm.add_vlr_id_nsr_map(vlr.id, self)
vlr.state = VlRecordState.INSTANTIATION_PENDING
yield from self.update_state()
try:
yield from self.nsm_plugin.instantiate_vl(self, vlr)
- vlr.state = VlRecordState.ACTIVE
except Exception as e:
err_msg = "Error instantiating VL for NSR {} and VLD {}: {}". \
@asyncio.coroutine
def delete_vl_instance(self, vld):
- for vlr in self._vlrs:
+ for vlr_id, vlr in self._vlrs.items():
if vlr.vld_msg.id == vld.id:
self._log.debug("Found VLR %s for VLD %s in NSR %s",
vlr.id, vld.id, self.id)
try:
yield from self.nsm_plugin.terminate_vl(vlr)
vlr.state = VlRecordState.TERMINATED
- self._vlrs.remove(vlr)
+ del self._vlrs[vlr]
+ self.remove_vlr_id_nsr_map(vlr.id)
except Exception as e:
err_msg = "Error terminating VL for NSR {} and VLD {}: {}". \
continue
vnfd_msg = self._get_vnfd(const_vnfd.vnfd_id_ref, config_xact)
- cloud_account_name,om_datacenter_name = self._get_vnfd_cloud_account(const_vnfd.member_vnf_index)
- if cloud_account_name is None:
- cloud_account_name = self.cloud_account_name
- yield from self.create_vnf_record(vnfd_msg, const_vnfd, cloud_account_name, om_datacenter_name)
-
+ datacenter_name = self._get_vnfd_datacenter(const_vnfd.member_vnf_index)
+ if datacenter_name is None:
+ datacenter_name = self._datacenter_name
+ yield from self.create_vnf_record(vnfd_msg, const_vnfd, datacenter_name)
def get_placement_groups(self, vnfd_msg, const_vnfd):
placement_groups = []
for group in self.nsd_msg.placement_groups:
for member_vnfd in group.member_vnfd:
if (member_vnfd.vnfd_id_ref == vnfd_msg.id) and \
- (member_vnfd.member_vnf_index_ref == const_vnfd.member_vnf_index):
+ (member_vnfd.member_vnf_index_ref == str(const_vnfd.member_vnf_index)):
group_info = self.resolve_placement_group_cloud_construct(group)
if group_info is None:
self._log.info("Could not resolve cloud-construct for placement group: %s", group.name)
placement_groups.append(group_info)
return placement_groups
+ def get_cloud_config(self):
+ cloud_config = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_CloudConfig()
+ self._log.debug("Received key pair is {}".format(self._key_pairs))
+
+ for authorized_key in self.nsr_cfg_msg.ssh_authorized_key:
+ if authorized_key.key_pair_ref in self._key_pairs:
+ key_pair = cloud_config.key_pair.add()
+ key_pair.from_dict(self._key_pairs[authorized_key.key_pair_ref].as_dict())
+ for nsd_key_pair in self.nsd_msg.key_pair:
+ key_pair = cloud_config.key_pair.add()
+ key_pair.from_dict(key_pair.as_dict())
+ for nsr_cfg_user in self.nsr_cfg_msg.user:
+ user = cloud_config.user.add()
+ user.name = nsr_cfg_user.name
+ user.user_info = nsr_cfg_user.user_info
+ for ssh_key in nsr_cfg_user.ssh_authorized_key:
+ if ssh_key.key_pair_ref in self._key_pairs:
+ key_pair = user.key_pair.add()
+ key_pair.from_dict(self._key_pairs[ssh_key.key_pair_ref].as_dict())
+ for nsd_user in self.nsd_msg.user:
+ user = cloud_config.user.add()
+ user.from_dict(nsd_user.as_dict())
+
+ self._log.debug("Formed cloud-config msg is {}".format(cloud_config))
+ return cloud_config
+
@asyncio.coroutine
- def create_vnf_record(self, vnfd_msg, const_vnfd, cloud_account_name, om_datacenter_name, group_name=None, group_instance_id=None):
+ def create_vnf_record(self, vnfd_msg, const_vnfd, datacenter_name, group_name=None, group_instance_id=None):
# Fetch the VNFD associated with this VNF
placement_groups = self.get_placement_groups(vnfd_msg, const_vnfd)
- self._log.info("Cloud Account for VNF %d is %s",const_vnfd.member_vnf_index,cloud_account_name)
+ cloud_config = self.get_cloud_config()
+ self._log.info("Cloud Account for VNF %d is %s",const_vnfd.member_vnf_index,datacenter_name)
self._log.info("Launching VNF: %s (Member Index: %s) in NSD plancement Groups: %s",
vnfd_msg.name,
const_vnfd.member_vnf_index,
[ group.name for group in placement_groups])
+
vnfr = yield from VirtualNetworkFunctionRecord.create_record(self._dts,
self._log,
self._loop,
+ self._project,
vnfd_msg,
+ self._nsr_cfg_msg,
const_vnfd,
self.nsd_id,
self.name,
- cloud_account_name,
- om_datacenter_name,
+ datacenter_name,
self.id,
group_name,
group_instance_id,
placement_groups,
+ cloud_config,
restart_mode=self.restart_mode,
)
if vnfr.id in self._vnfrs:
"""
This function instantiates VNFs for every VNF in this Network Service
"""
- self._log.debug("Instantiating %u VNFs in NS %s", len(vnfrs), self.id)
- for vnf in vnfrs:
+ @asyncio.coroutine
+ def instantiate_vnf(vnf):
self._log.debug("Instantiating VNF: %s in NS %s", vnf, self.id)
+ vnfd_id = vnf.vnfr_msg.vnfd.id
+ for dependency_vnf in dependencies[vnfd_id]:
+ while dependency_vnf not in self.instantiated:
+ yield from asyncio.sleep(1, loop=self._loop)
+
yield from self.nsm_plugin.instantiate_vnf(self, vnf,scaleout)
+ self.instantiated.add(vnfd_id)
+
+ self._log.debug("Instantiating %u VNFs in NS %s", len(vnfrs), self.id)
+ dependencies = collections.defaultdict(list)
+ for dependency_vnf in self._nsr_cfg_msg.nsd.vnf_dependency:
+ dependencies[dependency_vnf.vnf_source_ref].append(dependency_vnf.vnf_depends_on_ref)
+
+ # The dictionary copy is to ensure that if a terminate is initiated right after instantiation, the
+ # Runtime error for "dictionary changed size during iteration" does not occur.
+ # vnfrs - 'dict_values' object
+ # vnfrs_copy - list object
+ vnfrs_copy = list(vnfrs)
+ tasks = []
+ for vnf in vnfrs_copy:
+ vnf_task = self._loop.create_task(instantiate_vnf(vnf))
+ tasks.append(vnf_task)
+
+ if len(tasks) > 0:
+ self._log.debug("Waiting for %s instantiate_vnf tasks to complete", len(tasks))
+ done, pending = yield from asyncio.wait(tasks, loop=self._loop, timeout=30)
+ if pending:
+ self._log.error("The Instantiate vnf task timed out after 30 seconds.")
+ raise VirtualNetworkFunctionRecordError("Task tied out : ", pending)
@asyncio.coroutine
def instantiate_vnffgs(self):
@asyncio.coroutine
def publish(self):
""" This function publishes this NSR """
+
self._nsr_msg = self.create_msg()
self._log.debug("Publishing the NSR with xpath %s and nsr %s",
self._log.debug("Publishing NSR in RUNNING state!")
#raise()
- with self._dts.transaction() as xact:
- yield from self._nsm.nsr_handler.update(xact, self.nsr_xpath, self._nsr_msg)
- if self._op_status.state == NetworkServiceRecordState.RUNNING:
- self._debug_running = True
+ yield from self._nsm.nsr_handler.update(None, self.nsr_xpath, self._nsr_msg)
+ if self._op_status.state == NetworkServiceRecordState.RUNNING:
+ self._debug_running = True
@asyncio.coroutine
- def unpublish(self, xact):
+ def unpublish(self, xact=None):
""" Unpublish this NSR object """
self._log.debug("Unpublishing Network service id %s", self.id)
+
yield from self._nsm.nsr_handler.delete(xact, self.nsr_xpath)
@property
def nsr_xpath(self):
""" Returns the xpath associated with this NSR """
- return(
+ return self._project.add_project((
"D,/nsr:ns-instance-opdata" +
- "/nsr:nsr[nsr:ns-instance-config-ref = '{}']"
- ).format(self.id)
+ "/nsr:nsr[nsr:ns-instance-config-ref={}]"
+ ).format(quoted_key(self.id)))
@staticmethod
def xpath_from_nsr(nsr):
""" Returns the xpath associated with this NSR op data"""
return (NetworkServiceRecord.XPATH +
- "[nsr:ns-instance-config-ref = '{}']").format(nsr.id)
+ "[nsr:ns-instance-config-ref={}]").format(quoted_key(nsr.id))
@property
def nsd_xpath(self):
""" Return NSD config xpath."""
- return(
- "C,/nsd:nsd-catalog/nsd:nsd[nsd:id = '{}']"
- ).format(self.nsd_id)
+ return self._project.add_project((
+ "C,/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id={}]"
+ ).format(quoted_key(self.nsd_id)))
@asyncio.coroutine
def instantiate(self, config_xact):
# Move the state to INIITALIZING
self.set_state(NetworkServiceRecordState.INIT)
- event_descr = "Instantiation Request Received NSR Id:%s" % self.id
+ event_descr = "Instantiation Request Received NSR Id: %s, NS Name: %s" % (self.id, self.name)
self.record_event("instantiating", event_descr)
# Find the NSD
self._nsd = self._nsr_cfg_msg.nsd
# Merge any config and initial config primitive values
- self.config_store.merge_nsd_config(self.nsd_msg)
+ self.config_store.merge_nsd_config(self.nsd_msg, self._project.name)
self._log.debug("Merged NSD: {}".format(self.nsd_msg.as_dict()))
- event_descr = "Fetched NSD with descriptor id %s" % self.nsd_id
+ event_descr = "Fetched NSD with descriptor id %s, NS Name: %s" % (self.nsd_id, self.name)
self.record_event("nsd-fetched", event_descr)
if self._nsd is None:
self.id, self.nsd_id)
# instantiate the VLs
- event_descr = ("Instantiating %s external VLs for NSR id %s" %
- (len(self.nsd_msg.vld), self.id))
+ event_descr = ("Instantiating %s external VLs for NSR id: %s, NS Name: %s " %
+ (len(self.nsd_msg.vld), self.id, self.name))
self.record_event("begin-external-vls-instantiation", event_descr)
self.set_state(NetworkServiceRecordState.VL_INIT_PHASE)
- yield from self.instantiate_vls()
-
# Publish the NSR to DTS
yield from self.publish()
- event_descr = ("Finished instantiating %s external VLs for NSR id %s" %
- (len(self.nsd_msg.vld), self.id))
+ if self._ns_terminate_received:
+ self._log.debug("Terminate Received. Interrupting Instantiation at event : begin-external-vls-instantiation.")
+ # Setting this flag as False again as this is a state where neither VL or VNF have been instantiated.
+ self._ns_terminate_received = False
+ # At this stage only ns-instance opdata is published. Cleaning up the record.
+ yield from self.unpublish()
+ return
+
+ yield from self.instantiate_vls()
+
+ event_descr = ("Finished instantiating %s external VLs for NSR id: %s, NS Name: %s " %
+ (len(self.nsd_msg.vld), self.id, self.name))
self.record_event("end-external-vls-instantiation", event_descr)
self.set_state(NetworkServiceRecordState.VNF_INIT_PHASE)
+ # Publish the NSR to DTS
+ yield from self.publish()
+
self._log.debug("Instantiating VNFs ...... nsr[%s], nsd[%s]",
self.id, self.nsd_id)
# instantiate the VNFs
- event_descr = ("Instantiating %s VNFS for NSR id %s" %
- (len(self.nsd_msg.constituent_vnfd), self.id))
+ event_descr = ("Instantiating %s VNFS for NSR id: %s, NS Name: %s " %
+ (len(self.nsd_msg.constituent_vnfd), self.id, self.name))
self.record_event("begin-vnf-instantiation", event_descr)
+ if self._ns_terminate_received:
+ self._log.debug("Terminate Received. Interrupting Instantiation at event : end-external-vls-instantiation.")
+ return
+
yield from self.instantiate_vnfs(self._vnfrs.values())
- self._log.debug(" Finished instantiating %d VNFs for NSR id %s",
- len(self.nsd_msg.constituent_vnfd), self.id)
+ self._log.debug(" Finished instantiating %d VNFs for NSR id: %s, NS Name: %s",
+ len(self.nsd_msg.constituent_vnfd), self.id, self.name)
- event_descr = ("Finished instantiating %s VNFs for NSR id %s" %
- (len(self.nsd_msg.constituent_vnfd), self.id))
+ event_descr = ("Finished instantiating %s VNFs for NSR id: %s, NS Name: %s" %
+ (len(self.nsd_msg.constituent_vnfd), self.id, self.name))
self.record_event("end-vnf-instantiation", event_descr)
+ # Publish the NSR to DTS
+ yield from self.publish()
+
if len(self.vnffgrs) > 0:
#self.set_state(NetworkServiceRecordState.VNFFG_INIT_PHASE)
- event_descr = ("Instantiating %s VNFFGS for NSR id %s" %
- (len(self.nsd_msg.vnffgd), self.id))
+ event_descr = ("Instantiating %s VNFFGS for NSR id: %s, NS Name: %s" %
+ (len(self.nsd_msg.vnffgd), self.id, self.name))
self.record_event("begin-vnffg-instantiation", event_descr)
+ if self._ns_terminate_received:
+ self._log.debug("Terminate Received. Interrupting Instantiation at event : begin-vnffg-instantiation.")
+ return
+
yield from self.instantiate_vnffgs()
- event_descr = ("Finished instantiating %s VNFFGDs for NSR id %s" %
- (len(self.nsd_msg.vnffgd), self.id))
+ event_descr = ("Finished instantiating %s VNFFGDs for NSR id: %s, NS Name: %s" %
+ (len(self.nsd_msg.vnffgd), self.id, self.name))
self.record_event("end-vnffg-instantiation", event_descr)
if self.has_scaling_instances():
- event_descr = ("Instantiating %s Scaling Groups for NSR id %s" %
- (len(self._scaling_groups), self.id))
+ event_descr = ("Instantiating %s Scaling Groups for NSR id: %s, NS Name: %s" %
+ (len(self._scaling_groups), self.id, self.name))
self.record_event("begin-scaling-group-instantiation", event_descr)
+
+ if self._ns_terminate_received:
+ self._log.debug("Terminate Received. Interrupting Instantiation at event : begin-scaling-group-instantiation.")
+ return
+
yield from self.instantiate_scaling_instances(config_xact)
self.record_event("end-scaling-group-instantiation", event_descr)
# virtual links and vnfs are instantiated
yield from self.nsm_plugin.deploy(self._nsr_msg)
- self._log.debug("Publishing NSR...... nsr[%s], nsd[%s]",
- self.id, self.nsd_id)
+ self._log.debug("Publishing NSR...... nsr[%s], nsd[%s], for NS[%s]",
+ self.id, self.nsd_id, self.name)
# Publish the NSR to DTS
yield from self.publish()
- self._log.debug("Published NSR...... nsr[%s], nsd[%s]",
- self.id, self.nsd_id)
+ self._log.debug("Published NSR...... nsr[%s], nsd[%s], for NS[%s]",
+ self.id, self.nsd_id, self.name)
def on_instantiate_done(fut):
# If the do_instantiate fails, then publish NSR with failed result
yield from self.publish()
+ if status == NsrYang.ConfigStates.TERMINATE:
+ yield from self.terminate_ns_cont()
+
@asyncio.coroutine
def is_active(self):
""" This NS is active """
self._log.debug("Network service %s is active ", self.id)
self._is_active = True
- event_descr = "NSR in running state for NSR id %s" % self.id
+ event_descr = "NSR in running state for NSR id: %s, NS Name: %s" % (self.id, self.name)
self.record_event("ns-running", event_descr)
yield from self.publish()
self._log.error("Network service id:%s, name:%s instantiation failed",
self.id, self.name)
self.set_state(NetworkServiceRecordState.FAILED)
+ self._is_failed = True
- event_descr = "Instantiation of NS %s failed" % self.id
+ event_descr = "Instantiation of NS %s - %s failed" % (self.id, self.name)
self.record_event("ns-failed", event_descr, evt_details=failed_reason)
# Publish the NSR to DTS
@asyncio.coroutine
def terminate_vnfrs(self, vnfrs, scalein=False):
""" Terminate VNFRS in this network service """
- self._log.debug("Terminating VNFs in network service %s", self.id)
- for vnfr in vnfrs:
+ self._log.debug("Terminating VNFs in network service %s - %s", self.id, self.name)
+ vnfr_ids = []
+ for vnfr in list(vnfrs):
self._log.debug("Terminating VNFs in network service %s %s", vnfr.id, self.id)
- if scalein:
- yield from self.nsm_plugin.terminate_vnf(self, vnfr, scalein=True)
+ yield from self.nsm_plugin.terminate_vnf(self, vnfr, scalein=scalein)
+ vnfr_ids.append(vnfr.id)
+
+ for vnfr_id in vnfr_ids:
+ self._vnfrs.pop(vnfr_id, None)
@asyncio.coroutine
def terminate(self):
- """ Terminate a NetworkServiceRecord."""
+ """Start terminate of a NetworkServiceRecord."""
+ # Move the state to TERMINATE
+ self.set_state(NetworkServiceRecordState.TERMINATE)
+ event_descr = "Terminate being processed for NS Id: %s, NS Name: %s" % (self.id, self.name)
+ self.record_event("terminate", event_descr)
+ self._log.debug("Terminating network service id: %s, NS Name: %s", self.id, self.name)
+
+ # Adding the NSR ID on terminate Evet. This will be checked to halt the instantiation if not already finished.
+ self._ns_terminate_received = True
+
+ yield from self.publish()
+
+ if self._is_failed:
+ # IN case the instantiation failed, then trigger a cleanup immediately
+ # don't wait for Cfg manager, as it will have no idea of this NSR.
+ # Due to the failure
+ yield from self.terminate_ns_cont()
+
+
+ @asyncio.coroutine
+ def terminate_ns_cont(self):
+ """Config script related to terminate finished, continue termination"""
def terminate_vnffgrs():
""" Terminate VNFFGRS in this network service """
- self._log.debug("Terminating VNFFGRs in network service %s", self.id)
+ self._log.debug("Terminating VNFFGRs in network service %s - %s", self.id, self.name)
for vnffgr in self.vnffgrs.values():
yield from vnffgr.terminate()
def terminate_vlrs():
""" Terminate VLRs in this netork service """
- self._log.debug("Terminating VLs in network service %s", self.id)
- for vlr in self.vlrs:
+ self._log.debug("Terminating VLs in network service %s - %s", self.id, self.name)
+ for vlr_id, vlr in self.vlrs.items():
yield from self.nsm_plugin.terminate_vl(vlr)
vlr.state = VlRecordState.TERMINATED
- self._log.debug("Terminating network service id %s", self.id)
-
- # Move the state to TERMINATE
- self.set_state(NetworkServiceRecordState.TERMINATE)
- event_descr = "Terminate being processed for NS Id:%s" % self.id
- self.record_event("terminate", event_descr)
-
# Move the state to VNF_TERMINATE_PHASE
- self._log.debug("Terminating VNFFGs in NS ID: %s", self.id)
+ self._log.debug("Terminating VNFFGs in NS ID: %s, NS Name: %s", self.id, self.name)
self.set_state(NetworkServiceRecordState.VNFFG_TERMINATE_PHASE)
- event_descr = "Terminating VNFFGS in NS Id:%s" % self.id
+ event_descr = "Terminating VNFFGS in NS Id: %s, NS Name: %s" % (self.id, self.name)
self.record_event("terminating-vnffgss", event_descr)
yield from terminate_vnffgrs()
# Move the state to VNF_TERMINATE_PHASE
self.set_state(NetworkServiceRecordState.VNF_TERMINATE_PHASE)
- event_descr = "Terminating VNFS in NS Id:%s" % self.id
+ event_descr = "Terminating VNFS in NS Id: %s, NS Name: %s" % (self.id, self.name)
self.record_event("terminating-vnfs", event_descr)
yield from self.terminate_vnfrs(self.vnfrs.values())
# Move the state to VL_TERMINATE_PHASE
self.set_state(NetworkServiceRecordState.VL_TERMINATE_PHASE)
- event_descr = "Terminating VLs in NS Id:%s" % self.id
+ event_descr = "Terminating VLs in NS Id: %s, NS Name: %s" % (self.id, self.name)
self.record_event("terminating-vls", event_descr)
yield from terminate_vlrs()
yield from self.nsm_plugin.terminate_ns(self)
+ # Remove the generated SSH key
+ if self._ssh_key_file:
+ p = urlparse(self._ssh_key_file)
+ if p[0] == 'file':
+ path = os.path.dirname(p[2])
+ self._log.debug("NSR {}: Removing keys in {}".format(self.name,
+ path))
+ shutil.rmtree(path, ignore_errors=True)
+
# Move the state to TERMINATED
self.set_state(NetworkServiceRecordState.TERMINATED)
- event_descr = "Terminated NS Id:%s" % self.id
+ event_descr = "Terminated NS Id: %s, NS Name: %s" % (self.id, self.name)
self.record_event("terminated", event_descr)
+ # Unpublish the NSR record
+ self._log.debug("Unpublishing the network service %s - %s", self.id, self.name)
+ yield from self.unpublish()
+
+ # Finaly delete the NS instance from this NS Manager
+ self._log.debug("Deleting the network service %s - %s", self.id, self.name)
+ self.nsm.delete_nsr(self.id)
+
def enable(self):
""""Enable a NetworkServiceRecord."""
pass
def create_msg(self):
""" The network serice record as a message """
nsr_dict = {"ns_instance_config_ref": self.id}
- nsr = RwNsrYang.YangData_Nsr_NsInstanceOpdata_Nsr.from_dict(nsr_dict)
- #nsr.cloud_account = self.cloud_account_name
+ nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr.from_dict(nsr_dict)
+ #nsr.datacenter = self.cloud_account_name
nsr.sdn_account = self._sdn_account_name
nsr.name_ref = self.name
nsr.nsd_ref = self.nsd_id
nsr.create_time = self._create_time
nsr.uptime = int(time.time()) - self._create_time
+ # Added for OpenMano
+
+ nsr.orchestration_progress.networks.total = len(self.nsd_msg.vld)
+ if isinstance(self.nsm_plugin, openmano_nsm.OpenmanoNsPlugin):
+ # Taking the last update by OpenMano
+ nsr.orchestration_progress.networks.active = self.nsm_plugin._openmano_nsrs[self.id]._active_nets
+ else:
+ nsr.orchestration_progress.networks.active = self._active_networks
+ no_of_vdus = 0
+ for vnfr_id, vnfr in self._vnfrs.items():
+ no_of_vdus += len(vnfr.vnfd.vdu)
+
+ nsr.orchestration_progress.vms.total = no_of_vdus
+ if isinstance(self.nsm_plugin, openmano_nsm.OpenmanoNsPlugin):
+ # Taking the last update by OpenMano
+ nsr.orchestration_progress.vms.active = self.nsm_plugin._openmano_nsrs[self.id]._active_vms
+ else:
+ nsr.orchestration_progress.vms.active = self._active_vms
+
+ # Generated SSH key
+ if self._ssh_pub_key:
+ nsr.ssh_key_generated.private_key_file = self._ssh_key_file
+ nsr.ssh_key_generated.public_key = self._ssh_pub_key
+
for cfg_prim in self.nsd_msg.service_primitive:
- cfg_prim = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ServicePrimitive.from_dict(
+ cfg_prim = RwNsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ServicePrimitive.from_dict(
cfg_prim.as_dict())
nsr.service_primitive.append(cfg_prim)
- for init_cfg in self.nsd_msg.initial_config_primitive:
- prim = NsrYang.NsrInitialConfigPrimitive.from_dict(
+ for init_cfg in self.nsd_msg.initial_service_primitive:
+ prim = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_InitialServicePrimitive.from_dict(
init_cfg.as_dict())
- nsr.initial_config_primitive.append(prim)
+ nsr.initial_service_primitive.append(prim)
+
+ for term_cfg in self.nsd_msg.terminate_service_primitive:
+ prim = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_TerminateServicePrimitive.from_dict(
+ term_cfg.as_dict())
+ nsr.terminate_service_primitive.append(prim)
if self.vl_phase_completed():
- for vlr in self.vlrs:
+ for vlr_id, vlr in self.vlrs.items():
nsr.vlr.append(vlr.create_nsr_vlr_msg(self.vnfrs.values()))
if self.vnf_phase_completed():
""" Re-evaluate this NS's state """
curr_state = self._op_status.state
- if curr_state == NetworkServiceRecordState.TERMINATED:
- self._log.debug("NS (%s) in terminated state, not updating state", self.id)
- return
+ # This means that the terminate has been fired before the NS was UP.
+ if self._ns_terminate_received:
+ # Resetting this flag so that terminate ns is not called via subsequent DTS Handlers after the intial call.
+ self._ns_terminate_received = False
+ yield from self.terminate_ns_cont()
+ else:
+ if curr_state == NetworkServiceRecordState.TERMINATED:
+ self._log.debug("NS (%s - %s) in terminated state, not updating state", self.id, self.name)
+ return
- new_state = NetworkServiceRecordState.RUNNING
- self._log.info("Received update_state for nsr: %s, curr-state: %s",
- self.id, curr_state)
+ new_state = NetworkServiceRecordState.RUNNING
+ self._log.debug("Received update_state for nsr: %s, curr-state: %s",
+ self.id, curr_state)
- # Check all the VNFRs are present
- for _, vnfr in self.vnfrs.items():
- if vnfr.state in [VnfRecordState.ACTIVE, VnfRecordState.TERMINATED]:
- pass
- elif vnfr.state == VnfRecordState.FAILED:
- if vnfr._prev_state != vnfr.state:
- event_descr = "Instantiation of VNF %s failed" % vnfr.id
- event_error_details = vnfr.state_failed_reason
- self.record_event("vnf-failed", event_descr, evt_details=event_error_details)
- vnfr.set_state(VnfRecordState.FAILED)
- else:
- self._log.info("VNF state did not change, curr=%s, prev=%s",
- vnfr.state, vnfr._prev_state)
- new_state = NetworkServiceRecordState.FAILED
- break
- else:
- self._log.info("VNF %s in NSR %s is still not active; current state is: %s",
- vnfr.id, self.id, vnfr.state)
- new_state = curr_state
-
- # If new state is RUNNING; check all VLs
- if new_state == NetworkServiceRecordState.RUNNING:
- for vl in self.vlrs:
-
- if vl.state in [VlRecordState.ACTIVE, VlRecordState.TERMINATED]:
- pass
- elif vl.state == VlRecordState.FAILED:
- if vl.prev_state != vl.state:
- event_descr = "Instantiation of VL %s failed" % vl.id
- event_error_details = vl.state_failed_reason
- self.record_event("vl-failed", event_descr, evt_details=event_error_details)
- vl.prev_state = vl.state
+ # check all VLs
+ if (isinstance(self.nsm_plugin, rwnsmplugin.RwNsPlugin)):
+ for vlr_id, vl in self.vlrs.items():
+ self._log.debug("VLR %s state %s", vlr_id, vl.state)
+ if vl.state in [VlRecordState.ACTIVE, VlRecordState.TERMINATED]:
+ continue
+ elif vl.state == VlRecordState.FAILED:
+ if vl.prev_state != vl.state:
+ event_descr = "Instantiation of VL %s failed" % vl.id
+ event_error_details = vl.state_failed_reason
+ self.record_event("vl-failed", event_descr, evt_details=event_error_details)
+ vl.prev_state = vl.state
+ new_state = NetworkServiceRecordState.FAILED
+ break
+ else:
+ self._log.debug("VL already in failed state")
else:
- self._log.debug("VL %s already in failed state")
- else:
- if vl.state in [VlRecordState.INSTANTIATION_PENDING, VlRecordState.INIT]:
- new_state = NetworkServiceRecordState.VL_INSTANTIATE
+ if vl.state in [VlRecordState.INSTANTIATION_PENDING, VlRecordState.INIT]:
+ new_state = NetworkServiceRecordState.VL_INSTANTIATE
+ break
+
+ if vl.state in [VlRecordState.TERMINATE_PENDING]:
+ new_state = NetworkServiceRecordState.VL_TERMINATE
+ break
+
+ # Check all the VNFRs are present
+ if new_state == NetworkServiceRecordState.RUNNING:
+ for _, vnfr in self.vnfrs.items():
+ self._log.debug("VNFR state %s", vnfr.state)
+ if vnfr.state in [VnfRecordState.ACTIVE, VnfRecordState.TERMINATED]:
+ active_vdus = 0
+ for vnfr in self.vnfrs:
+ active_vdus += self.nsm._vnfrs[vnfr]._active_vdus
+
+ if self._active_vms != active_vdus:
+ self._active_vms = active_vdus
+ yield from self.publish()
+
+ continue
+
+ elif vnfr.state == VnfRecordState.FAILED:
+ if vnfr._prev_state != vnfr.state:
+ event_descr = "Instantiation of VNF %s for NS: %s failed" % (vnfr.id, self.name)
+ event_error_details = vnfr.state_failed_reason
+ self.record_event("vnf-failed", event_descr, evt_details=event_error_details)
+ vnfr.set_state(VnfRecordState.FAILED)
+ else:
+ self._log.info("VNF state did not change, curr=%s, prev=%s",
+ vnfr.state, vnfr._prev_state)
+ new_state = NetworkServiceRecordState.FAILED
break
-
- if vl.state in [VlRecordState.TERMINATE_PENDING]:
- new_state = NetworkServiceRecordState.VL_TERMINATE
+ else:
+ self._log.debug("VNF %s in NSR %s - %s is still not active; current state is: %s",
+ vnfr.id, self.id, self.name, vnfr.state)
+ new_state = curr_state
+
+ # If new state is RUNNING; check VNFFGRs are also active
+ if new_state == NetworkServiceRecordState.RUNNING:
+ for _, vnffgr in self.vnffgrs.items():
+ self._log.debug("Checking vnffgr state for nsr %s is: %s",
+ self.id, vnffgr.state)
+ if vnffgr.state == VnffgRecordState.ACTIVE:
+ continue
+ elif vnffgr.state == VnffgRecordState.FAILED:
+ event_descr = "Instantiation of VNFFGR %s failed" % vnffgr.id
+ self.record_event("vnffg-failed", event_descr)
+ new_state = NetworkServiceRecordState.FAILED
break
+ else:
+ self._log.info("VNFFGR %s in NSR %s - %s is still not active; current state is: %s",
+ vnffgr.id, self.id, self.name, vnffgr.state)
+ new_state = curr_state
+
+ # Update all the scaling group instance operational status to
+ # reflect the state of all VNFR within that instance
+ yield from self._update_scale_group_instances_status()
- # If new state is RUNNING; check VNFFGRs are also active
- if new_state == NetworkServiceRecordState.RUNNING:
- for _, vnffgr in self.vnffgrs.items():
- self._log.info("Checking vnffgr state for nsr %s is: %s",
- self.id, vnffgr.state)
- if vnffgr.state == VnffgRecordState.ACTIVE:
- pass
- elif vnffgr.state == VnffgRecordState.FAILED:
- event_descr = "Instantiation of VNFFGR %s failed" % vnffgr.id
- self.record_event("vnffg-failed", event_descr)
- new_state = NetworkServiceRecordState.FAILED
+ for _, group in self._scaling_groups.items():
+ if group.state == scale_group.ScaleGroupState.SCALING_OUT:
+ new_state = NetworkServiceRecordState.SCALING_OUT
+ break
+ elif group.state == scale_group.ScaleGroupState.SCALING_IN:
+ new_state = NetworkServiceRecordState.SCALING_IN
break
+
+ if new_state != curr_state:
+ self._log.debug("Changing state of Network service %s - %s from %s to %s",
+ self.id, self.name, curr_state, new_state)
+ if new_state == NetworkServiceRecordState.RUNNING:
+ yield from self.is_active()
+ elif new_state == NetworkServiceRecordState.FAILED:
+ # If the NS is already active and we entered scaling_in, scaling_out,
+ # do not mark the NS as failing if scaling operation failed.
+ if curr_state in [NetworkServiceRecordState.SCALING_OUT,
+ NetworkServiceRecordState.SCALING_IN] and self._is_active:
+ new_state = NetworkServiceRecordState.RUNNING
+ self.set_state(new_state)
+ else:
+ yield from self.instantiation_failed()
else:
- self._log.info("VNFFGR %s in NSR %s is still not active; current state is: %s",
- vnffgr.id, self.id, vnffgr.state)
- new_state = curr_state
+ self.set_state(new_state)
- # Update all the scaling group instance operational status to
- # reflect the state of all VNFR within that instance
- yield from self._update_scale_group_instances_status()
+ yield from self.publish()
- for _, group in self._scaling_groups.items():
- if group.state == scale_group.ScaleGroupState.SCALING_OUT:
- new_state = NetworkServiceRecordState.SCALING_OUT
- break
- elif group.state == scale_group.ScaleGroupState.SCALING_IN:
- new_state = NetworkServiceRecordState.SCALING_IN
- break
+ def vl_instantiation_state(self):
+ """ Check if all VLs in this NS are active """
+ for vl_id, vlr in self.vlrs.items():
+ if vlr.state == VlRecordState.ACTIVE:
+ continue
+ elif vlr.state == VlRecordState.FAILED:
+ return VlRecordState.FAILED
+ elif vlr.state == VlRecordState.TERMINATED:
+ return VlRecordState.TERMINATED
+ elif vlr.state == VlRecordState.INSTANTIATION_PENDING:
+ return VlRecordState.INSTANTIATION_PENDING
+ else:
+ self._log.error("vlr %s still in state %s", vlr, vlr.state)
+ raise VirtualLinkRecordError("Invalid state %s" %(vlr.state))
+ return VlRecordState.ACTIVE
+
+ def vl_instantiation_successful(self):
+ """ Mark that all VLs in this NS are active """
+ if self._vls_ready.is_set():
+ self._log.error("NSR id %s, vls_ready is already set", self.id)
+
+ if self.vl_instantiation_state() == VlRecordState.ACTIVE:
+ self._log.debug("NSR id %s, All %d vlrs are in active state %s",
+ self.id, len(self.vlrs), self.vl_instantiation_state)
+ self._vls_ready.set()
+
+ def vlr_event(self, vlr, action):
+ self._log.debug("Received VLR %s with action:%s", vlr, action)
+
+ if vlr.id not in self.vlrs:
+ self._log.error("VLR %s:%s received for unknown id, state:%s",
+ vlr.id, vlr.name, vlr.operational_status)
+ return
- if new_state != curr_state:
- self._log.debug("Changing state of Network service %s from %s to %s",
- self.id, curr_state, new_state)
- if new_state == NetworkServiceRecordState.RUNNING:
- yield from self.is_active()
- elif new_state == NetworkServiceRecordState.FAILED:
- # If the NS is already active and we entered scaling_in, scaling_out,
- # do not mark the NS as failing if scaling operation failed.
- if curr_state in [NetworkServiceRecordState.SCALING_OUT,
- NetworkServiceRecordState.SCALING_IN] and self._is_active:
- new_state = NetworkServiceRecordState.RUNNING
- self.set_state(new_state)
- else:
- yield from self.instantiation_failed()
+ vlr_local = self.vlrs[vlr.id]
+
+ if action == rwdts.QueryAction.CREATE or action == rwdts.QueryAction.UPDATE:
+ if vlr.operational_status == 'running':
+ vlr_local.set_state_from_op_status(vlr.operational_status)
+ self._active_networks += 1
+ self._log.info("VLR %s:%s moving to active state",
+ vlr.id,vlr.name)
+ elif vlr.operational_status == 'failed':
+ vlr_local.set_state_from_op_status(vlr.operational_status)
+ vlr_local.state_failed_reason = vlr.operational_status_details
+ asyncio.ensure_future(self.update_state(), loop=self._loop)
+ self._log.info("VLR %s:%s moving to failed state",
+ vlr.id,vlr.name)
else:
- self.set_state(new_state)
+ self._log.warning("VLR %s:%s received state:%s",
+ vlr.id, vlr.name, vlr.operational_status)
- yield from self.publish()
+ if isinstance(self.nsm_plugin, rwnsmplugin.RwNsPlugin):
+ self.vl_instantiation_successful()
+
+ # self.update_state() is responsible for publishing the NSR state. Its being called by vlr_event and update_vnfr.
+ # The call from vlr_event occurs only if vlr reaches a failed state. Hence implementing the check here to handle
+ # ns terminate received after other vlr states as vl-alloc-pending, vl-init, running.
+ if self._ns_terminate_received:
+ # Resetting this flag so that terminate ns is not called via subsequent DTS Handlers after the intial call.
+ if vlr.operational_status in ['running', 'failed']:
+ self._ns_terminate_received = False
+ asyncio.ensure_future(self.terminate_ns_cont(), loop=self._loop)
class InputParameterSubstitution(object):
This class is responsible for substituting input parameters into an NSD.
"""
- def __init__(self, log):
+ def __init__(self, log, project):
"""Create an instance of InputParameterSubstitution
Arguments:
"""
self.log = log
+ self.project = project
+
+ def _fix_xpath(self, xpath):
+ # Fix the parameter.xpath to include project and correct namespace
+ self.log.debug("Provided xpath: {}".format(xpath))
+ #Split the xpath at the /
+ attrs = xpath.split('/')
+ new_xp = attrs[0]
+ for attr in attrs[1:]:
+ new_ns = 'project-nsd'
+ name = attr
+ if ':' in attr:
+ # Includes namespace
+ ns, name = attr.split(':', 2)
+ if ns == "rw-nsd":
+ ns = "rw-project-nsd"
+
+ new_xp = new_xp + '/' + new_ns + ':' + name
+
+ updated_xpath = self.project.add_project(new_xp)
+
+ self.log.error("Updated xpath: {}".format(updated_xpath))
+ return updated_xpath
def __call__(self, nsd, nsr_config):
"""Substitutes input parameters from the NSR config into the NSD
)
try:
- xpath.setxattr(nsd, param.xpath, param.value)
+ xp = self._fix_xpath(param.xpath)
+ xpath.setxattr(nsd, xp, param.value)
except Exception as e:
self.log.exception(e)
+class VnfInputParameterSubstitution(object):
+ """
+ This class is responsible for substituting input parameters into a VNFD.
+ """
+
+ def __init__(self, log, const_vnfd, project):
+ """Create an instance of VnfInputParameterSubstitution
+
+ Arguments:
+ log - a logger for this object to use
+ const_vnfd - id refs for vnfs in a ns
+ project - project for the VNFs
+ """
+
+ self.log = log
+ self.member_vnf_index = const_vnfd.member_vnf_index
+ self.vnfd_id_ref = const_vnfd.vnfd_id_ref
+ self.project = project
+
+ def __call__(self, vnfr, nsr_config):
+ """Substitutes vnf input parameters from the NSR config into the VNFD
+
+ This call modifies the provided VNFD with the input parameters that are
+ contained in the NSR config.
+
+ Arguments:
+ vnfr - a GI VNFR object
+ nsr_config - a GI NSR Config object
+
+ """
+
+ def compose_xpath(xpath, id):
+ prefix = "/rw-project:project[rw-project:name={}]".format(quoted_key(self.project.name)) + \
+ "/vnfr:vnfr-catalog/vnfr:vnfr[vnfr:id={}]/vnfr:vnfd/".format(quoted_key(id))
+
+ suffix = '/'.join(xpath.split('/')[3:]).replace('vnfd', 'vnfr')
+ return prefix + suffix
+
+ def substitute_xpath(ip_xpath, substitute_value, vnfr):
+ vnfr_xpath = compose_xpath(ip_xpath, vnfr.id)
+
+ try:
+ verify_xpath_wildcarded = xpath.getxattr(vnfr, vnfr_xpath)
+
+ self.log.debug(
+ "vnf-input-parameter:{} = {}, for VNF : [member-vnf-index : {}, vnfd-id-ref : {}]".format(
+ ip_xpath,
+ substitute_value,
+ self.member_vnf_index,
+ self.vnfd_id_ref
+ )
+ )
+ try:
+ xpath.setxattr(vnfr, vnfr_xpath, substitute_value)
+
+ except Exception as e:
+ self.log.exception(e)
+
+ except Exception as e:
+ self.log.exception("Wildcarded xpath {} is listy in nature. Can not update. Exception => {}"
+ .format(ip_xpath, e))
+
+ if vnfr is None or nsr_config is None:
+ return
+
+ optional_input_parameters = set()
+ for input_parameter in nsr_config.nsd.input_parameter_xpath:
+ optional_input_parameters.add(input_parameter.xpath)
+
+ # Apply the input parameters to the vnfr
+ if nsr_config.vnf_input_parameter:
+ for param in nsr_config.vnf_input_parameter:
+ if (param.member_vnf_index_ref == self.member_vnf_index and param.vnfd_id_ref == self.vnfd_id_ref):
+ if param.input_parameter:
+ for ip in param.input_parameter:
+ if ip.xpath not in optional_input_parameters:
+ msg = "Substitution Failed. Tried to set an invalid vnf input parameter ({}) for vnf [member-vnf-index : {}, vnfd-id-ref : {}]"
+ self.log.error(msg.format(ip.xpath, self.member_vnf_index, self.vnfd_id_ref))
+ continue
+
+ try:
+ substitute_xpath(ip.xpath, ip.value, vnfr)
+ except Exception as e:
+ self.log.exception(e)
+ else:
+ self.log.debug("Substituting Xpaths with default Values")
+ for input_parameter in nsr_config.nsd.input_parameter_xpath:
+ if input_parameter.default_value is not None:
+ try:
+ if "vnfd-catalog" in input_parameter.xpath:
+ substitute_xpath(input_parameter.xpath, input_parameter.default_value, vnfr)
+ except Exception as e:
+ self.log.exception(e)
+
+
class NetworkServiceDescriptor(object):
"""
Network service descriptor class
@staticmethod
def path_for_id(nsd_id):
""" Return path for the passed nsd_id"""
- return "C,/nsd:nsd-catalog/nsd:nsd[nsd:id = '{}'".format(nsd_id)
+ return self._nsm._project.add_project(
+ "C,/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id = '{}'".
+ format(nsd_id))
def path(self):
""" Return the message associated with this NetworkServiceDescriptor"""
class NsdDtsHandler(object):
""" The network service descriptor DTS handler """
- XPATH = "C,/nsd:nsd-catalog/nsd:nsd"
+ XPATH = "C,/project-nsd:nsd-catalog/project-nsd:nsd"
def __init__(self, dts, log, loop, nsm):
self._dts = dts
self._nsm = nsm
self._regh = None
+ self._project = nsm._project
@property
def regh(self):
def register(self):
""" Register for Nsd create/update/delete/read requests from dts """
+ if self._regh:
+ self._log.warning("DTS handler already registered for project {}".
+ format(self._project.name))
+ return
+
def on_apply(dts, acg, xact, action, scratch):
"""Apply the configuration"""
is_recovery = xact.xact is None and action == rwdts.AppconfAction.INSTALL
self._log.debug("Got nsd apply cfg (xact:%s) (action:%s)",
xact, action)
- # Create/Update an NSD record
- for cfg in self._regh.get_xact_elements(xact):
- # Only interested in those NSD cfgs whose ID was received in prepare callback
- if cfg.id in scratch.get('nsds', []) or is_recovery:
- self._nsm.update_nsd(cfg)
- scratch.pop('nsds', None)
+ if self._regh:
+ # Create/Update an NSD record
+ for cfg in self._regh.get_xact_elements(xact):
+ # Only interested in those NSD cfgs whose ID was received in prepare callback
+ if cfg.id in scratch.get('nsds', []) or is_recovery:
+ self._nsm.update_nsd(cfg)
- return RwTypes.RwStatus.SUCCESS
+ else:
+ # This can happen if we do the deregister
+ # during project delete before this is called
+ self._log.debug("No reg handle for {} for project {}".
+ format(self.__class__, self._project.name))
- @asyncio.coroutine
- def delete_nsd_libs(nsd_id):
- """ Remove any files uploaded with NSD and stored under $RIFT_ARTIFACTS/libs/<id> """
- try:
- rift_artifacts_dir = os.environ['RIFT_ARTIFACTS']
- nsd_dir = os.path.join(rift_artifacts_dir, 'launchpad/libs', nsd_id)
+ scratch.pop('nsds', None)
- if os.path.exists (nsd_dir):
- shutil.rmtree(nsd_dir, ignore_errors=True)
- except Exception as e:
- self._log.error("Exception in cleaning up NSD libs {}: {}".
- format(nsd_id, e))
- self._log.excpetion(e)
+ return RwTypes.RwStatus.SUCCESS
@asyncio.coroutine
def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
if fref.is_field_deleted():
# Delete an NSD record
self._log.debug("Deleting NSD with id %s", msg.id)
- yield from delete_nsd_libs(msg.id)
self._nsm.delete_nsd(msg.id)
else:
# Add this NSD to scratch to create/update in apply callback
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ xpath = self._project.add_project(NsdDtsHandler.XPATH)
self._log.debug(
"Registering for NSD config using xpath: %s",
- NsdDtsHandler.XPATH,
+ xpath,
)
acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
# Need a list in scratch to store NSDs to create/update later
# acg._scratch['nsds'] = list()
self._regh = acg.register(
- xpath=NsdDtsHandler.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
on_prepare=on_prepare)
+ def deregister(self):
+ self._log.debug("De-register NSD handler for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
class VnfdDtsHandler(object):
""" DTS handler for VNFD config changes """
- XPATH = "C,/vnfd:vnfd-catalog/vnfd:vnfd"
+ XPATH = "C,/project-vnfd:vnfd-catalog/project-vnfd:vnfd"
def __init__(self, dts, log, loop, nsm):
self._dts = dts
self._loop = loop
self._nsm = nsm
self._regh = None
+ self._project = nsm._project
@property
def regh(self):
def register(self):
""" Register for VNFD configuration"""
+ if self._regh:
+ self._log.warning("DTS handler already registered for project {}".
+ format(self._project.name))
+ return
+
@asyncio.coroutine
def on_apply(dts, acg, xact, action, scratch):
"""Apply the configuration"""
self._log.debug("Got NSM VNFD apply (xact: %s) (action: %s)(scr: %s)",
xact, action, scratch)
- # Create/Update a VNFD record
- for cfg in self._regh.get_xact_elements(xact):
- # Only interested in those VNFD cfgs whose ID was received in prepare callback
- if cfg.id in scratch.get('vnfds', []):
- self._nsm.update_vnfd(cfg)
+ is_recovery = xact.xact is None and action == rwdts.AppconfAction.INSTALL
+
+ if self._regh:
+ # Create/Update a VNFD record
+ for cfg in self._regh.get_xact_elements(xact):
+ # Only interested in those VNFD cfgs whose ID was received in prepare callback
+ if cfg.id in scratch.get('vnfds', []) or is_recovery:
+ self._nsm.update_vnfd(cfg)
- for cfg in self._regh.elements:
- if cfg.id in scratch.get('deleted_vnfds', []):
- yield from self._nsm.delete_vnfd(cfg.id)
+ for cfg in self._regh.elements:
+ if cfg.id in scratch.get('deleted_vnfds', []):
+ yield from self._nsm.delete_vnfd(cfg.id)
+
+ else:
+ self._log.warning("Reg handle none for {} in project {}".
+ format(self.__class__, self._project))
scratch.pop('vnfds', None)
scratch.pop('deleted_vnfds', None)
@asyncio.coroutine
def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
""" on prepare callback """
+ xpath = ks_path.to_xpath(NsdYang.get_schema())
self._log.debug("Got on prepare for VNFD (path: %s) (action: %s) (msg: %s)",
- ks_path.to_xpath(RwNsmYang.get_schema()), xact_info.query_action, msg)
+ xpath, xact_info.query_action, msg)
fref = ProtobufC.FieldReference.alloc()
fref.goto_whole_message(msg.to_pbcm())
vnfds = scratch.setdefault('vnfds', [])
vnfds.append(msg.id)
- xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ try:
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ except rift.tasklets.dts.ResponseError as e:
+ self._log.warning(
+ "VnfdDtsHandler in project {} with path {} for action {} failed: {}".
+ format(self._project, xpath, xact_info.query_action, e))
+
+ xpath = self._project.add_project(VnfdDtsHandler.XPATH)
self._log.debug(
- "Registering for VNFD config using xpath: %s",
- VnfdDtsHandler.XPATH,
- )
+ "Registering for VNFD config using xpath {} for project {}"
+ .format(xpath, self._project))
acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
with self._dts.appconf_group_create(handler=acg_hdl) as acg:
# Need a list in scratch to store VNFDs to create/update later
# acg._scratch['vnfds'] = list()
# acg._scratch['deleted_vnfds'] = list()
self._regh = acg.register(
- xpath=VnfdDtsHandler.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
on_prepare=on_prepare)
+ def deregister(self):
+ self._log.debug("De-register VNFD handler for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
+
class NsrRpcDtsHandler(object):
""" The network service instantiation RPC DTS handler """
EXEC_NSR_CONF_XPATH = "I,/nsr:start-network-service"
EXEC_NSR_CONF_O_XPATH = "O,/nsr:start-network-service"
NETCONF_IP_ADDRESS = "127.0.0.1"
NETCONF_PORT = 2022
- RESTCONF_PORT = 8888
- NETCONF_USER = "admin"
- NETCONF_PW = "admin"
- REST_BASE_V2_URL = 'https://{}:{}/v2/api/'.format("127.0.0.1",8888)
+ RESTCONF_PORT = 8008
+ NETCONF_USER = "@rift"
+ NETCONF_PW = "rift"
+ REST_BASE_V2_URL = 'https://{}:{}/v2/api/'.format("127.0.0.1",
+ RESTCONF_PORT)
def __init__(self, dts, log, loop, nsm):
self._dts = dts
self._log = log
self._loop = loop
self._nsm = nsm
+ self._project = nsm._project
self._nsd = None
self._ns_regh = None
self._manager = None
- self._nsr_config_url = NsrRpcDtsHandler.REST_BASE_V2_URL + 'config/ns-instance-config'
+ self._nsr_config_url = NsrRpcDtsHandler.REST_BASE_V2_URL + \
+ 'project/{}/'.format(self._project) + \
+ 'config/ns-instance-config'
self._model = RwYang.Model.create_libncx()
self._model.load_schema_ypbc(RwNsrYang.get_schema())
timeout_secs)
def _apply_ns_instance_config(self,payload_dict):
- #self._log.debug("At apply NS instance config with payload %s",payload_dict)
req_hdr= {'accept':'application/vnd.yang.data+json','content-type':'application/vnd.yang.data+json'}
- response=requests.post(self._nsr_config_url, headers=req_hdr, auth=('admin', 'admin'),data=payload_dict,verify=False)
+ response=requests.post(self._nsr_config_url,
+ headers=req_hdr,
+ auth=(NsrRpcDtsHandler.NETCONF_USER, NsrRpcDtsHandler.NETCONF_PW),
+ data=payload_dict,
+ verify=False)
return response
@asyncio.coroutine
def register(self):
""" Register for NS monitoring read from dts """
+
@asyncio.coroutine
def on_ns_config_prepare(xact_info, action, ks_path, msg):
""" prepare callback from dts start-network-service"""
assert action == rwdts.QueryAction.RPC
+
+ if not self._project.rpc_check(msg, xact_info):
+ return
+
rpc_ip = msg
rpc_op = NsrYang.YangOutput_Nsr_StartNetworkService.from_dict({
"nsr_id":str(uuid.uuid4())
})
- if not ('name' in rpc_ip and 'nsd_ref' in rpc_ip and ('cloud_account' in rpc_ip or 'om_datacenter' in rpc_ip)):
- self._log.error("Mandatory parameters name or nsd_ref or cloud account not found in start-network-service {}".format(rpc_ip))
-
+ if not ('name' in rpc_ip and 'nsd_ref' in rpc_ip and
+ ('cloud_account' in rpc_ip or 'om_datacenter' in rpc_ip)):
+ errmsg = (
+ "Mandatory parameters name or nsd_ref or cloud account not found in start-network-service {}".
+ format(rpc_ip))
+ self._log.error(errmsg)
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+ NsrRpcDtsHandler.EXEC_NSR_CONF_O_XPATH,
+ errmsg)
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK,
+ NsrRpcDtsHandler.EXEC_NSR_CONF_O_XPATH)
+ return
self._log.debug("start-network-service RPC input: {}".format(rpc_ip))
nsd_copy = self.nsm.get_nsd(rpc_ip.nsd_ref)
- #if not self._manager:
- # self._manager = yield from self._connect()
-
self._log.debug("Configuring ns-instance-config with name %s nsd-ref: %s",
rpc_ip.name, rpc_ip.nsd_ref)
ns_instance_config_dict = {"id":rpc_op.nsr_id, "admin_status":"ENABLED"}
ns_instance_config_copy_dict = {k:v for k, v in rpc_ip.as_dict().items()
- if k in RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr().fields}
+ if k in RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr().fields}
ns_instance_config_dict.update(ns_instance_config_copy_dict)
- ns_instance_config = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr.from_dict(ns_instance_config_dict)
- ns_instance_config.nsd = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_Nsd()
+ ns_instance_config = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr.from_dict(ns_instance_config_dict)
+ ns_instance_config.nsd = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_Nsd()
ns_instance_config.nsd.from_dict(nsd_copy.msg.as_dict())
payload_dict = ns_instance_config.to_json(self._model)
- #xml = ns_instance_config.to_xml_v2(self._model)
- #netconf_xml = self.wrap_netconf_config_xml(xml)
- #self._log.debug("Sending configure ns-instance-config xml to %s: %s",
- # netconf_xml, NsrRpcDtsHandler.NETCONF_IP_ADDRESS)
self._log.debug("Sending configure ns-instance-config json to %s: %s",
self._nsr_config_url,ns_instance_config)
- #response = yield from self._manager.edit_config(
- # target="running",
- # config=netconf_xml,
- # )
response = yield from self._loop.run_in_executor(
None,
self._apply_ns_instance_config,
NsrRpcDtsHandler.EXEC_NSR_CONF_O_XPATH,
rpc_op)
except Exception as e:
- self._log.error("Exception processing the "
- "start-network-service: {}".format(e))
- self._log.exception(e)
+ errmsg = ("Exception processing the "
+ "start-network-service: {}".format(e))
+ self._log.exception(errmsg)
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+ NsrRpcDtsHandler.EXEC_NSR_CONF_O_XPATH,
+ errmsg)
xact_info.respond_xpath(rwdts.XactRspCode.NACK,
NsrRpcDtsHandler.EXEC_NSR_CONF_O_XPATH)
+ self._ns_regh = yield from self._dts.register(
+ xpath=NsrRpcDtsHandler.EXEC_NSR_CONF_XPATH,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_ns_config_prepare),
+ flags=rwdts.Flag.PUBLISHER,
+ )
- hdl_ns = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_ns_config_prepare,)
-
- with self._dts.group_create() as group:
- self._ns_regh = group.register(xpath=NsrRpcDtsHandler.EXEC_NSR_CONF_XPATH,
- handler=hdl_ns,
- flags=rwdts.Flag.PUBLISHER,
- )
+ def deregister(self):
+ if self._ns_regh:
+ self._ns_regh.deregister()
+ self._ns_regh = None
class NsrDtsHandler(object):
self._log = log
self._loop = loop
self._nsm = nsm
+ self._project = self._nsm._project
self._nsr_regh = None
self._scale_regh = None
def register(self):
""" Register for Nsr create/update/delete/read requests from dts """
+ if self._nsr_regh:
+ self._log.warning("DTS handler already registered for project {}".
+ format(self._project.name))
+ return
+
def nsr_id_from_keyspec(ks):
- nsr_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
+ nsr_path_entry = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
nsr_id = nsr_path_entry.key00.id
return nsr_id
def group_name_from_keyspec(ks):
- group_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
+ group_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
group_name = group_path_entry.key00.scaling_group_name_ref
return group_name
for vld in vl_delta["deleted"]:
yield from self._nsm.nsr_terminate_vl(nsr_id, vld)
- def get_add_delete_update_cfgs(dts_member_reg, xact, key_name, scratch):
- # Unfortunately, it is currently difficult to figure out what has exactly
- # changed in this xact without Pbdelta support (RIFT-4916)
- # As a workaround, we can fetch the pre and post xact elements and
- # perform a comparison to figure out adds/deletes/updates
- xact_cfgs = list(dts_member_reg.get_xact_elements(xact))
- curr_cfgs = list(dts_member_reg.elements)
-
- xact_key_map = {getattr(cfg, key_name): cfg for cfg in xact_cfgs}
- curr_key_map = {getattr(cfg, key_name): cfg for cfg in curr_cfgs}
-
- # Find Adds
- added_keys = set(xact_key_map) - set(curr_key_map)
- added_cfgs = [xact_key_map[key] for key in added_keys]
-
- # Find Deletes
- deleted_keys = set(curr_key_map) - set(xact_key_map)
- deleted_cfgs = [curr_key_map[key] for key in deleted_keys]
-
- # Find Updates
- updated_keys = set(curr_key_map) & set(xact_key_map)
- updated_cfgs = [xact_key_map[key] for key in updated_keys
- if xact_key_map[key] != curr_key_map[key]]
-
- return added_cfgs, deleted_cfgs, updated_cfgs
-
def get_nsr_key_pairs(dts_member_reg, xact):
key_pairs = {}
for instance_cfg, keyspec in dts_member_reg.get_xact_elements(xact, include_keyspec=True):
self._log.debug("Got nsr apply (xact: %s) (action: %s)(scr: %s)",
xact, action, scratch)
+ @asyncio.coroutine
def handle_create_nsr(msg, key_pairs=None, restart_mode=False):
# Handle create nsr requests """
# Do some validations
self._log.debug("Creating NetworkServiceRecord %s from nsr config %s",
msg.id, msg.as_dict())
- nsr = self.nsm.create_nsr(msg, key_pairs=key_pairs, restart_mode=restart_mode)
+ nsr = yield from self.nsm.create_nsr(msg,
+ xact,
+ key_pairs=key_pairs,
+ restart_mode=restart_mode)
return nsr
def handle_delete_nsr(msg):
@asyncio.coroutine
def delete_instantiation(ns_id):
""" Delete instantiation """
- with self._dts.transaction() as xact:
- yield from self._nsm.terminate_ns(ns_id, xact)
+ yield from self._nsm.terminate_ns(ns_id, None)
# Handle delete NSR requests
self._log.info("Delete req for NSR Id: %s received", msg.id)
nsr = self._nsm.get_ns_by_nsr_id(msg.id)
nsr.set_state(NetworkServiceRecordState.TERMINATE_RCVD)
- event_descr = "Terminate rcvd for NS Id:%s" % msg.id
+ event_descr = "Terminate rcvd for NS Id: %s, NS Name: %s" % (msg.id, msg.name)
nsr.record_event("terminate-rcvd", event_descr)
self._loop.create_task(delete_instantiation(msg.id))
def begin_instantiation(nsr):
# Begin instantiation
self._log.info("Beginning NS instantiation: %s", nsr.id)
- yield from self._nsm.instantiate_ns(nsr.id, xact)
+ try:
+ yield from self._nsm.instantiate_ns(nsr.id, xact)
+ except Exception as e:
+ self._log.exception(e)
+ raise e
+
+ @asyncio.coroutine
+ def instantiate_ns(msg, key_pairs, restart_mode=False):
+ nsr = yield from handle_create_nsr(msg, key_pairs, restart_mode=restart_mode)
+ yield from begin_instantiation(nsr)
- def on_instantiate_done(fut):
+ def on_instantiate_done(fut, msg):
# If the do_instantiate fails, then publish NSR with failed result
e = fut.exception()
if e is not None:
if action == rwdts.AppconfAction.INSTALL and xact.id is None:
key_pairs = []
- for element in self._key_pair_regh.elements:
- key_pairs.append(element)
- for element in self._nsr_regh.elements:
- nsr = handle_create_nsr(element, key_pairs, restart_mode=True)
- instantiate_task = self._loop.create_task(begin_instantiation(nsr))
- instantiate_task.add_done_callback(on_instantiate_done)
+ if self._key_pair_regh:
+ for element in self._key_pair_regh.elements:
+ key_pairs.append(element)
+ else:
+ self._log.error("Reg handle none for key pair in project {}".
+ format(self._project))
+
+ if self._nsr_regh:
+ for element in self._nsr_regh.elements:
+ if element.id not in self.nsm._nsrs:
+ instantiate_task = self._loop.create_task(instantiate_ns(element, key_pairs,
+ restart_mode=True))
+ instantiate_task.add_done_callback(functools.partial(on_instantiate_done, msg=element))
+ else:
+ self._log.error("Reg handle none for NSR in project {}".
+ format(self._project))
+ return RwTypes.RwStatus.SUCCESS
(added_msgs, deleted_msgs, updated_msgs) = get_add_delete_update_cfgs(self._nsr_regh,
xact,
- "id",
- scratch)
+ "id")
self._log.debug("Added: %s, Deleted: %s, Updated: %s", added_msgs,
deleted_msgs, updated_msgs)
if msg.id not in self._nsm.nsrs:
self._log.info("Create NSR received in on_apply to instantiate NS:%s", msg.id)
key_pairs = get_nsr_key_pairs(self._key_pair_regh, xact)
- nsr = handle_create_nsr(msg,key_pairs)
- instantiate_task = self._loop.create_task(begin_instantiation(nsr))
- instantiate_task.add_done_callback(on_instantiate_done)
+ instantiate_task = self._loop.create_task(instantiate_ns(msg,key_pairs))
+ instantiate_task.add_done_callback(functools.partial(on_instantiate_done, msg=msg))
for msg in deleted_msgs:
self._log.info("Delete NSR received in on_apply to terminate NS:%s", msg.id)
for msg in updated_msgs:
self._log.info("Update NSR received in on_apply: %s", msg)
-
self._nsm.nsr_update_cfg(msg.id, msg)
if 'nsd' in msg:
xact, action, xact_info, xpath, msg
)
- @asyncio.coroutine
- def delete_instantiation(ns_id):
- """ Delete instantiation """
- yield from self._nsm.terminate_ns(ns_id, None)
-
- def handle_delete_nsr():
- """ Handle delete NSR requests """
- self._log.info("Delete req for NSR Id: %s received", msg.id)
- # Terminate the NSR instance
- nsr = self._nsm.get_ns_by_nsr_id(msg.id)
-
- nsr.set_state(NetworkServiceRecordState.TERMINATE_RCVD)
- event_descr = "Terminate rcvd for NS Id:%s" % msg.id
- nsr.record_event("terminate-rcvd", event_descr)
-
- self._loop.create_task(delete_instantiation(msg.id))
-
fref = ProtobufC.FieldReference.alloc()
fref.goto_whole_message(msg.to_pbcm())
+ def send_err_msg(err_msg):
+ self._log.error(errmsg)
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+ xpath,
+ errmsg)
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+
+
if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE, rwdts.QueryAction.DELETE]:
# if this is an NSR create
if action != rwdts.QueryAction.DELETE and msg.id not in self._nsm.nsrs:
# Ensure the Cloud account/datacenter has been specified
- if not msg.has_field("cloud_account") and not msg.has_field("om_datacenter"):
- raise NsrInstantiationFailed("Cloud account or datacenter not specified in NSR")
+ if not msg.has_field("datacenter") and not msg.has_field("datacenter"):
+ errmsg = ("Cloud account or datacenter not specified in NS {}".
+ format(msg.name))
+ send_err_msg(errmsg)
+ return
# Check if nsd is specified
if not msg.has_field("nsd"):
- raise NsrInstantiationFailed("NSD not specified in NSR")
+ errmsg = ("NSD not specified in NS {}".
+ format(msg.name))
+ send_err_msg(errmsg)
+ return
else:
nsr = self._nsm.nsrs[msg.id]
-
if msg.has_field("nsd"):
if nsr.state != NetworkServiceRecordState.RUNNING:
- raise NsrVlUpdateError("Unable to update VL when NSR not in running state")
+ errmsg = ("Unable to update VL when NS {} not in running state".
+ format(msg.name))
+ send_err_msg(errmsg)
+ return
+
if 'vld' not in msg.nsd or len(msg.nsd.vld) == 0:
- raise NsrVlUpdateError("NS config NSD should have atleast 1 VLD defined")
+ errmsg = ("NS config {} NSD should have atleast 1 VLD".
+ format(msg.name))
+ send_err_msg(errmsg)
+ return
if msg.has_field("scaling_group"):
self._log.debug("ScaleMsg %s", msg)
self._log.debug("NSSCALINGSTATE %s", nsr.state)
if nsr.state != NetworkServiceRecordState.RUNNING:
- raise ScalingOperationError("Unable to perform scaling action when NS is not in running state")
+ errmsg = ("Unable to perform scaling action when NS {} not in running state".
+ format(msg.name))
+ send_err_msg(errmsg)
+ return
if len(msg.scaling_group) > 1:
- raise ScalingOperationError("Only a single scaling group can be configured at a time")
+ errmsg = ("Only a single scaling group can be configured at a time for NS {}".
+ format(msg.name))
+ send_err_msg(errmsg)
+ return
for group_msg in msg.scaling_group:
num_new_group_instances = len(group_msg.instance)
if num_new_group_instances > 1:
- raise ScalingOperationError("Only a single scaling instance can be modified at a time")
+ errmsg = ("Only a single scaling instance can be modified at a time for NS {}".
+ format(msg.name))
+ send_err_msg(errmsg)
+ return
elif num_new_group_instances == 1:
scale_group = nsr.scaling_groups[group_msg.scaling_group_name_ref]
if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
if len(scale_group.instances) == scale_group.max_instance_count:
- raise ScalingOperationError("Max instances for %s reached" % scale_group)
+ errmsg = (" Max instances for {} reached for NS {}".
+ format(str(scale_group), msg.name))
+ send_err_msg(errmsg)
+ return
acg.handle.prepare_complete_ok(xact_info.handle)
- self._log.debug("Registering for NSR config using xpath: %s",
- NsrDtsHandler.NSR_XPATH)
+ xpath = self._project.add_project(NsrDtsHandler.NSR_XPATH)
+ self._log.debug("Registering for NSR config using xpath: {}".
+ format(xpath))
acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
with self._dts.appconf_group_create(handler=acg_hdl) as acg:
- self._nsr_regh = acg.register(xpath=NsrDtsHandler.NSR_XPATH,
- flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
- on_prepare=on_prepare)
+ self._nsr_regh = acg.register(
+ xpath=xpath,
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+ on_prepare=on_prepare
+ )
self._scale_regh = acg.register(
- xpath=NsrDtsHandler.SCALE_INSTANCE_XPATH,
- flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY| rwdts.Flag.CACHE,
- )
+ xpath=self._project.add_project(NsrDtsHandler.SCALE_INSTANCE_XPATH),
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY| rwdts.Flag.CACHE,
+ )
self._key_pair_regh = acg.register(
- xpath=NsrDtsHandler.KEY_PAIR_XPATH,
- flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
- )
-
-
-class NsrOpDataDtsHandler(object):
- """ The network service op data DTS handler """
- XPATH = "D,/nsr:ns-instance-opdata/nsr:nsr"
-
- def __init__(self, dts, log, loop, nsm):
- self._dts = dts
- self._log = log
- self._loop = loop
- self._nsm = nsm
- self._regh = None
-
- @property
- def regh(self):
- """ Return the registration handle"""
- return self._regh
-
- @property
- def nsm(self):
- """ Return the NS manager instance """
- return self._nsm
-
- @asyncio.coroutine
- def register(self):
- """ Register for Nsr op data publisher registration"""
- self._log.debug("Registering Nsr op data path %s as publisher",
- NsrOpDataDtsHandler.XPATH)
-
- hdl = rift.tasklets.DTS.RegistrationHandler()
- handlers = rift.tasklets.Group.Handler()
- with self._dts.group_create(handler=handlers) as group:
- self._regh = group.register(xpath=NsrOpDataDtsHandler.XPATH,
- handler=hdl,
- flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ | rwdts.Flag.DATASTORE)
-
- @asyncio.coroutine
- def create(self, path, msg):
- """
- Create an NS record in DTS with the path and message
- """
- self._log.debug("Creating NSR %s:%s", path, msg)
- self.regh.create_element(path, msg)
- self._log.debug("Created NSR, %s:%s", path, msg)
-
- @asyncio.coroutine
- def update(self, path, msg, flags=rwdts.XactFlag.REPLACE):
- """
- Update an NS record in DTS with the path and message
- """
- self._log.debug("Updating NSR, %s:%s regh = %s", path, msg, self.regh)
- self.regh.update_element(path, msg, flags)
- self._log.debug("Updated NSR, %s:%s", path, msg)
+ xpath=self._project.add_project(NsrDtsHandler.KEY_PAIR_XPATH),
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY | rwdts.Flag.CACHE,
+ )
- @asyncio.coroutine
- def delete(self, path):
- """
- Update an NS record in DTS with the path and message
- """
- self._log.debug("Deleting NSR path:%s", path)
- self.regh.delete_element(path)
- self._log.debug("Deleted NSR path:%s", path)
+ def deregister(self):
+ self._log.debug("De-register NSR config for project {}".
+ format(self._project.name))
+ if self._nsr_regh:
+ self._nsr_regh.deregister()
+ self._nsr_regh = None
+ if self._scale_regh:
+ self._scale_regh.deregister()
+ self._scale_regh = None
+ if self._key_pair_regh:
+ self._key_pair_regh.deregister()
+ self._key_pair_regh = None
class VnfrDtsHandler(object):
@asyncio.coroutine
def register(self):
""" Register for vnfr create/update/delete/ advises from dts """
-
- def on_commit(xact_info):
- """ The transaction has been committed """
- self._log.debug("Got vnfr commit (xact_info: %s)", xact_info)
- return rwdts.MemberRspCode.ACTION_OK
+ if self._regh:
+ self._log.warning("VNFR DTS handler already registered for project {}".
+ format(self._project.name))
+ return
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
xact_info, action, ks_path, msg
)
- schema = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema()
+ schema = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.schema()
path_entry = schema.keyspec_to_entry(ks_path)
- if path_entry.key00.id not in self._nsm._vnfrs:
- self._log.error("%s request for non existent record path %s",
+ if not path_entry or (path_entry.key00.id not in self._nsm._vnfrs):
+ # This can happen when using external RO or after delete with monitoring params
+ self._log.debug("%s request for non existent record path %s",
action, xpath)
xact_info.respond_xpath(rwdts.XactRspCode.NA, xpath)
return
- self._log.debug("Deleting VNFR with id %s", path_entry.key00.id)
if action == rwdts.QueryAction.CREATE or action == rwdts.QueryAction.UPDATE:
yield from self._nsm.update_vnfr(msg)
elif action == rwdts.QueryAction.DELETE:
self._log.debug("Deleting VNFR with id %s", path_entry.key00.id)
+
self._nsm.delete_vnfr(path_entry.key00.id)
xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath)
self._log.debug("Registering for VNFR using xpath: %s",
- VnfrDtsHandler.XPATH,)
+ VnfrDtsHandler.XPATH)
- hdl = rift.tasklets.DTS.RegistrationHandler(on_commit=on_commit,
- on_prepare=on_prepare,)
+ hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
with self._dts.group_create() as group:
- self._regh = group.register(xpath=VnfrDtsHandler.XPATH,
+ self._regh = group.register(xpath=self._nsm._project.add_project(
+ VnfrDtsHandler.XPATH),
handler=hdl,
flags=(rwdts.Flag.SUBSCRIBER),)
+ def deregister(self):
+ self._log.debug("De-register VNFR for project {}".
+ format(self._nsm._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
class NsManager(object):
""" The Network Service Manager class"""
- def __init__(self, dts, log, loop,
+ def __init__(self, dts, log, loop, project,
nsr_handler, vnfr_handler, vlr_handler, ro_plugin_selector,
vnffgmgr, vnfd_pub_handler, cloud_account_handler):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._nsr_handler = nsr_handler
self._vnfr_pub_handler = vnfr_handler
self._vlr_pub_handler = vlr_handler
# Intialize the set of variables for implementing Scaling RPC using REST.
self._headers = {"content-type":"application/json", "accept":"application/json"}
- #This will break when we have rbac in the rift code and admin user password is changed or admin it self is removed.
- self._user = 'admin'
- self._password = 'admin'
+ self._user = '@rift'
+ self._password = 'rift'
self._ip = 'localhost'
self._rport = 8008
- self._conf_url = "https://{ip}:{port}/api/config". \
+ self._conf_url = "https://{ip}:{port}/api/config/project/{project}". \
format(ip=self._ip,
- port=self._rport)
+ port=self._rport,
+ project=self._project.name)
self._nsrs = {}
self._nsds = {}
self._vnfds = {}
self._vnfrs = {}
+ self._nsr_for_vlr = {}
self.cfgmgr_obj = conman.ROConfigManager(log, loop, dts, self)
self._dts_handlers = [self._nsd_dts_handler,
VnfrDtsHandler(dts, log, loop, self),
NsrDtsHandler(dts, log, loop, self),
- ScalingRpcHandler(log, dts, loop, self.scale_rpc_callback),
- NsrRpcDtsHandler(dts,log,loop,self),
+ ScalingRpcHandler(log, dts, loop, self, self.scale_rpc_callback),
+ # NsrRpcDtsHandler(dts, log, loop, self),
self._vnfd_dts_handler,
self.cfgmgr_obj,
]
@asyncio.coroutine
def register(self):
""" Register all static DTS handlers """
+ self._log.debug("Register DTS handlers for project {}".format(self._project))
for dts_handle in self._dts_handlers:
- yield from dts_handle.register()
+ if asyncio.iscoroutinefunction(dts_handle.register):
+ yield from dts_handle.register()
+ else:
+ dts_handle.register()
+
+ def deregister(self):
+ """ Register all static DTS handlers """
+ for dts_handle in self._dts_handlers:
+ dts_handle.deregister()
def get_ns_by_nsr_id(self, nsr_id):
def get_scaling_group_information():
scaling_group_url = "{url}/ns-instance-config/nsr/{nsr_id}".format(url=self._conf_url, nsr_id=msg.nsr_id_ref)
output = requests.get(scaling_group_url, headers=self._headers, auth=(self._user, self._password), verify=False)
- if output.text == None or len(output.text) == 0:
+ if output.text is None or len(output.text) == 0:
self.log.error("nsr id %s information not present", self._nsr_id)
return None
scaling_group_info = json.loads(output.text)
def config_scaling_group_information(scaling_group_info):
data_str = json.dumps(scaling_group_info)
- self.log.debug("scaling group Info %s", data_str)
scale_out_url = "{url}/ns-instance-config/nsr/{nsr_id}".format(url=self._conf_url, nsr_id=msg.nsr_id_ref)
- response = requests.put(scale_out_url, data=data_str, verify=False, auth=(self._user, self._password), headers=self._headers)
+ response = requests.put(scale_out_url, data=data_str, verify=False,
+ auth=(self._user, self._password), headers=self._headers)
response.raise_for_status()
def scale_out():
scaling_group_info = get_scaling_group_information()
+ self._log.debug("Scale out info: {}".format(scaling_group_info))
if scaling_group_info is None:
return
scaling_group["instance"].append({"id": int(msg.instance_id)})
if not scaling_group_present:
- scaling_group_info["nsr:nsr"]["scaling-group"] = [{"scaling-group-name-ref": msg.scaling_group_name_ref, "instance": [{"id": msg.instance_id}]}]
+ scaling_group_info["nsr:nsr"]["scaling-group"] = [{"scaling-group-name-ref": msg.scaling_group_name_ref,
+ "instance": [{"id": msg.instance_id}]}]
config_scaling_group_information(scaling_group_info)
return
nsr.nsr_cfg_msg= msg
def nsr_instantiate_vl(self, nsr_id, vld):
- self.log.debug("NSR {} create VL {}".format(nsr_id, vld))
+ self.log.error("NSR {} create VL {}".format(nsr_id, vld))
nsr = self._nsrs[nsr_id]
if nsr.state != NetworkServiceRecordState.RUNNING:
raise NsrVlUpdateError("Cannot perform VL instantiate if NSR is not in running state")
# Not calling in a separate task as this is called from a separate task
yield from nsr.delete_vl_instance(vld)
- def create_nsr(self, nsr_msg, key_pairs=None,restart_mode=False):
+ @asyncio.coroutine
+ def create_nsr(self, nsr_msg, config_xact, key_pairs=None,restart_mode=False):
""" Create an NSR instance """
self._log.debug("NSRMSG %s", nsr_msg)
if nsr_msg.id in self._nsrs:
self._log.error(msg)
raise NetworkServiceRecordError(msg)
- self._log.info("Create NetworkServiceRecord nsr id %s from nsd_id %s",
+ self._log.debug("Create NetworkServiceRecord nsr id %s from nsd_id %s",
nsr_msg.id,
nsr_msg.nsd.id)
- nsm_plugin = self._ro_plugin_selector.ro_plugin
- sdn_account_name = self._cloud_account_handler.get_cloud_account_sdn_name(nsr_msg.cloud_account)
+ nsm_plugin = self._ro_plugin_selector.get_ro_plugin(nsr_msg.resource_orchestrator)
+ #Work Around - openmano expects datacenter id instead of datacenter name
+ if isinstance(nsm_plugin, openmano_nsm.OpenmanoNsPlugin):
+ for uuid, name in nsm_plugin._cli_api.datacenter_list():
+ if name == nsr_msg.datacenter:
+ nsr_msg.datacenter = uuid
+
+ sdn_account_name = self._cloud_account_handler.get_cloud_account_sdn_name(nsr_msg.datacenter)
nsr = NetworkServiceRecord(self._dts,
self._log,
nsr_msg,
sdn_account_name,
key_pairs,
+ self._project,
restart_mode=restart_mode,
- vlr_handler=self._ro_plugin_selector._records_publisher._vlr_pub_hdlr
+ vlr_handler=self._vlr_pub_handler
)
self._nsrs[nsr_msg.id] = nsr
- nsm_plugin.create_nsr(nsr_msg, nsr_msg.nsd, key_pairs)
+
+ try:
+ # Generate ssh key pair if required
+ nsr.generate_ssh_key_pair(config_xact)
+ except Exception as e:
+ self._log.exception("SSH key: {}".format(e))
+
+ self._log.debug("NSR {}: SSh key generated: {}".format(nsr_msg.name,
+ nsr.public_key))
+
+ ssh_key = {'private_key': nsr.private_key,
+ 'public_key': nsr.public_key
+ }
+
+ nsm_plugin.create_nsr(nsr_msg, nsr_msg.nsd, key_pairs, ssh_key=ssh_key)
return nsr
raise NetworkServiceRecordError(err)
nsr = self._nsrs[nsr_id]
- yield from nsr.nsm_plugin.instantiate_ns(nsr, config_xact)
+ try:
+ yield from nsr.nsm_plugin.instantiate_ns(nsr, config_xact)
+ except Exception as e:
+ self._log.exception("NS instantiate: {}".format(e))
+ raise e
@asyncio.coroutine
def update_vnfr(self, vnfr):
vnfr_state = self._vnfrs[vnfr.id].state
self._log.debug("Updating VNFR with state %s: vnfr %s", vnfr_state, vnfr)
-
+
+ no_of_active_vms = 0
+ for vdur in vnfr.vdur:
+ if vdur.operational_status == 'running':
+ no_of_active_vms += 1
+
+ self._vnfrs[vnfr.id]._active_vdus = no_of_active_vms
yield from self._vnfrs[vnfr.id].update_state(vnfr)
nsr = self.find_nsr_for_vnfr(vnfr.id)
- yield from nsr.update_state()
+ if nsr is not None:
+ nsr._vnf_inst_started = False
+ yield from nsr.update_state()
def find_nsr_for_vnfr(self, vnfr_id):
""" Find the NSR which )has the passed vnfr id"""
@asyncio.coroutine
def get_nsr_config(self, nsd_id):
- xpath = "C,/nsr:ns-instance-config"
+ xpath = self._project.add_project("C,/nsr:ns-instance-config")
results = yield from self._dts.query_read(xpath, rwdts.XactFlag.MERGE)
for result in results:
Terminate network service for the given NSR Id
"""
+ if nsr_id not in self._nsrs:
+ return
+
# Terminate the instances/networks assocaited with this nw service
self._log.debug("Terminating the network service %s", nsr_id)
try :
except Exception as e:
self.log.exception("Failed to terminate NSR[id=%s]", nsr_id)
- # Unpublish the NSR record
- self._log.debug("Unpublishing the network service %s", nsr_id)
- yield from self._nsrs[nsr_id].unpublish(xact)
-
- # Finaly delete the NS instance from this NS Manager
- self._log.debug("Deletng the network service %s", nsr_id)
- self.delete_nsr(nsr_id)
+ def vlr_event(self, vlr, action):
+ self._log.debug("Received VLR %s with action:%s", vlr, action)
+ # Find the NS and see if we can proceed
+ nsr = self.find_nsr_for_vlr_id(vlr.id)
+ if nsr is None:
+ self._log.error("VLR %s:%s received for NSR, state:%s",
+ vlr.id, vlr.name, vlr.operational_status)
+ return
+ nsr.vlr_event(vlr, action)
+
+ def add_vlr_id_nsr_map(self, vlr_id, nsr):
+ """ Add a mapping for vlr_id into NSR """
+ self._nsr_for_vlr[vlr_id] = nsr
+
+ def remove_vlr_id_nsr_map(self, vlr_id):
+ """ Remove a mapping for vlr_id into NSR """
+ if vlr_id in self._nsr_for_vlr:
+ del self._nsr_for_vlr[vlr_id]
+
+ def find_nsr_for_vlr_id(self, vlr_id):
+ """ Find NSR for VLR id """
+ nsr = None
+ if vlr_id in self._nsr_for_vlr:
+ nsr = self._nsr_for_vlr[vlr_id]
+ return nsr
class NsmRecordsPublisherProxy(object):
""" This class provides a publisher interface that allows plugin objects
to publish NSR/VNFR/VLR"""
- def __init__(self, dts, log, loop, nsr_pub_hdlr, vnfr_pub_hdlr, vlr_pub_hdlr):
+ def __init__(self, dts, log, loop, project, nsr_pub_hdlr,
+ vnfr_pub_hdlr, vlr_pub_hdlr,):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._nsr_pub_hdlr = nsr_pub_hdlr
self._vlr_pub_hdlr = vlr_pub_hdlr
self._vnfr_pub_hdlr = vnfr_pub_hdlr
+ @asyncio.coroutine
+ def publish_nsr_opdata(self, xact, nsr):
+ """ Publish an NSR """
+ path = ("D,/nsr:ns-instance-opdata" + "/nsr:nsr[nsr:ns-instance-config-ref={}]"
+ ).format(quoted_key(nsr.ns_instance_config_ref))
+ return (yield from self._nsr_pub_hdlr.update(xact, path, nsr))
+
@asyncio.coroutine
def publish_nsr(self, xact, nsr):
""" Publish an NSR """
- path = NetworkServiceRecord.xpath_from_nsr(nsr)
+ path = self._project.add_project(NetworkServiceRecord.xpath_from_nsr(nsr))
return (yield from self._nsr_pub_hdlr.update(xact, path, nsr))
@asyncio.coroutine
def unpublish_nsr(self, xact, nsr):
""" Unpublish an NSR """
- path = NetworkServiceRecord.xpath_from_nsr(nsr)
+ path = self._project.add_project(NetworkServiceRecord.xpath_from_nsr(nsr))
return (yield from self._nsr_pub_hdlr.delete(xact, path))
@asyncio.coroutine
def publish_vnfr(self, xact, vnfr):
""" Publish an VNFR """
- path = VirtualNetworkFunctionRecord.vnfr_xpath(vnfr)
+ path = self._project.add_project(VirtualNetworkFunctionRecord.vnfr_xpath(vnfr))
return (yield from self._vnfr_pub_hdlr.update(xact, path, vnfr))
@asyncio.coroutine
def unpublish_vnfr(self, xact, vnfr):
""" Unpublish a VNFR """
- path = VirtualNetworkFunctionRecord.vnfr_xpath(vnfr)
- return (yield from self._vnfr_pub_hdlr.delete(xact, path))
+ path = self._project.add_project(VirtualNetworkFunctionRecord.vnfr_xpath(vnfr))
+ yield from self._vnfr_pub_hdlr.delete(xact, path)
+ # NOTE: The regh delete does not send the on_prepare to VNFM tasklet as well
+ # as remove all the VNFR elements. So need to send this additional delete block.
+ with self._dts.transaction(flags = 0) as xact:
+ block = xact.block_create()
+ block.add_query_delete(path)
+ yield from block.execute(flags=0, now=True)
@asyncio.coroutine
def publish_vlr(self, xact, vlr):
""" Publish a VLR """
- path = VirtualLinkRecord.vlr_xpath(vlr)
+ path = self._project.add_project(VirtualLinkRecord.vlr_xpath(vlr))
return (yield from self._vlr_pub_hdlr.update(xact, path, vlr))
@asyncio.coroutine
def unpublish_vlr(self, xact, vlr):
""" Unpublish a VLR """
- path = VirtualLinkRecord.vlr_xpath(vlr)
+ path = self._project.add_project(VirtualLinkRecord.vlr_xpath(vlr))
return (yield from self._vlr_pub_hdlr.delete(xact, path))
-
class ScalingRpcHandler(mano_dts.DtsHandler):
""" The Network service Monitor DTS handler """
SCALE_IN_INPUT_XPATH = "I,/nsr:exec-scale-in"
ACTION = Enum('ACTION', 'SCALE_IN SCALE_OUT')
- def __init__(self, log, dts, loop, callback=None):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, nsm, callback=None):
+ super().__init__(log, dts, loop, nsm._project)
+ self._nsm = nsm
self.callback = callback
self.last_instance_id = defaultdict(int)
+ self._reg_in = None
+ self._reg_out = None
+
@asyncio.coroutine
def register(self):
+ def send_err_msg(err_msg, xact_info, ks_path, e=False):
+ xpath = ks_path.to_xpath(NsrYang.get_schema())
+ if e:
+ self._log.exception(err_msg)
+ else:
+ self._log.error(err_msg)
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+ xpath,
+ err_msg)
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+
@asyncio.coroutine
def on_scale_in_prepare(xact_info, action, ks_path, msg):
assert action == rwdts.QueryAction.RPC
+ self._log.debug("Scale in called: {}".format(msg.as_dict()))
+ if not self.project.rpc_check(msg, xact_info):
+ return
+
try:
rpc_op = NsrYang.YangOutput_Nsr_ExecScaleIn.from_dict({
"instance_id": msg.instance_id})
+ nsr = self._nsm.nsrs[msg.nsr_id_ref]
+ if nsr.state != NetworkServiceRecordState.RUNNING:
+ errmsg = ("Unable to perform scaling action when NS {}({}) not in running state".
+ format(nsr.name, nsr.id))
+ send_err_msg(errmsg, xact_info, ks_path)
+ return
+
xact_info.respond_xpath(
rwdts.XactRspCode.ACK,
self.__class__.SCALE_IN_OUTPUT_XPATH,
if self.callback:
self.callback(xact_info.xact, msg, self.ACTION.SCALE_IN)
+
except Exception as e:
- self.log.exception(e)
- xact_info.respond_xpath(
- rwdts.XactRspCode.NACK,
- self.__class__.SCALE_IN_OUTPUT_XPATH)
+ errmsg = ("Exception doing scale in using {}: {}".
+ format(msg, e))
+ send_err_msg(errmsg, xact_info, ks_path, e=True)
@asyncio.coroutine
def on_scale_out_prepare(xact_info, action, ks_path, msg):
assert action == rwdts.QueryAction.RPC
+ self._log.debug("Scale out called: {}".format(msg.as_dict()))
+ if not self.project.rpc_check(msg, xact_info):
+ return
+
try:
scaling_group = msg.scaling_group_name_ref
if not msg.instance_id:
msg.instance_id = last_instance_id + 1
self.last_instance_id[scale_group] += 1
+ nsr = self._nsm.nsrs[msg.nsr_id_ref]
+ if nsr.state != NetworkServiceRecordState.RUNNING:
+ errmsg = ("Unable to perform scaling action when NS {}({}) not in running state".
+ format(nsr.name, nsr.id))
+ send_err_msg(errmsg, xact_info, ks_path)
+ return
+
rpc_op = NsrYang.YangOutput_Nsr_ExecScaleOut.from_dict({
"instance_id": msg.instance_id})
if self.callback:
self.callback(xact_info.xact, msg, self.ACTION.SCALE_OUT)
+
except Exception as e:
- self.log.exception(e)
- xact_info.respond_xpath(
- rwdts.XactRspCode.NACK,
- self.__class__.SCALE_OUT_OUTPUT_XPATH)
+ errmsg = ("Exception doing scale in using {}: {}".
+ format(msg, e))
+ send_err_msg(errmsg, xact_info, ks_path, e=True)
- scale_in_hdl = rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_scale_in_prepare)
- scale_out_hdl = rift.tasklets.DTS.RegistrationHandler(
- on_prepare=on_scale_out_prepare)
+ self._reg_in = yield from self.dts.register(
+ xpath=self.__class__.SCALE_IN_INPUT_XPATH,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_scale_in_prepare),
+ flags=rwdts.Flag.PUBLISHER)
- with self.dts.group_create() as group:
- group.register(
- xpath=self.__class__.SCALE_IN_INPUT_XPATH,
- handler=scale_in_hdl,
- flags=rwdts.Flag.PUBLISHER)
- group.register(
- xpath=self.__class__.SCALE_OUT_INPUT_XPATH,
- handler=scale_out_hdl,
- flags=rwdts.Flag.PUBLISHER)
+ self._reg_out = yield from self.dts.register(
+ xpath=self.__class__.SCALE_OUT_INPUT_XPATH,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_prepare=on_scale_out_prepare),
+ flags=rwdts.Flag.PUBLISHER)
+ def deregister(self):
+ if self._reg_in:
+ self._reg_in.deregister()
+ self._reg_in = None
-class NsmTasklet(rift.tasklets.Tasklet):
- """
- The network service manager tasklet
- """
- def __init__(self, *args, **kwargs):
- super(NsmTasklet, self).__init__(*args, **kwargs)
- self.rwlog.set_category("rw-mano-log")
- self.rwlog.set_subcategory("nsm")
+ if self._reg_out:
+ self._reg_out.deregister()
+ self._reg_out = None
- self._dts = None
+
+class NsmProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(NsmProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
self._nsm = None
self._ro_plugin_selector = None
self._vnffgmgr = None
- self._nsr_handler = None
+ self._nsr_pub_handler = None
self._vnfr_pub_handler = None
self._vlr_pub_handler = None
self._vnfd_pub_handler = None
self._records_publisher_proxy = None
- def start(self):
- """ The task start callback """
- super(NsmTasklet, self).start()
- self.log.info("Starting NsmTasklet")
-
- self.log.debug("Registering with dts")
- self._dts = rift.tasklets.DTS(self.tasklet_info,
- RwNsmYang.get_schema(),
- self.loop,
- self.on_dts_state_change)
-
- self.log.debug("Created DTS Api GI Object: %s", self._dts)
-
- def stop(self):
- try:
- self._dts.deinit()
- except Exception:
- print("Caught Exception in NSM stop:", sys.exc_info()[0])
- raise
-
- def on_instance_started(self):
- """ Task instance started callback """
- self.log.debug("Got instance started callback")
+ def vlr_event(self, vlr, action):
+ """ VLR Event callback """
+ self.log.debug("VLR Event received for VLR %s with action %s", vlr, action)
+ self._nsm.vlr_event(vlr, action)
@asyncio.coroutine
- def init(self):
- """ Task init callback """
- self.log.debug("Got instance started callback")
-
- self.log.debug("creating config account handler")
+ def register(self):
+ self.log.debug("Register NsmProject for {}".format(self.name))
- self._nsr_pub_handler = publisher.NsrOpDataDtsHandler(self._dts, self.log, self.loop)
+ self._nsr_pub_handler = publisher.NsrOpDataDtsHandler(
+ self._dts, self.log, self.loop, self)
yield from self._nsr_pub_handler.register()
- self._vnfr_pub_handler = publisher.VnfrPublisherDtsHandler(self._dts, self.log, self.loop)
+ self._vnfr_pub_handler = publisher.VnfrPublisherDtsHandler(
+ self._dts, self.log, self.loop, self)
yield from self._vnfr_pub_handler.register()
- self._vlr_pub_handler = publisher.VlrPublisherDtsHandler(self._dts, self.log, self.loop)
+ self._vlr_pub_handler = publisher.VlrPublisherDtsHandler(
+ self._dts, self.log, self.loop, self)
yield from self._vlr_pub_handler.register()
- manifest = self.tasklet_info.get_pb_manifest()
+ self._vlr_sub_handler = subscriber.VlrSubscriberDtsHandler(self.log,
+ self._dts,
+ self.loop,
+ self,
+ self.vlr_event,
+ )
+ yield from self._vlr_sub_handler.register()
+
+ manifest = self._tasklet.tasklet_info.get_pb_manifest()
use_ssl = manifest.bootstrap_phase.rwsecurity.use_ssl
ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
ssl_key = manifest.bootstrap_phase.rwsecurity.key
- self._vnfd_pub_handler = publisher.VnfdPublisher(use_ssl, ssl_cert, ssl_key, self.loop)
+ self._vnfd_pub_handler = publisher.VnfdPublisher(
+ use_ssl, ssl_cert, ssl_key, self.loop, self)
self._records_publisher_proxy = NsmRecordsPublisherProxy(
self._dts,
self.log,
self.loop,
+ self,
self._nsr_pub_handler,
self._vnfr_pub_handler,
self._vlr_pub_handler,
# Register the NSM to receive the nsm plugin
# when cloud account is configured
- self._ro_plugin_selector = cloud.ROAccountPluginSelector(
+ self._ro_plugin_selector = cloud.ROAccountConfigSubscriber(
self._dts,
self.log,
self.loop,
- self._records_publisher_proxy,
+ self,
+ self._records_publisher_proxy
)
yield from self._ro_plugin_selector.register()
self._cloud_account_handler = cloud.CloudAccountConfigSubscriber(
self._log,
self._dts,
- self.log_hdl)
+ self.log_hdl,
+ self,
+ )
yield from self._cloud_account_handler.register()
- self._vnffgmgr = rwvnffgmgr.VnffgMgr(self._dts, self.log, self.log_hdl, self.loop)
+ self._vnffgmgr = rwvnffgmgr.VnffgMgr(self._dts, self.log, self.log_hdl, self.loop,
+ self, self._cloud_account_handler)
yield from self._vnffgmgr.register()
self._nsm = NsManager(
self._dts,
self.log,
self.loop,
+ self,
self._nsr_pub_handler,
self._vnfr_pub_handler,
self._vlr_pub_handler,
self._ro_plugin_selector,
self._vnffgmgr,
self._vnfd_pub_handler,
- self._cloud_account_handler
+ self._cloud_account_handler,
)
yield from self._nsm.register()
+ self.log.debug("Register NsmProject for {} complete".format(self.name))
+
+ def deregister(self):
+ self._log.debug("Project {} de-register".format(self.name))
+ self._nsm.deregister()
+ self._vnffgmgr.deregister()
+ self._cloud_account_handler.deregister()
+ self._ro_plugin_selector.deregister()
+ self._nsr_pub_handler.deregister()
+ self._vnfr_pub_handler.deregister()
+ self._vlr_pub_handler.deregister()
+ self._vlr_sub_handler.deregister()
+ self._nsm = None
+
+ @asyncio.coroutine
+ def delete_prepare(self):
+ if self._nsm and self._nsm._nsrs:
+ delete_msg = "Project has NSR associated with it. Delete all Project NSR and try again."
+ return False, delete_msg
+ return True, "True"
+
+
+class NsmTasklet(rift.tasklets.Tasklet):
+ """
+ The network service manager tasklet
+ """
+ def __init__(self, *args, **kwargs):
+ super(NsmTasklet, self).__init__(*args, **kwargs)
+ self.rwlog.set_category("rw-mano-log")
+ self.rwlog.set_subcategory("nsm")
+
+ self._dts = None
+ self.project_handler = None
+ self.projects = {}
+
+ @property
+ def dts(self):
+ return self._dts
+
+ def start(self):
+ """ The task start callback """
+ super(NsmTasklet, self).start()
+ self.log.info("Starting NsmTasklet")
+
+ self.log.debug("Registering with dts")
+ self._dts = rift.tasklets.DTS(self.tasklet_info,
+ RwNsmYang.get_schema(),
+ self.loop,
+ self.on_dts_state_change)
+
+ self.log.debug("Created DTS Api GI Object: %s", self._dts)
+
+ def stop(self):
+ try:
+ self._dts.deinit()
+ except Exception:
+ print("Caught Exception in NSM stop:", sys.exc_info()[0])
+ raise
+
+ def on_instance_started(self):
+ """ Task instance started callback """
+ self.log.debug("Got instance started callback")
+
+ @asyncio.coroutine
+ def init(self):
+ """ Task init callback """
+ self.log.debug("Got instance started callback")
+
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, NsmProject)
+ self.project_handler.register()
+
+
@asyncio.coroutine
def run(self):
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
class VnffgMgr(object):
""" Implements the interface to backend plugins to fetch topology """
- def __init__(self, dts, log, log_hdl, loop):
+ def __init__(self, dts, log, log_hdl, loop, project, cloud_account_handler):
self._account = {}
self._dts = dts
self._log = log
self._log_hdl = log_hdl
self._loop = loop
+ self._cloud_account_handler = cloud_account_handler
+ self._project = project
self._sdn = {}
- self._sdn_handler = SDNAccountDtsHandler(self._dts,self._log,self)
+ self._sdn_handler = SDNAccountDtsHandler(self._dts, self._log, self)
self._vnffgr_list = {}
@asyncio.coroutine
def register(self):
yield from self._sdn_handler.register()
+ def deregister(self):
+ self._log.debug("Project {} de-register vnffgmgr".
+ format(self._project.name))
+ self._sdn_handler.deregister()
+
def set_sdn_account(self,account):
if (account.name in self._account):
self._log.error("SDN Account is already set")
else:
- sdn_account = RwsdnalYang.SDNAccount()
+ sdn_account = RwsdnalYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList()
sdn_account.from_dict(account.as_dict())
sdn_account.name = account.name
self._account[account.name] = sdn_account
def get_sdn_account(self, name):
"""
- Creates an object for class RwsdnalYang.SdnAccount()
+ Creates an object for class RwsdnalYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList()
"""
if (name in self._account):
return self._account[name]
self._log.error("VNFFGR with id %s not present in VNFFGMgr", vnffgr_id)
msg = "VNFFGR with id {} not present in VNFFGMgr".format(vnffgr_id)
raise VnffgrDoesNotExist(msg)
- self.update_vnffgrs(self._vnffgr_list[vnffgr_id].sdn_account)
+ sdn_acct = self.get_sdn_account(self._vnffgr_list[vnffgr_id].sdn_account)
+ self._log.debug("SDN account received during vnffg update is %s",sdn_acct)
+ if sdn_acct.account_type != 'openstack':
+ self.update_vnffgrs(self._vnffgr_list[vnffgr_id].sdn_account)
vnffgr = self._vnffgr_list[vnffgr_id].deep_copy()
self._log.debug("VNFFGR for id %s is %s",vnffgr_id,vnffgr)
return vnffgr
sdn_plugin = self.get_sdn_plugin(sdn_acct_name)
for rsp in vnffgr.rsp:
- vnffg = RwsdnalYang.VNFFGChain()
+ vnffg = RwsdnalYang.YangData_RwProject_Project_Vnffgs_VnffgChain()
vnffg.name = rsp.name
vnffg.classifier_name = rsp.classifier_name
vnffgr.operational_status = 'failed'
msg = "Instantiation of VNFFGR with id {} failed".format(vnffgr.id)
raise VnffgrCreationFailed(msg)
-
+ rsp.rsp_id = rs
self._log.info("VNFFG chain created successfully for rsp with id %s",rsp.id)
vnffgr_cl = [_classifier for _classifier in vnffgr.classifier if classifier.id == _classifier.id]
if len(vnffgr_cl) > 0:
cl_rsp_name = vnffgr_cl[0].rsp_name
+ rsp_ids = [rsp.rsp_id for rsp in vnffgr.rsp if rsp.name == cl_rsp_name]
+ self._log.debug("Received RSP id for Cl is %s",rsp_ids)
else:
self._log.error("No RSP wiht name %s found; Skipping classifier %s creation",classifier.rsp_id_ref,classifier.name)
continue
- vnffgcl = RwsdnalYang.VNFFGClassifier()
+ vnffgcl = RwsdnalYang.YangData_RwProject_Project_VnffgClassifiers_VnffgClassifier()
vnffgcl.name = classifier.name
vnffgcl.rsp_name = cl_rsp_name
+ vnffgcl.rsp_id = rsp_ids[0]
vnffgcl.port_id = vnffgr_cl[0].port_id
vnffgcl.vm_id = vnffgr_cl[0].vm_id
# Get the symmetric classifier endpoint ip and set it in nsh ctx1
#acl.name = vnffgcl.name + str(index)
acl.name = match_rule.id
acl.ip_proto = match_rule.ip_proto
- acl.source_ip_address = match_rule.source_ip_address + '/32'
+ if match_rule.source_ip_address:
+ acl.source_ip_address = match_rule.source_ip_address + '/32'
acl.source_port = match_rule.source_port
- acl.destination_ip_address = match_rule.destination_ip_address + '/32'
+ if match_rule.destination_ip_address:
+ acl.destination_ip_address = match_rule.destination_ip_address + '/32'
acl.destination_port = match_rule.destination_port
self._log.debug(" Creating VNFFG Classifier Classifier %s for RSP: %s",vnffgcl.name,vnffgcl.rsp_name)
#vnffgr.operational_status = 'failed'
#msg = "Instantiation of VNFFGR with id {} failed".format(vnffgr.id)
#raise VnffgrCreationFailed(msg)
+ else:
+ vnffgr_cl[0].classifier_id = rs
vnffgr.operational_status = 'running'
- self.update_vnffgrs(vnffgr.sdn_account)
+ sdn_acct = self.get_sdn_account(vnffgr.sdn_account)
+ self._log.debug("SDN account received during vnffg update is %s",sdn_acct)
+ if sdn_acct.account_type != 'openstack':
+ self.update_vnffgrs(vnffgr.sdn_account)
return vnffgr
def update_vnffgrs(self,sdn_acct_name):
sdn_account = [sdn_account.name for _,sdn_account in self._account.items()]
sdn_account_name = sdn_account[0]
sdn_plugin = self.get_sdn_plugin(sdn_account_name)
- sdn_plugin.terminate_vnffg_chain(self._account[sdn_account_name],vnffgr_id)
- sdn_plugin.terminate_vnffg_classifier(self._account[sdn_account_name],vnffgr_id)
+ vnffgr = self._vnffgr_list[vnffgr_id]
+ sdn_acct = self.get_sdn_account(vnffgr.sdn_account)
+ self._log.debug("SDN account received during vnffg update is %s",sdn_acct)
+ if sdn_acct.account_type == 'openstack':
+ for rsp in vnffgr.rsp:
+ sdn_plugin.terminate_vnffg_chain(self._account[sdn_account_name],rsp.rsp_id)
+ for classifier in vnffgr.classifier:
+ sdn_plugin.terminate_vnffg_classifier(self._account[sdn_account_name],classifier.classifier_id)
+ else:
+ sdn_plugin.terminate_vnffg_chain(self._account[sdn_account_name],vnffgr_id)
+ sdn_plugin.terminate_vnffg_classifier(self._account[sdn_account_name],vnffgr_id)
del self._vnffgr_list[vnffgr_id]
class SDNAccountDtsHandler(object):
self._dts = dts
self._log = log
self._parent = parent
+ self._project = self._parent._project
self._sdn_account = {}
+ self._reg = None
def _set_sdn_account(self, account):
self._log.info("Setting sdn account: {}".format(account))
def register(self):
def apply_config(dts, acg, xact, action, _):
self._log.debug("Got sdn account apply config (xact: %s) (action: %s)", xact, action)
- if action == rwdts.AppconfAction.INSTALL and xact.id is None:
- self._log.debug("No xact handle. Skipping apply config")
+ if xact.id is None:
+ if action == rwdts.AppconfAction.INSTALL:
+ curr_cfg = self._reg.elements
+ for cfg in curr_cfg:
+ self._log.info("Config Agent Account {} being re-added after restart.".
+ format(cfg.name))
+ self._set_sdn_account(cfg)
+ else:
+ self._log.debug("No xact handle. Skipping apply config")
return RwTypes.RwStatus.SUCCESS
return RwTypes.RwStatus.SUCCESS
if msg.has_field("account_type"):
errmsg = "Cannot update SDN account's account-type."
self._log.error(errmsg)
- xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
- SDNAccountDtsHandler.XPATH,
- errmsg)
+ xact_info.send_error_xpath(
+ RwTypes.RwStatus.FAILURE,
+ self._project.add_project(SDNAccountDtsHandler.XPATH),
+ errmsg
+ )
raise SdnAccountError(errmsg)
# Update the sdn account record
if not msg.has_field('account_type'):
errmsg = "New SDN account must contain account-type field."
self._log.error(errmsg)
- xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
- SDNAccountDtsHandler.XPATH,
- errmsg)
+ xact_info.send_error_xpath(
+ RwTypes.RwStatus.FAILURE,
+ self._project.add_project(SDNAccountDtsHandler.XPATH),
+ errmsg
+ )
raise SdnAccountError(errmsg)
# Set the sdn account record
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- self._log.debug("Registering for Sdn Account config using xpath: %s",
- SDNAccountDtsHandler.XPATH,
- )
+ xpath = self._project.add_project(SDNAccountDtsHandler.XPATH)
+ self._log.debug("Registering for Sdn Account config using xpath: {}".
+ format(xpath))
acg_handler = rift.tasklets.AppConfGroup.Handler(
on_apply=apply_config,
)
with self._dts.appconf_group_create(acg_handler) as acg:
- acg.register(
- xpath=SDNAccountDtsHandler.XPATH,
- flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
- on_prepare=on_prepare
- )
-
-
-
+ self._reg = acg.register(
+ xpath=xpath,
+ flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
+ on_prepare=on_prepare
+ )
+
+ def deregister(self):
+ self._log.debug("De-register SDN Account handler in vnffg for project".
+ format(self._project.name))
+ self._reg.deregister()
+ self._reg = None
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
from enum import Enum
-from gi.repository import NsdYang, NsrYang
+import gi
+gi.require_version('NsdBaseYang', '1.0')
+gi.require_version('ProjectNsdYang', '1.0')
+gi.require_version('NsrYang', '1.0')
+from gi.repository import (
+ NsdBaseYang,
+ ProjectNsdYang as NsdYang,
+ NsrYang
+ )
class ScalingGroupIndexExists(Exception):
def create_record_msg(self):
""" Returns a NSR Scaling group record """
- msg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ScalingGroupRecord(
+ msg = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ScalingGroupRecord(
scaling_group_name_ref=self.name,
)
def trigger_map(self, trigger):
trig_map = {
- NsdYang.ScalingTrigger.PRE_SCALE_IN : 'pre_scale_in',
- NsdYang.ScalingTrigger.POST_SCALE_IN : 'post_scale_in',
- NsdYang.ScalingTrigger.PRE_SCALE_OUT : 'pre_scale_out',
- NsdYang.ScalingTrigger.POST_SCALE_OUT : 'post_scale_out',
+ NsdBaseYang.ScalingTrigger.PRE_SCALE_IN : 'pre_scale_in',
+ NsdBaseYang.ScalingTrigger.POST_SCALE_IN : 'post_scale_in',
+ NsdBaseYang.ScalingTrigger.PRE_SCALE_OUT : 'pre_scale_out',
+ NsdBaseYang.ScalingTrigger.POST_SCALE_OUT : 'post_scale_out',
}
try:
return self._vnfrs.values()
def create_record_msg(self):
- msg = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr_ScalingGroupRecord_Instance(
+ msg = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr_ScalingGroupRecord_Instance(
instance_id=self._instance_id,
create_time=self._create_time,
op_status=self._op_status,
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="1">
- <data>
- <cm-config xmlns="http://riftio.com/ns/riftware-1.0/rw-conman">
- <initiate-nsr-cfg></initiate-nsr-cfg>
- </cm-config>
- </data>
-</rpc-reply>
--- /dev/null
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import rift.mano.dts as mano_dts
+import asyncio
+
+from gi.repository import (
+ RwDts as rwdts,
+ RwTypes,
+ RwVlrYang,
+ RwYang
+ )
+import rift.tasklets
+
+import requests
+
+
+class VlrSubscriberDtsHandler(mano_dts.AbstractOpdataSubscriber):
+ """ VLR DTS handler """
+ XPATH = "D,/vlr:vlr-catalog/vlr:vlr"
+
+ def __init__(self, log, dts, loop, project, callback=None):
+ super().__init__(log, dts, loop, project, callback)
+
+ def get_xpath(self):
+ return ("D,/vlr:vlr-catalog/vlr:vlr")
##
# This function creates an install target for the plugin artifacts
##
-rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+rift_install_gobject_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py COMPONENT ${INSTALL_COMPONENT})
# Workaround RIFT-6485 - rpmbuild defaults to python2 for
# anything not in a site-packages directory so we have to
rift/tasklets/${TASKLET_NAME}/publisher/copy_status.py
rift/tasklets/${TASKLET_NAME}/subscriber/__init__.py
rift/tasklets/${TASKLET_NAME}/subscriber/download_status.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY)
rift_add_subdirs(test)
# Author(s): Nandan Sinha
#
+import enum
+import gi
+import json
import os
-import uuid
import shutil
-import enum
+import uuid
-import gi
gi.require_version('RwVnfdYang', '1.0')
gi.require_version('RwNsdYang', '1.0')
from gi.repository import (
)
import rift.package.icon as icon
+import rift.tasklets.rwlaunchpad.onboard as onboard
class PackageCopyError(Exception):
pass
return self.__dict__
def to_yang(self):
- job = RwPkgMgmtYang.CopyJob.from_dict({
+ job = RwPkgMgmtYang.YangData_RwProject_Project_CopyJobs_Job.from_dict({
"transaction_id": self.transaction_id,
"status": CopyMeta.STATUS_MAP[self.state]
})
return job
+class CopyManifest:
+ """ Utility class to hold manifest information."""
+ def __init__(self, project, log):
+ self.tasklet_info = project.tasklet.tasklet_info
+ self.manifest = self.tasklet_info.get_pb_manifest()
+ self.use_ssl = self.manifest.bootstrap_phase.rwsecurity.use_ssl
+ self.ssl_cert, self.ssl_key = None, None
+ if self.use_ssl:
+ self.ssl_cert = self.manifest.bootstrap_phase.rwsecurity.cert
+ self.ssl_key = self.manifest.bootstrap_phase.rwsecurity.key
+ self.onboarder = None
+ self.log = log
+
+ def ssl_manifest(self):
+ return (self.use_ssl, self.ssl_cert, self.ssl_key)
+
+ def get_onboarder(self, host="127.0.0.1", port="8008"):
+ if not self.onboarder:
+ self.onboarder = onboard.DescriptorOnboarder(self.log,
+ host, port, *self.ssl_manifest())
+ return self.onboarder
+
+
class PackageFileCopier:
DESCRIPTOR_MAP = {
"vnfd": (RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd, 'vnfd rw-vnfd'),
}
@classmethod
- def from_rpc_input(cls, rpc_input, proxy, log=None):
+ def from_rpc_input(cls, rpc_input, project, proxy, log=None):
return cls(
rpc_input.package_id,
rpc_input.package_type,
rpc_input.package_name,
+ rpc_input.project_name,
+ project = project,
proxy = proxy,
log=log)
pkg_id,
pkg_type,
pkg_name,
+ proj_name,
+ project,
proxy,
log):
self.src_package_id = pkg_id
self.package_type = pkg_type.lower()
self.dest_package_name = pkg_name
+ self.project_name = proj_name
+ self.manifest = CopyManifest(project, log)
self.dest_package_id = str(uuid.uuid4())
self.transaction_id = str(uuid.uuid4())
self.proxy = proxy
self.src_package = None
self.dest_desc_msg = None
+ @property
+ def onboarder(self):
+ """ Onboarder object to invoke REST endpoint calls."""
+ return self.manifest.get_onboarder()
+
+ @property
+ def progress(self):
+ """ Current status of operations."""
+ return self.meta.to_yang()
+
+ @property
+ def descriptor_msg(self):
+ """ Descriptor message of the generated copied descriptor."""
+ return self.dest_desc_msg
+
# Start of delegate calls
def call_delegate(self, event):
if not self.delegate:
return
- # Send out the descriptor message to be posted on success
- # Otherwise send out the CopyJob yang conversion from meta object.
- if event == "on_download_succeeded":
- getattr(self.delegate, event)(self.dest_desc_msg)
- else:
- getattr(self.delegate, event)(self.meta.to_yang())
+ getattr(self.delegate, event)(self)
def _copy_tree(self):
"""
"""
self.copy_progress()
- store = self.proxy._get_store(self.package_type)
+ store = self.proxy._get_store(self.package_type, \
+ self.project_name if self.project_name else None)
src_path = store._get_package_dir(self.src_package_id)
self.src_package = store.get_package(self.src_package_id)
self.dest_copy_path = os.path.join(
- store.DEFAULT_ROOT_DIR,
+ store.root_dir,
self.dest_package_id)
self.log.debug("Copying contents from {src} to {dest}".
format(src=src_path, dest=self.dest_copy_path))
def _create_descriptor_file(self):
""" Update descriptor file for the newly copied descriptor catalog.
- Use the existing descriptor file to create a descriptor proto gi object,
- change some identifiers, and create a new descriptor yaml file from it.
-
+ Get descriptor contents from REST endpoint, change some identifiers
+ and create a new descriptor yaml file from it.
"""
- src_desc_file = self.src_package.descriptor_file
- src_desc_contents = self.src_package.descriptor_msg.as_dict()
- src_desc_contents.update(
+ # API call for the updated descriptor contents
+ src_desc_contents = self.onboarder.get_updated_descriptor(self.src_package.descriptor_msg, self.project_name)
+
+ # To generate the pb object, extract subtree in dict from "project-nsd:nsd" and root it
+ # under "nsd:nsd-catalog" (or vnfd)
+ root_element = "{0}:{0}-catalog".format(self.package_type)
+ extract_sub_element = "project-{0}:{0}".format(self.package_type, self.package_type)
+ src_desc_contents[extract_sub_element].update(
id =self.dest_package_id,
name = self.dest_package_name,
short_name = self.dest_package_name
)
+ D = {}
+ D[root_element] = {self.package_type : src_desc_contents[extract_sub_element]}
+ # Build the proto-buf gi object from generated JSON
+ json_desc_msg = json.dumps(D)
+ self.log.debug("*** JSON contents: {}".format(json_desc_msg))
desc_cls, modules = PackageFileCopier.DESCRIPTOR_MAP[self.package_type]
- self.dest_desc_msg = desc_cls.from_dict(src_desc_contents)
- dest_desc_path = os.path.join(self.dest_copy_path,
- "{pkg_name}_{pkg_type}.yaml".format(pkg_name=self.dest_package_name, pkg_type=self.package_type))
- model = RwYang.Model.create_libncx()
+
+ model = RwYang.Model.create_libyang()
for module in modules.split():
model.load_module(module)
+ self.dest_desc_msg = desc_cls.from_json(model, json_desc_msg, strict=False)
+
+ # Write to yaml desc file
+ dest_desc_path = os.path.join(self.dest_copy_path,
+ "{pkg_name}_{pkg_type}.yaml".format(pkg_name=self.dest_package_name, pkg_type=self.package_type))
with open(dest_desc_path, "w") as fh:
fh.write(self.dest_desc_msg.to_yaml(model))
+ # Remove copied .yaml, if present
+ src_desc_file = self.src_package.descriptor_file
copied_desc_file = os.path.join(self.dest_copy_path, os.path.basename(src_desc_file))
if os.path.exists(copied_desc_file):
self.log.debug("Deleting copied yaml from old source %s" % (copied_desc_file))
# See the License for the specific language governing permissions and
# limitations under the License.
#
+
+import gi
+
import rift.downloader as downloader
from gi.repository import RwPkgMgmtYang
}
@classmethod
- def from_rpc_input(cls, rpc_input, file_obj, proxy, log=None, auth=None):
+ def from_rpc_input(cls, rpc_input, file_obj, proxy, log=None, auth=None, project=None):
"""Convenience class to set up an instance form RPC data
"""
url_downloader = cls(
auth=auth,
proxy=proxy,
file_obj=file_obj,
- log=log)
+ log=log,
+ project=project)
return url_downloader
delete_on_fail=True,
decompress_on_fly=False,
auth=None,
- log=None):
+ log=None,
+ project=None):
super().__init__(
url,
file_obj=file_obj,
self.package_file_type = vnfd_file_type.lower() \
if package_type == 'VNFD' else nsd_file_type.lower()
self.proxy = proxy
+ self.project = project
def convert_to_yang(self):
- job = RwPkgMgmtYang.DownloadJob.from_dict({
+ job = RwPkgMgmtYang.YangData_RwProject_Project_DownloadJobs_Job.from_dict({
"url": self.meta.url,
"download_id": self.meta.download_id,
"package_id": self.package_id,
self.package_type,
self.package_id,
self.package_path,
- self.package_file_type)
+ self.package_file_type,
+ self.project)
except Exception as e:
self.log.exception(e)
- self.job.detail = str(e)
+ self.meta.detail = str(e)
self.download_failed()
return
import rift.package.store as store
import rift.package.package
import rift.package.icon as icon
+import rift.package.checksums as checksums
from .base import AbstractPackageManagerProxy
-
+from rift.tasklets.rwlaunchpad import image
class UnknownPackageType(Exception):
pass
# Refer: https://confluence.riftio.com/display/ATG/Launchpad+package+formats
SCHEMA = {
"nsd": ["icons", "ns_config", "scripts", "vnf_config"],
- "vnfd": ["charms", "cloud_init", "icons", "images", "scripts", "readme"]
+ "vnfd": ["charms", "cloud_init", "icons", "images", "scripts", "readme", "test", "doc"]
}
SCHEMA_TO_PERMS = {'scripts': 0o777}
- def __init__(self, loop, log):
+ def __init__(self, loop, log, dts):
self.loop = loop
self.log = log
+ self.dts = dts
self.store_cache = {}
+ self.uploader = image.ImageUploader(self.log, self.loop, self.dts)
- def _get_store(self, package_type):
+ def _get_store(self, package_type, project_name = None):
store_cls = self.PACKAGE_TYPE_MAP[package_type]
- store = self.store_cache.setdefault(package_type, store_cls(self.log))
+ self.store_cache[package_type] = store_cls(self.log, project=project_name)
+ store = self.store_cache[package_type]
return store
@asyncio.coroutine
- def endpoint(self, package_type, package_id):
+ def endpoint(self, package_type, package_id, project_name=None):
package_type = package_type.lower()
if package_type not in self.PACKAGE_TYPE_MAP:
raise UnknownPackageType()
-
- store = self._get_store(package_type)
+
+ store = self._get_store(package_type, project_name)
package = store._get_package_dir(package_id)
- rel_path = os.path.relpath(package, start=store.root_dir)
-
- url = "https://127.0.0.1:4567/api/package/{}/{}".format(package_type, rel_path)
+ rel_path = os.path.relpath(package, start=os.path.dirname(store.root_dir))
+ url = "https://127.0.0.1:8008/mano/api/package/{}/{}".format(package_type, rel_path)
+
return url
@asyncio.coroutine
return self.SCHEMA[package_type]
- def package_file_add(self, new_file, package_type, package_id, package_path, package_file_type):
+ def package_file_add(self, new_file, package_type, package_id, package_path, package_file_type, project_name):
# Get the schema from thr package path
# the first part will always be the vnfd/nsd name
mode = 0o664
# for files other than README, create the package path from the asset type, e.g. icons/icon1.png
# for README files, strip off any leading '/'
+ file_name = package_path
package_path = package_file_type + "/" + package_path \
if package_file_type != "readme" else package_path.strip('/')
+
components = package_path.split("/")
if len(components) > 2:
schema = components[1]
# Fetch the package object
package_type = package_type.lower()
- store = self._get_store(package_type)
+ store = self._get_store(package_type, project_name)
package = store.get_package(package_id)
# Construct abs path of the destination obj
# Insert (by copy) the file in the package location. For icons,
# insert also in UI location for UI to pickup
try:
+ self.log.debug("Inserting file {} in the destination {} - {} ".format(dest_file, package_path, dest_file))
package.insert_file(new_file, dest_file, package_path, mode=mode)
if package_file_type == 'icons':
icon_extract = icon.PackageIconExtractor(self.log)
icon_extract.extract_icons(package)
+ if package_file_type == 'images':
+ image_hdl = package.open(package_path)
+ image_checksum = checksums.checksum(image_hdl)
+
+ try:
+ self.uploader.upload_image(file_name, image_checksum, image_hdl, {})
+ self.uploader.upload_image_to_cloud_accounts(file_name, image_checksum, project_name)
+ finally:
+ _ = image_hdl.close()
except rift.package.package.PackageAppendError as e:
self.log.exception(e)
return False
self.log.debug("File insertion complete at {}".format(dest_file))
return True
- def package_file_delete(self, package_type, package_id, package_path, package_file_type):
+ def package_file_delete(self, package_type, package_id, package_path, package_file_type, project_name):
package_type = package_type.lower()
- store = self._get_store(package_type)
+ store = self._get_store(package_type, project_name)
package = store.get_package(package_id)
# for files other than README, create the relative package path from the asset type
# Author(s): Nandan Sinha
#
-import sys
-import asyncio
-import uuid
import abc
+import asyncio
import functools
+import gi
+import sys
+import uuid
from concurrent.futures import Future
from gi.repository import (RwDts as rwdts)
import rift.mano.dts as mano_dts
import rift.downloader as url_downloader
import rift.tasklets.rwlaunchpad.onboard as onboard
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
if sys.version_info < (3, 4, 4):
asyncio.ensure_future = asyncio.async
class CopyStatusPublisher(mano_dts.DtsHandler, url_downloader.DownloaderProtocol):
- def __init__(self, log, dts, loop, tasklet_info):
- super().__init__(log, dts, loop)
- self.tasks = {}
- self.tasklet_info = tasklet_info
+ def __init__(self, log, dts, loop, project):
+ super().__init__(log, dts, loop, project)
+ self.tasks = {}
+ self.tasklet_info = project.tasklet.tasklet_info
def xpath(self, transaction_id=None):
- return ("D,/rw-pkg-mgmt:copy-jobs/rw-pkg-mgmt:job" +
- ("[transaction-id='{}']".format(transaction_id) if transaction_id else ""))
+ return self.project.add_project("D,/rw-pkg-mgmt:copy-jobs/rw-pkg-mgmt:job" +
+ ("[transaction-id={}]".format(quoted_key(transaction_id)) if transaction_id else ""))
pass
-
+
@asyncio.coroutine
def register(self):
self.reg = yield from self.dts.register(xpath=self.xpath(),
assert self.reg is not None
+ def deregister(self):
+ if self.reg:
+ self.reg.deregister()
+ self.reg = None
+
@asyncio.coroutine
def register_copier(self, copier):
copier.delegate = self
def on_download_progress(self, job_msg):
"""callback that triggers update.
"""
- return self._schedule_dts_work(job_msg)
+ return self._schedule_dts_work(job_msg.progress)
def on_download_finished(self, job_msg):
"""callback that triggers update.
if key in self.tasks:
del self.tasks[key]
- return self._schedule_dts_work(job_msg)
+ return self._schedule_dts_work(job_msg.progress)
def on_download_succeeded(self, job_msg):
"""Post the catalog descriptor object to the http endpoint.
- Argument: job_msg (proto-gi descriptor_msg of the copied descriptor)
+ Argument: job_msg (of type PackageFileCopier)
"""
- manifest = self.tasklet_info.get_pb_manifest()
- use_ssl = manifest.bootstrap_phase.rwsecurity.use_ssl
- ssl_cert, ssl_key = None, None
- if use_ssl:
- ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
- ssl_key = manifest.bootstrap_phase.rwsecurity.key
-
- onboarder = onboard.DescriptorOnboarder(self.log,
- "127.0.0.1", 8008, use_ssl, ssl_cert, ssl_key)
try:
- onboarder.onboard(job_msg)
+ job_msg.onboarder.onboard(job_msg.descriptor_msg, project=self._project.name)
except onboard.OnboardError as e:
self.log.error("Onboard exception triggered while posting copied catalog descriptor %s", e)
raise
#
import asyncio
+import gi
import sys
from gi.repository import (RwDts as rwdts)
import rift.mano.dts as mano_dts
import rift.downloader as url_downloader
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
import functools
import concurrent
class DownloadStatusPublisher(mano_dts.DtsHandler, url_downloader.DownloaderProtocol):
- def __init__(self, log, dts, loop):
- super().__init__(log, dts, loop)
+ def __init__(self, log, dts, loop, project):
+ super().__init__(log, dts, loop, project)
self.tasks = {}
def xpath(self, download_id=None):
- return ("D,/rw-pkg-mgmt:download-jobs/rw-pkg-mgmt:job" +
- ("[download-id='{}']".format(download_id) if download_id else ""))
+ return self._project.add_project("D,/rw-pkg-mgmt:download-jobs/rw-pkg-mgmt:job" +
+ ("[download-id={}]".
+ format(quoted_key(download_id)) if download_id else ""))
@asyncio.coroutine
def _dts_publisher(self, job):
flags=rwdts.Flag.PUBLISHER|rwdts.Flag.CACHE|rwdts.Flag.NO_PREP_READ)
assert self.reg is not None
+
+ def dergister(self):
+ self._log.debug("De-registering download status for project {}".
+ format(self.project.name))
+ if self.reg:
+ self.reg.deregister()
+ self.reg = None
@staticmethod
def _async_func(func, fut):
import abc
import asyncio
+import gi
import tempfile
from gi.repository import (
url = yield from self.proxy.endpoint(
msg.package_type if msg.has_field('package_type') else "",
- msg.package_id)
+ msg.package_id,
+ msg.project_name if msg.has_field('project_name') else None)
rpc_op = RPC_PKG_ENDPOINT.from_dict({"endpoint": url})
3. Return a tracking ID for the client to monitor the entire status
"""
- def __init__(self, log, dts, loop, proxy, publisher):
+ def __init__(self, log, dts, loop, proxy, tasklet):
"""
Args:
proxy: Any impl of .proxy.AbstractPackageManagerProxy
- publisher: Instance of DownloadStatusPublisher
+ publisher: Instance of tasklet to find the DownloadStatusPublisher
+ for a specific project
"""
super().__init__(log, dts, loop)
self.proxy = proxy
- self.publisher = publisher
+ self.tasklet = tasklet
@property
def xpath(self):
return "/rw-pkg-mgmt:package-file-add"
+ def get_publisher(self, msg):
+ try:
+ proj = self.tasklet.projects[msg.project_name]
+ except Exception as e:
+ err = "Project or project name not found {}: {}". \
+ format(msg.as_dict(), e)
+ self.log.error (err)
+ raise Exception (err)
+
+ return proj.job_handler
+
@asyncio.coroutine
def callback(self, ks_path, msg):
+ publisher = self.get_publisher(msg)
+
if not msg.external_url:
# For now we will only support External URL download
raise Exception ("No download URL provided")
auth=auth,
file_obj=filename,
proxy=self.proxy,
- log=self.log)
+ log=self.log,
+ project=msg.project_name)
- download_id = yield from self.publisher.register_downloader(url_downloader)
+ download_id = yield from publisher.register_downloader(url_downloader)
rpc_op = RPC_PACKAGE_ADD_ENDPOINT.from_dict({"task_id": download_id})
return rpc_op
class PackageCopyOperationsRpcHandler(mano_dts.AbstractRpcHandler):
- def __init__(self, log, dts, loop, proxy, publisher):
+ def __init__(self, log, dts, loop, project, proxy, publisher):
"""
Args:
proxy: Any impl of .proxy.AbstractPackageManagerProxy
publisher: CopyStatusPublisher object
"""
- super().__init__(log, dts, loop)
+ super().__init__(log, dts, loop, project)
self.proxy = proxy
self.publisher = publisher
@asyncio.coroutine
def callback(self, ks_path, msg):
import uuid
- copier = pkg_downloader.PackageFileCopier.from_rpc_input(msg, proxy=self.proxy, log=self.log)
+ copier = pkg_downloader.PackageFileCopier.from_rpc_input(msg, self.project, proxy=self.proxy, log=self.log)
transaction_id, dest_package_id = yield from self.publisher.register_copier(copier)
rpc_op = RPC_PACKAGE_COPY_ENDPOINT.from_dict({
msg.package_type,
msg.package_id,
msg.package_path,
- package_file_type)
+ package_file_type,
+ msg.project_name,
+ )
except Exception as e:
self.log.exception(e)
rpc_op.status = str(False)
"""
import asyncio
-
import gi
+
gi.require_version('RwDts', '1.0')
-gi.require_version('RwPkgMgmtYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
from gi.repository import (
RwDts as rwdts,
- RwPkgMgmtYang)
-import rift.tasklets
+ RwLaunchpadYang)
+import rift.tasklets
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+)
from . import rpc
from .proxy import filesystem
from . import publisher as pkg_publisher
-from . import subscriber
+from . import subscriber
+
+class PackageManagerProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(PackageManagerProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+ proxy = kw["proxy"]
+
+ args = [self.log, self.dts, self.loop, self]
+
+ # create catalog publishers
+ self.job_handler = pkg_publisher.DownloadStatusPublisher(*args)
+ self.copy_publisher = pkg_publisher.CopyStatusPublisher(*args)
+
+ # create catalog subscribers
+ self.vnfd_catalog_sub = subscriber.VnfdStatusSubscriber(*args)
+ self.nsd_catalog_sub = subscriber.NsdStatusSubscriber(*args)
+
+ args.append(proxy)
+ self.copy_rpc = rpc.PackageCopyOperationsRpcHandler(*(args + [self.copy_publisher]))
+
+ @asyncio.coroutine
+ def register (self):
+ try:
+ yield from self.vnfd_catalog_sub.register()
+ yield from self.nsd_catalog_sub.register()
+ yield from self.copy_rpc.register()
+ yield from self.copy_publisher.register()
+ yield from self.job_handler.register()
+ except Exception as e:
+ self.log.exception("Exception registering project {}: {}".
+ format(self.name, e))
+
+ def deregister (self):
+ self.job_handler.deregister()
+ self.copy_rpc.deregister()
+ self.copy_publisher.deregister()
+ self.vnfd_catalog_sub.deregister()
+ self.nsd_catalog_sub.deregister()
+
class PackageManagerTasklet(rift.tasklets.Tasklet):
def __init__(self, *args, **kwargs):
self.rwlog.set_category("rw-mano-log")
self.endpoint_rpc = None
self.schema_rpc = None
+
+ self._project_handler = None
+ self.projects = {}
+
except Exception as e:
self.log.exception(e)
try:
super().start()
+
self.dts = rift.tasklets.DTS(
self.tasklet_info,
- RwPkgMgmtYang.get_schema(),
+ RwLaunchpadYang.get_schema(),
self.loop,
self.on_dts_state_change
)
-
- proxy = filesystem.FileSystemProxy(self.loop, self.log)
- args = [self.log, self.dts, self.loop]
- # create catalog publishers
- self.job_handler = pkg_publisher.DownloadStatusPublisher(*args)
- self.copy_publisher = pkg_publisher.CopyStatusPublisher(*args +[self.tasklet_info])
-
- # create catalog subscribers
- self.vnfd_catalog_sub = subscriber.VnfdStatusSubscriber(*args)
- self.nsd_catalog_sub = subscriber.NsdStatusSubscriber(*args)
+ proxy = filesystem.FileSystemProxy(self.loop, self.log, self.dts)
+ args = [self.log, self.dts, self.loop]
args.append(proxy)
self.endpoint_rpc = rpc.EndpointDiscoveryRpcHandler(*args)
self.schema_rpc = rpc.SchemaRpcHandler(*args)
self.delete_rpc = rpc.PackageDeleteOperationsRpcHandler(*args)
- self.copy_rpc = rpc.PackageCopyOperationsRpcHandler(*(args + [self.copy_publisher]))
- args.append(self.job_handler)
+ args.append(self)
self.pkg_op = rpc.PackageOperationsRpcHandler(*args)
+ self.project_handler = ProjectHandler(self, PackageManagerProject,
+ proxy=proxy,)
except Exception as e:
- self.log.error("Exception caught rwpkgmgr start: %s", str(e))
+ self.log.exception("Exception caught rwpkgmgr start: %s", str(e))
else:
self.log.debug("rwpkgmgr started successfully!")
yield from self.endpoint_rpc.register()
yield from self.schema_rpc.register()
yield from self.pkg_op.register()
- yield from self.job_handler.register()
yield from self.delete_rpc.register()
- yield from self.copy_rpc.register()
- yield from self.copy_publisher.register()
- yield from self.vnfd_catalog_sub.register()
- yield from self.nsd_catalog_sub.register()
+
+ self.log.debug("creating project handler")
+ self.project_handler.register()
except Exception as e:
self.log.error("Exception caught rwpkgmgr init %s", str(e))
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# Author(s): Varun Prasad
# Creation Date: 09/25/2016
-#
+#
+import gi
import os
-import io
-import shutil
import rift.mano.dts as mano_dts
-import rift.package.package as package
-import rift.package.store as store
-import rift.package.convert as convert
+import rift.package.store as store
+from rift.package.convert import (
+ RwVnfdSerializer,
+ RwNsdSerializer,
+)
from gi.repository import (
RwYang,
- NsdYang,
- RwNsdYang,
- VnfdYang,
- RwVnfdYang,
RwDts
)
class DownloadStatusSubscriber(mano_dts.AbstractOpdataSubscriber):
+ def __init__(self, log, dts, loop, project, callback):
+ super().__init__(log, dts, loop, project, callback)
- def __init__(self, log, dts, loop, callback):
- super().__init__(log, dts, loop, callback)
-
- def get_xpath(self):
- return ("D,/rw-pkg-mgmt:download-jobs/rw-pkg-mgmt:job")
+ def get_xpath(self):
+ return self._project.add_project(
+ "D,/rw-pkg-mgmt:download-jobs/rw-pkg-mgmt:job")
-class VnfdStatusSubscriber(DownloadStatusSubscriber):
+
+class VnfdStatusSubscriber(mano_dts.VnfdCatalogSubscriber):
DOWNLOAD_DIR = store.VnfdPackageFilesystemStore.DEFAULT_ROOT_DIR
- MODULE_DESC = 'vnfd rw-vnfd'.split()
DESC_TYPE = 'vnfd'
-
- def __init__(self, log, dts, loop):
- super().__init__(log, dts, loop, self.on_change)
- self.subscriber = mano_dts.VnfdCatalogSubscriber(log, dts, loop)
+ SERIALIZER = RwVnfdSerializer()
+
+ def __init__(self, log, dts, loop, project):
+ super().__init__(log, dts, loop, project, callback=self.on_change)
- def on_change(self, msg, action):
- log_msg = "1. Vnfd called w/ msg attributes: {} id {} name {} action: {}".format(repr(msg), msg.id, msg.name, repr(action))
+ def on_change(self, msg, action):
+ log_msg = "1. Vnfd called w/ msg attributes: {} id {} name {} action: {}". \
+ format(repr(msg), msg.id, msg.name, repr(action))
self.log.debug(log_msg)
- if action == RwDts.QueryAction.UPDATE:
- actionCreate(self, msg)
+ if action == RwDts.QueryAction.UPDATE or action == RwDts.QueryAction.CREATE:
+ actionCreate(self, msg, self.project.name)
else:
self.log.debug("VnfdStatusSubscriber: No action for {}".format(repr(action)))
pass
- def get_xpath(self):
- return self.subscriber.get_xpath()
-
-class NsdStatusSubscriber(DownloadStatusSubscriber):
+class NsdStatusSubscriber(mano_dts.NsdCatalogSubscriber):
DOWNLOAD_DIR = store.NsdPackageFilesystemStore.DEFAULT_ROOT_DIR
- MODULE_DESC = 'nsd rw-nsd'.split()
DESC_TYPE = 'nsd'
-
- def __init__(self, log, dts, loop):
- super().__init__(log, dts, loop, self.on_change)
- self.subscriber = mano_dts.NsdCatalogSubscriber(log, dts, loop)
+ SERIALIZER = RwNsdSerializer()
- def on_change(self, msg, action):
- log_msg = "1. Nsd called w/ msg attributes: {} id {} name {} action: {}".format(repr(msg), msg.id, msg.name, repr(action))
+ def __init__(self, log, dts, loop, project):
+ super().__init__(log, dts, loop, project, callback=self.on_change)
+
+ def on_change(self, msg, action):
+ log_msg = "1. Nsd called w/ msg attributes: {} id {} name {} action: {}". \
+ format(repr(msg), msg.id, msg.name, repr(action))
self.log.debug(log_msg)
- if action == RwDts.QueryAction.UPDATE:
- actionCreate(self, msg)
+ if action == RwDts.QueryAction.UPDATE or action == RwDts.QueryAction.CREATE:
+ actionCreate(self, msg, self.project.name)
else:
self.log.debug("NsdStatusSubscriber: No action for {}".format(repr(action)))
pass
- def get_xpath(self):
- return self.subscriber.get_xpath()
-
-def actionCreate(descriptor, msg):
- ''' Create folder structure if it doesn't exist: id/vnf name OR id/nsd name
+def actionCreate(descriptor, msg, project_name=None):
+ ''' Create folder structure if it doesn't exist: id/vnf name OR id/nsd name
Serialize the Vnfd/Nsd object to yaml and store yaml file in the created folder.
'''
- desc_name = msg.name if msg.name else ""
- download_dir = os.path.join(descriptor.DOWNLOAD_DIR, msg.id)
+ download_dir = os.path.join(
+ descriptor.DOWNLOAD_DIR,
+ project_name if project_name else "",
+ msg.id)
- # If a download dir is present with contents, then we know it has been created in the
- # upload path.
+ # If a download dir is present with contents, then we know it has been created in the
+ # upload path.
if os.path.exists(download_dir) and os.listdir(download_dir):
descriptor.log.debug("Skpping folder creation, {} already present".format(download_dir))
return
- else:
+ else:
# Folder structure is based on top-level package-id directory
if not os.path.exists(download_dir):
os.makedirs(download_dir)
descriptor.log.debug("Created directory {}".format(download_dir))
-
- model = RwYang.Model.create_libncx()
- for module in descriptor.MODULE_DESC: model.load_module(module)
-
- yaml_path = "{base}/{name}_{type}.yaml".format(base=download_dir, name=msg.name, type=descriptor.DESC_TYPE)
- with open(yaml_path,"w") as fh:
- fh.write(msg.to_yaml(model))
-
+ yaml_path = "{base}/{name}_{type}.yaml". \
+ format(base=download_dir, name=msg.name[0:50], type=descriptor.DESC_TYPE)
+ with open(yaml_path,"w") as fh:
+ fh.write(descriptor.SERIALIZER.to_yaml_string(msg))
#
##
-# utest_subscriber_dts
+# utest_pkgmgr_subscriber_dts
##
-rift_py3test(utest_subscriber_dts.py
+rift_py3test(utest_pkgmgr_subscriber_dts.py
TEST_ARGS
- ${CMAKE_CURRENT_SOURCE_DIR}/utest_subscriber_dts.py
+ ${CMAKE_CURRENT_SOURCE_DIR}/utest_pkgmgr_subscriber_dts.py
)
##
-# utest_publisher_dts
+# utest_pkgmgr_publisher_dts
##
-rift_py3test(utest_publisher_dts.test_download_publisher
+rift_py3test(utest_pkgmgr_publisher_dts.test_download_publisher
TEST_ARGS
- ${CMAKE_CURRENT_SOURCE_DIR}/utest_publisher_dts.py TestCase.test_download_publisher
+ ${CMAKE_CURRENT_SOURCE_DIR}/utest_pkgmgr_publisher_dts.py TestCase.test_download_publisher
)
-rift_py3test(utest_publisher_dts.test_publish
+rift_py3test(utest_pkgmgr_publisher_dts.test_publish
TEST_ARGS
- ${CMAKE_CURRENT_SOURCE_DIR}/utest_publisher_dts.py TestCase.test_publish
+ ${CMAKE_CURRENT_SOURCE_DIR}/utest_pkgmgr_publisher_dts.py TestCase.test_publish
)
-rift_py3test(utest_publisher_dts.test_url_download
+rift_py3test(utest_pkgmgr_publisher_dts.test_url_download
TEST_ARGS
- ${CMAKE_CURRENT_SOURCE_DIR}/utest_publisher_dts.py TestCase.test_url_download
+ ${CMAKE_CURRENT_SOURCE_DIR}/utest_pkgmgr_publisher_dts.py TestCase.test_url_download
)
-rift_py3test(utest_publisher_dts.test_url_download_unreachable_ip
+rift_py3test(utest_pkgmgr_publisher_dts.test_url_download_unreachable_ip
TEST_ARGS
- ${CMAKE_CURRENT_SOURCE_DIR}/utest_publisher_dts.py TestCase.test_url_download_unreachable_ip
+ ${CMAKE_CURRENT_SOURCE_DIR}/utest_pkgmgr_publisher_dts.py TestCase.test_url_download_unreachable_ip
)
-rift_py3test(utest_publisher_dts.test_cancelled
+rift_py3test(utest_pkgmgr_publisher_dts.test_cancelled
TEST_ARGS
- ${CMAKE_CURRENT_SOURCE_DIR}/utest_publisher_dts.py TestCase.test_cancelled
+ ${CMAKE_CURRENT_SOURCE_DIR}/utest_pkgmgr_publisher_dts.py TestCase.test_cancelled
)
-add_custom_target(utest_publisher_dts.py
+add_custom_target(utest_pkgmgr_publisher_dts.py
DEPENDS
- utest_publisher_dts.test_download_publisher
- utest_publisher_dts.test_publish
- utest_publisher_dts.test_url_download
- utest_publisher_dts.test_url_download_unreachable_ip
- utest_publisher_dts.test_cancelled
+ utest_pkgmgr_publisher_dts.test_download_publisher
+ utest_pkgmgr_publisher_dts.test_publish
+ utest_pkgmgr_publisher_dts.test_url_download
+ utest_pkgmgr_publisher_dts.test_url_download_unreachable_ip
+ utest_pkgmgr_publisher_dts.test_cancelled
)
##
add_custom_target(rwpkgmgmt_test
DEPENDS
utest_filesystem_proxy_dts.py
- utest_publisher_dts.py
- utest_subscriber_dts.py
+ utest_pkgmgr_publisher_dts.py
+ utest_pkgmgr_subscriber_dts.py
)
import uuid
import xmlrunner
+# Setting RIFT_VAR_ROOT if not already set for unit test execution
+if "RIFT_VAR_ROOT" not in os.environ:
+ os.environ['RIFT_VAR_ROOT'] = os.path.join(os.environ['RIFT_INSTALL'], 'var/rift/unittest')
+
import gi
gi.require_version('RwDts', '1.0')
gi.require_version('RwPkgMgmtYang', '1.0')
import rift.tasklets.rwpkgmgr.publisher as pkg_publisher
import rift.tasklets.rwpkgmgr.rpc as rpc
import rift.test.dts
+from rift.mano.utils.project import ManoProject, DEFAULT_PROJECT
TEST_STRING = "foobar"
+
+class MockPublisher(object):
+ def __init__(self, uid):
+ self.assert_uid = uid
+
+ @asyncio.coroutine
+ def register_downloader(self, *args):
+ return self.assert_uid
+
+
+class MockProject(ManoProject):
+ def __init__(self, log, uid=None):
+ super().__init__(log, name=DEFAULT_PROJECT)
+ self.job_handler = MockPublisher(uid)
+
+
+class MockTasklet:
+ def __init__(self, log, uid=None):
+ self.log = log
+ self.projects = {}
+ project = MockProject(self.log,
+ uid=uid)
+ project.publisher = None
+ self.projects[project.name] = project
+
+
class TestCase(rift.test.dts.AbstractDTSTest):
@classmethod
def configure_schema(cls):
def tearDown(self):
super().tearDown()
- def create_mock_package(self):
+ def create_mock_package(self, project):
uid = str(uuid.uuid4())
path = os.path.join(
- os.getenv('RIFT_ARTIFACTS'),
+ os.getenv('RIFT_VAR_ROOT'),
"launchpad/packages/vnfd",
+ project,
uid)
asset_path = os.path.join(path, "icons")
Verifies the following:
The endpoint RPC returns a URL
"""
- proxy = filesystem.FileSystemProxy(self.loop, self.log)
+ proxy = filesystem.FileSystemProxy(self.loop, self.log, self.dts)
endpoint = rpc.EndpointDiscoveryRpcHandler(self.log, self.dts, self.loop, proxy)
yield from endpoint.register()
ip = RwPkgMgmtYang.YangInput_RwPkgMgmt_GetPackageEndpoint.from_dict({
"package_type": "VNFD",
- "package_id": "BLAHID"})
+ "package_id": "BLAHID",
+ "project_name": DEFAULT_PROJECT})
rpc_out = yield from self.dts.query_rpc(
"I,/get-package-endpoint",
for itr in rpc_out:
result = yield from itr
- assert result.result.endpoint == 'https://127.0.0.1:4567/api/package/vnfd/BLAHID'
+ assert result.result.endpoint == 'https://127.0.0.1:8008/mano/api/package/vnfd/{}/BLAHID'.format(DEFAULT_PROJECT)
@rift.test.dts.async_test
def test_schema_rpc(self):
Verifies the following:
The schema RPC return the schema structure
"""
- proxy = filesystem.FileSystemProxy(self.loop, self.log)
+ proxy = filesystem.FileSystemProxy(self.loop, self.log, self.dts)
endpoint = rpc.SchemaRpcHandler(self.log, self.dts, self.loop, proxy)
yield from endpoint.register()
ip = RwPkgMgmtYang.YangInput_RwPkgMgmt_GetPackageSchema.from_dict({
- "package_type": "VNFD"})
+ "package_type": "VNFD",
+ "project_name": DEFAULT_PROJECT})
rpc_out = yield from self.dts.query_rpc(
"I,/get-package-schema",
1. The file RPC returns a valid UUID thro' DTS
"""
assert_uid = str(uuid.uuid4())
- class MockPublisher:
- @asyncio.coroutine
- def register_downloader(self, *args):
- return assert_uid
- uid, path = self.create_mock_package()
+ uid, path = self.create_mock_package(DEFAULT_PROJECT)
- proxy = filesystem.FileSystemProxy(self.loop, self.log)
+ proxy = filesystem.FileSystemProxy(self.loop, self.log, self.dts)
endpoint = rpc.PackageOperationsRpcHandler(
self.log,
self.dts,
self.loop,
proxy,
- MockPublisher())
+ MockTasklet(self.log, uid=assert_uid))
yield from endpoint.register()
ip = RwPkgMgmtYang.YangInput_RwPkgMgmt_PackageFileAdd.from_dict({
"package_type": "VNFD",
"package_id": uid,
"external_url": "https://raw.githubusercontent.com/RIFTIO/RIFT.ware/master/rift-shell",
- "package_path": "script/rift-shell"})
+ "package_path": "script/rift-shell",
+ "project_name": DEFAULT_PROJECT})
rpc_out = yield from self.dts.query_rpc(
"I,/rw-pkg-mgmt:package-file-add",
Integration test:
1. Verify the end to end flow of package ADD (NO MOCKS)
"""
- uid, path = self.create_mock_package()
+ uid, path = self.create_mock_package(DEFAULT_PROJECT)
- proxy = filesystem.FileSystemProxy(self.loop, self.log)
- publisher = pkg_publisher.DownloadStatusPublisher(self.log, self.dts, self.loop)
+ proxy = filesystem.FileSystemProxy(self.loop, self.log, self.dts)
+ tasklet = MockTasklet(self.log, uid=uid)
+ project = tasklet.projects[DEFAULT_PROJECT]
+ publisher = pkg_publisher.DownloadStatusPublisher(self.log, self.dts, self.loop, project)
+ project.job_handler = publisher
endpoint = rpc.PackageOperationsRpcHandler(
self.log,
self.dts,
self.loop,
proxy,
- publisher)
+ tasklet)
yield from publisher.register()
yield from endpoint.register()
"package_type": "VNFD",
"package_id": uid,
"external_url": "https://raw.githubusercontent.com/RIFTIO/RIFT.ware/master/rift-shell",
+ "project_name": DEFAULT_PROJECT,
"vnfd_file_type": "ICONS",
"package_path": "rift-shell"})
yield from asyncio.sleep(5, loop=self.loop)
filepath = os.path.join(path, ip.vnfd_file_type.lower(), ip.package_path)
+ self.log.debug("Filepath: {}".format(filepath))
assert os.path.isfile(filepath)
mode = oct(os.stat(filepath)[stat.ST_MODE])
assert str(mode) == "0o100664"
Integration test:
1. Verify the end to end flow of package ADD (NO MOCKS)
"""
- uid, path = self.create_mock_package()
+ uid, path = self.create_mock_package(DEFAULT_PROJECT)
- proxy = filesystem.FileSystemProxy(self.loop, self.log)
+ proxy = filesystem.FileSystemProxy(self.loop, self.log, self.dts)
endpoint = rpc.PackageDeleteOperationsRpcHandler(
self.log,
self.dts,
ip = RwPkgMgmtYang.YangInput_RwPkgMgmt_PackageFileDelete.from_dict({
"package_type": "VNFD",
"package_id": uid,
+ "package_path": "logo.png",
"vnfd_file_type": "ICONS",
- "package_path": "logo.png"})
+ "project_name": DEFAULT_PROJECT})
assert os.path.isfile(os.path.join(path, ip.vnfd_file_type.lower(), ip.package_path))
--- /dev/null
+#!/usr/bin/env python3
+
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import argparse
+import asyncio
+import gi
+import logging
+import mock
+import os
+import sys
+import unittest
+import uuid
+import xmlrunner
+
+#Setting RIFT_VAR_ROOT if not already set for unit test execution
+if "RIFT_VAR_ROOT" not in os.environ:
+ os.environ['RIFT_VAR_ROOT'] = os.path.join(os.environ['RIFT_INSTALL'], 'var/rift/unittest')
+
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwPkgMgmtYang', '1.0')
+from gi.repository import (
+ RwDts as rwdts,
+ RwPkgMgmtYang
+ )
+import rift.tasklets.rwpkgmgr.downloader as downloader
+import rift.tasklets.rwpkgmgr.publisher as pkg_publisher
+import rift.test.dts
+from rift.mano.utils.project import ManoProject, DEFAULT_PROJECT
+
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+class TestCase(rift.test.dts.AbstractDTSTest):
+ @classmethod
+ def configure_schema(cls):
+ return RwPkgMgmtYang.get_schema()
+
+ @classmethod
+ def configure_timeout(cls):
+ return 240
+
+ def configure_test(self, loop, test_id):
+ self.log.debug("STARTING - %s", test_id)
+ self.tinfo = self.new_tinfo(str(test_id))
+ self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+ self.project = ManoProject(self.log, name=DEFAULT_PROJECT)
+
+ self.job_handler = pkg_publisher.DownloadStatusPublisher(self.log, self.dts,
+ self.loop, self.project)
+
+ def tearDown(self):
+ super().tearDown()
+
+ @asyncio.coroutine
+ def get_published_xpaths(self):
+ published_xpaths = set()
+
+ res_iter = yield from self.dts.query_read("D,/rwdts:dts")
+ for i in res_iter:
+ res = (yield from i).result
+ for member in res.member:
+ published_xpaths |= {reg.keyspec for reg in member.state.registration if reg.flags == "publisher"}
+
+ return published_xpaths
+
+ @asyncio.coroutine
+ def read_xpath(self, xpath):
+ itr = yield from self.dts.query_read(xpath)
+
+ result = None
+ for fut in itr:
+ result = yield from fut
+ return result.result
+
+ @rift.test.dts.async_test
+ def test_download_publisher(self):
+ yield from self.job_handler.register()
+ published_xpaths = yield from self.get_published_xpaths()
+ assert self.job_handler.xpath() in published_xpaths
+
+ @rift.test.dts.async_test
+ def test_publish(self):
+ """
+ Asserts:
+ 1. Verify if an update on_download_progess & on_download_finished
+ triggers a DTS update
+ 2. Verify if the internal store is updated
+ """
+ yield from self.job_handler.register()
+
+ mock_msg = RwPkgMgmtYang.YangData_RwProject_Project_DownloadJobs_Job.from_dict({
+ "url": "http://foo/bar",
+ "package_id": "123",
+ "download_id": str(uuid.uuid4())})
+
+ yield from self.job_handler._dts_publisher(mock_msg)
+ yield from asyncio.sleep(5, loop=self.loop)
+
+ xpath = self.project.add_project("/download-jobs/job[download-id={}]".
+ format(quoted_key(mock_msg.download_id)))
+ itr = yield from self.dts.query_read(xpath)
+
+ result = None
+ for fut in itr:
+ result = yield from fut
+ result = result.result
+
+ self.log.debug("Mock msg: {}".format(mock_msg))
+ assert result == mock_msg
+
+ # Modify the msg
+ mock_msg.url = "http://bar/foo"
+ yield from self.job_handler._dts_publisher(mock_msg)
+ yield from asyncio.sleep(5, loop=self.loop)
+
+ itr = yield from self.dts.query_read(xpath)
+
+ result = None
+ for fut in itr:
+ result = yield from fut
+ result = result.result
+ assert result == mock_msg
+
+
+ @rift.test.dts.async_test
+ def test_url_download(self):
+ """
+ Integration Test:
+ Test the updates with download/url.py
+ """
+ yield from self.job_handler.register()
+
+ proxy = mock.MagicMock()
+
+ url = "http://sharedfiles/common/unittests/plantuml.jar"
+ url_downloader = downloader.PackageFileDownloader(url, "1", "/", "VNFD", "SCRIPTS", "VNF_CONFIG", proxy)
+
+ download_id = yield from self.job_handler.register_downloader(url_downloader)
+ assert download_id is not None
+
+ # Waiting for 5 secs to be sure that the file is downloaded
+ yield from asyncio.sleep(10, loop=self.loop)
+ xpath = self.project.add_project("/download-jobs/job[download-id={}]".format(
+ quoted_key(download_id)))
+ result = yield from self.read_xpath(xpath)
+ self.log.debug("Test result before complete check - %s", result)
+ assert result.status == "COMPLETED"
+ assert len(self.job_handler.tasks) == 0
+
+ @rift.test.dts.async_test
+ def test_url_download_unreachable_ip(self):
+ """
+ Integration Test:
+ Ensure that a bad IP does not block forever
+ """
+ yield from self.job_handler.register()
+
+ proxy = mock.MagicMock()
+
+ # Here, we are assuming that there is no HTTP server at 10.1.2.3
+ url = "http://10.1.2.3/common/unittests/plantuml.jar"
+ url_downloader = downloader.PackageFileDownloader(url, "1", "/", "VNFD", "SCRIPTS", "VNF_CONFIG", proxy)
+ self.log.debug("Downloader url: {}".format(url_downloader))
+
+ download_id = yield from self.job_handler.register_downloader(url_downloader)
+ self.log.debug("Download id: {}".format(download_id))
+ assert download_id is not None
+
+ # Waiting for 60 secs to be sure all reconnect attempts have been exhausted
+ yield from asyncio.sleep(60, loop=self.loop)
+ xpath = self.project.add_project("/download-jobs/job[download-id={}]".
+ format(quoted_key(download_id)))
+ result = yield from self.read_xpath(xpath)
+ self.log.debug("Test result before complete check - %s", result)
+ assert result.status == "FAILED"
+ assert len(self.job_handler.tasks) == 0
+
+
+ @rift.test.dts.async_test
+ def test_cancelled(self):
+ """
+ Integration Test:
+ 1. Test the updates with downloader.py
+ 2. Verifies if cancel triggers the job status to move to cancelled
+ """
+ yield from self.job_handler.register()
+
+ proxy = mock.MagicMock()
+ url = "http://sharedfiles/common/unittests/Fedora-x86_64-20-20131211.1-sda-ping.qcow2"
+ url_downloader = downloader.PackageFileDownloader(url, "1", "/", "VNFD", "SCRIPTS", "VNF_CONFIG", proxy)
+
+ download_id = yield from self.job_handler.register_downloader(url_downloader)
+ assert download_id is not None
+ xpath = self.project.add_project("/download-jobs/job[download-id={}]".
+ format(quoted_key(download_id)))
+
+ # wait long enough to have the state be in IN_PROGRESS
+ yield from asyncio.sleep(0.2, loop=self.loop)
+
+ result = yield from self.read_xpath(xpath)
+ self.log.debug("Test result before in_progress check - %s", result)
+ assert result.status == "IN_PROGRESS"
+
+ yield from self.job_handler.cancel_download(download_id)
+ yield from asyncio.sleep(3, loop=self.loop)
+ result = yield from self.read_xpath(xpath)
+ self.log.debug("Test result before cancel check - %s", result)
+ assert result.status == "CANCELLED"
+ assert len(self.job_handler.tasks) == 0
+
+
+def main():
+ runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-v', '--verbose', action='store_true')
+ parser.add_argument('-n', '--no-runner', action='store_true')
+ args, unittest_args = parser.parse_known_args()
+ if args.no_runner:
+ runner = None
+
+ TestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
+
+ unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import asyncio
+import gi
+import sys
+import unittest
+import uuid
+import os
+
+#Setting RIFT_VAR_ROOT if not already set for unit test execution
+if "RIFT_VAR_ROOT" not in os.environ:
+ os.environ['RIFT_VAR_ROOT'] = os.path.join(os.environ['RIFT_INSTALL'], 'var/rift/unittest')
+
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwPkgMgmtYang', '1.0')
+from gi.repository import (
+ RwPkgMgmtYang,
+ RwDts as rwdts,
+ )
+import rift.tasklets.rwpkgmgr.subscriber as pkg_subscriber
+import rift.test.dts
+from rift.mano.utils.project import ManoProject, DEFAULT_PROJECT
+
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+
+class DescriptorPublisher(object):
+ # TODO: Need to be moved to a central page, too many copy pastes
+ def __init__(self, log, dts, loop):
+ self.log = log
+ self.loop = loop
+ self.dts = dts
+
+ self._registrations = []
+
+ @asyncio.coroutine
+ def publish(self, w_path, path, desc):
+ ready_event = asyncio.Event(loop=self.loop)
+
+ @asyncio.coroutine
+ def on_ready(regh, status):
+ self.log.debug("Create element: %s, obj-type:%s obj:%s",
+ path, type(desc), desc)
+ with self.dts.transaction() as xact:
+ regh.create_element(path, desc, xact.xact)
+ self.log.debug("Created element: %s, obj:%s", path, desc)
+ ready_event.set()
+
+ handler = rift.tasklets.DTS.RegistrationHandler(
+ on_ready=on_ready
+ )
+
+ self.log.debug("Registering path: %s, obj:%s", w_path, desc)
+ reg = yield from self.dts.register(
+ w_path,
+ handler,
+ flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ
+ )
+ self._registrations.append(reg)
+ self.log.debug("Registered path : %s", w_path)
+ yield from ready_event.wait()
+
+ return reg
+
+ def unpublish_all(self):
+ self.log.debug("Deregistering all published descriptors")
+ for reg in self._registrations:
+ reg.deregister()
+
+class SubscriberStoreDtsTestCase(rift.test.dts.AbstractDTSTest):
+ @classmethod
+ def configure_schema(cls):
+ return RwPkgMgmtYang.get_schema()
+
+ @classmethod
+ def configure_timeout(cls):
+ return 240
+
+ def configure_test(self, loop, test_id):
+ self.log.debug("STARTING - %s", test_id)
+ self.tinfo = self.new_tinfo(str(test_id))
+ self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+ self.publisher = DescriptorPublisher(self.log, self.dts, self.loop)
+ self.project = ManoProject(self.log, name=DEFAULT_PROJECT)
+
+ def tearDown(self):
+ super().tearDown()
+
+ @rift.test.dts.async_test
+ def test_download_status_handler(self):
+
+ mock_msg = RwPkgMgmtYang.YangData_RwProject_Project_DownloadJobs_Job.from_dict({
+ "url": "http://foo/bar",
+ "package_id": "123",
+ "download_id": str(uuid.uuid4())})
+
+ w_xpath = self.project.add_project("D,/rw-pkg-mgmt:download-jobs/rw-pkg-mgmt:job")
+ xpath = "{}[download-id={}]".format(w_xpath, quoted_key(mock_msg.download_id))
+
+ mock_called = False
+ def mock_cb(msg, status):
+ nonlocal mock_called
+ assert msg == mock_msg
+ mock_called = True
+
+ sub = pkg_subscriber.DownloadStatusSubscriber(
+ self.log,
+ self.dts,
+ self.loop,
+ self.project,
+ callback=mock_cb)
+
+ yield from sub.register()
+ yield from asyncio.sleep(1, loop=self.loop)
+
+ yield from self.publisher.publish(w_xpath, xpath, mock_msg)
+ yield from asyncio.sleep(1, loop=self.loop)
+
+ assert mock_called is True
+
+
+def main(argv=sys.argv[1:]):
+
+ # The unittest framework requires a program name, so use the name of this
+ # file instead (we do not want to have to pass a fake program name to main
+ # when this is called from the interpreter).
+ unittest.main(
+ argv=[__file__] + argv,
+ testRunner=None#xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+ )
+
+if __name__ == '__main__':
+ main()
+++ /dev/null
-#!/usr/bin/env python3
-
-#
-# Copyright 2016 RIFT.IO Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import argparse
-import asyncio
-import logging
-import mock
-import os
-import sys
-import unittest
-import uuid
-import xmlrunner
-
-import gi
-gi.require_version('RwDts', '1.0')
-gi.require_version('RwPkgMgmtYang', '1.0')
-from gi.repository import (
- RwDts as rwdts,
- RwPkgMgmtYang
- )
-import rift.tasklets.rwpkgmgr.downloader as downloader
-import rift.tasklets.rwpkgmgr.publisher as pkg_publisher
-import rift.test.dts
-
-
-class TestCase(rift.test.dts.AbstractDTSTest):
- @classmethod
- def configure_schema(cls):
- return RwPkgMgmtYang.get_schema()
-
- @classmethod
- def configure_timeout(cls):
- return 240
-
- def configure_test(self, loop, test_id):
- self.log.debug("STARTING - %s", test_id)
- self.tinfo = self.new_tinfo(str(test_id))
- self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
-
- self.job_handler = pkg_publisher.DownloadStatusPublisher(self.log, self.dts, self.loop)
-
- def tearDown(self):
- super().tearDown()
-
- @asyncio.coroutine
- def get_published_xpaths(self):
- published_xpaths = set()
-
- res_iter = yield from self.dts.query_read("D,/rwdts:dts")
- for i in res_iter:
- res = (yield from i).result
- for member in res.member:
- published_xpaths |= {reg.keyspec for reg in member.state.registration if reg.flags == "publisher"}
-
- return published_xpaths
-
- @asyncio.coroutine
- def read_xpath(self, xpath):
- itr = yield from self.dts.query_read(xpath)
-
- result = None
- for fut in itr:
- result = yield from fut
- return result.result
-
- @rift.test.dts.async_test
- def test_download_publisher(self):
- yield from self.job_handler.register()
- published_xpaths = yield from self.get_published_xpaths()
- assert self.job_handler.xpath() in published_xpaths
-
- @rift.test.dts.async_test
- def test_publish(self):
- """
- Asserts:
- 1. Verify if an update on_download_progess & on_download_finished
- triggers a DTS update
- 2. Verify if the internal store is updated
- """
- yield from self.job_handler.register()
-
- mock_msg = RwPkgMgmtYang.DownloadJob.from_dict({
- "url": "http://foo/bar",
- "package_id": "123",
- "download_id": str(uuid.uuid4())})
-
- yield from self.job_handler._dts_publisher(mock_msg)
- yield from asyncio.sleep(5, loop=self.loop)
-
- itr = yield from self.dts.query_read("/download-jobs/job[download-id='{}']".format(
- mock_msg.download_id))
-
- result = None
- for fut in itr:
- result = yield from fut
- result = result.result
-
- print ("Mock ", mock_msg)
- assert result == mock_msg
-
- # Modify the msg
- mock_msg.url = "http://bar/foo"
- yield from self.job_handler._dts_publisher(mock_msg)
- yield from asyncio.sleep(5, loop=self.loop)
-
- itr = yield from self.dts.query_read("/download-jobs/job[download-id='{}']".format(
- mock_msg.download_id))
-
- result = None
- for fut in itr:
- result = yield from fut
- result = result.result
- assert result == mock_msg
-
-
- @rift.test.dts.async_test
- def test_url_download(self):
- """
- Integration Test:
- Test the updates with download/url.py
- """
- yield from self.job_handler.register()
-
- proxy = mock.MagicMock()
-
- url = "http://boson.eng.riftio.com/common/unittests/plantuml.jar"
- url_downloader = downloader.PackageFileDownloader(url, "1", "/", "VNFD", "SCRIPTS", "VNF_CONFIG", proxy)
-
- download_id = yield from self.job_handler.register_downloader(url_downloader)
- assert download_id is not None
-
- # Waiting for 5 secs to be sure that the file is downloaded
- yield from asyncio.sleep(10, loop=self.loop)
- xpath = "/download-jobs/job[download-id='{}']".format(
- download_id)
- result = yield from self.read_xpath(xpath)
- self.log.debug("Test result before complete check - %s", result)
- assert result.status == "COMPLETED"
- assert len(self.job_handler.tasks) == 0
-
- @rift.test.dts.async_test
- def test_url_download_unreachable_ip(self):
- """
- Integration Test:
- Ensure that a bad IP does not block forever
- """
- yield from self.job_handler.register()
-
- proxy = mock.MagicMock()
-
- # Here, we are assuming that there is no HTTP server at 10.1.2.3
- url = "http://10.1.2.3/common/unittests/plantuml.jar"
- url_downloader = downloader.PackageFileDownloader(url, "1", "/", "VNFD", "SCRIPTS", "VNF_CONFIG", proxy)
-
- download_id = yield from self.job_handler.register_downloader(url_downloader)
- assert download_id is not None
-
- # Waiting for 10 secs to be sure all reconnect attempts have been exhausted
- yield from asyncio.sleep(10, loop=self.loop)
- xpath = "/download-jobs/job[download-id='{}']".format(
- download_id)
- result = yield from self.read_xpath(xpath)
- self.log.debug("Test result before complete check - %s", result)
- assert result.status == "FAILED"
- assert len(self.job_handler.tasks) == 0
-
-
- @rift.test.dts.async_test
- def test_cancelled(self):
- """
- Integration Test:
- 1. Test the updates with downloader.py
- 2. Verifies if cancel triggers the job status to move to cancelled
- """
- yield from self.job_handler.register()
-
- proxy = mock.MagicMock()
- url = "http://boson.eng.riftio.com/common/unittests/Fedora-x86_64-20-20131211.1-sda-ping.qcow2"
- url_downloader = downloader.PackageFileDownloader(url, "1", "/", "VNFD", "SCRIPTS", "VNF_CONFIG", proxy)
-
- download_id = yield from self.job_handler.register_downloader(url_downloader)
- assert download_id is not None
- xpath = "/download-jobs/job[download-id='{}']".format(
- download_id)
-
- yield from asyncio.sleep(1, loop=self.loop)
-
- result = yield from self.read_xpath(xpath)
- self.log.debug("Test result before in_progress check - %s", result)
- assert result.status == "IN_PROGRESS"
-
- yield from self.job_handler.cancel_download(download_id)
- yield from asyncio.sleep(3, loop=self.loop)
- result = yield from self.read_xpath(xpath)
- self.log.debug("Test result before cancel check - %s", result)
- assert result.status == "CANCELLED"
- assert len(self.job_handler.tasks) == 0
-
-
-def main():
- runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
-
- parser = argparse.ArgumentParser()
- parser.add_argument('-v', '--verbose', action='store_true')
- parser.add_argument('-n', '--no-runner', action='store_true')
- args, unittest_args = parser.parse_known_args()
- if args.no_runner:
- runner = None
-
- TestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
-
- unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
-
-if __name__ == '__main__':
- main()
+++ /dev/null
-
-#
-# Copyright 2016 RIFT.IO Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import asyncio
-import sys
-import unittest
-import uuid
-
-import gi
-gi.require_version('RwDtsYang', '1.0')
-gi.require_version('RwPkgMgmtYang', '1.0')
-from gi.repository import (
- RwPkgMgmtYang,
- RwDts as rwdts,
- )
-import rift.tasklets.rwpkgmgr.subscriber as pkg_subscriber
-import rift.test.dts
-
-
-class DescriptorPublisher(object):
- # TODO: Need to be moved to a central page, too many copy pastes
- def __init__(self, log, dts, loop):
- self.log = log
- self.loop = loop
- self.dts = dts
-
- self._registrations = []
-
- @asyncio.coroutine
- def publish(self, w_path, path, desc):
- ready_event = asyncio.Event(loop=self.loop)
-
- @asyncio.coroutine
- def on_ready(regh, status):
- self.log.debug("Create element: %s, obj-type:%s obj:%s",
- path, type(desc), desc)
- with self.dts.transaction() as xact:
- regh.create_element(path, desc, xact.xact)
- self.log.debug("Created element: %s, obj:%s", path, desc)
- ready_event.set()
-
- handler = rift.tasklets.DTS.RegistrationHandler(
- on_ready=on_ready
- )
-
- self.log.debug("Registering path: %s, obj:%s", w_path, desc)
- reg = yield from self.dts.register(
- w_path,
- handler,
- flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ
- )
- self._registrations.append(reg)
- self.log.debug("Registered path : %s", w_path)
- yield from ready_event.wait()
-
- return reg
-
- def unpublish_all(self):
- self.log.debug("Deregistering all published descriptors")
- for reg in self._registrations:
- reg.deregister()
-
-class SubscriberStoreDtsTestCase(rift.test.dts.AbstractDTSTest):
- @classmethod
- def configure_schema(cls):
- return RwPkgMgmtYang.get_schema()
-
- @classmethod
- def configure_timeout(cls):
- return 240
-
- def configure_test(self, loop, test_id):
- self.log.debug("STARTING - %s", test_id)
- self.tinfo = self.new_tinfo(str(test_id))
- self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
- self.publisher = DescriptorPublisher(self.log, self.dts, self.loop)
-
- def tearDown(self):
- super().tearDown()
-
- @rift.test.dts.async_test
- def test_download_status_handler(self):
-
- mock_msg = RwPkgMgmtYang.DownloadJob.from_dict({
- "url": "http://foo/bar",
- "package_id": "123",
- "download_id": str(uuid.uuid4())})
-
- w_xpath = "D,/rw-pkg-mgmt:download-jobs/rw-pkg-mgmt:job"
- xpath = "{}[download-id='{}']".format(w_xpath, mock_msg.download_id)
-
- mock_called = False
- def mock_cb(msg, status):
- nonlocal mock_called
- assert msg == mock_msg
- mock_called = True
-
- sub = pkg_subscriber.DownloadStatusSubscriber(
- self.log,
- self.dts,
- self.loop,
- callback=mock_cb)
-
- yield from sub.register()
- yield from asyncio.sleep(1, loop=self.loop)
-
- yield from self.publisher.publish(w_xpath, xpath, mock_msg)
- yield from asyncio.sleep(1, loop=self.loop)
-
- assert mock_called is True
-
-
-def main(argv=sys.argv[1:]):
-
- # The unittest framework requires a program name, so use the name of this
- # file instead (we do not want to have to pass a fake program name to main
- # when this is called from the interpreter).
- unittest.main(
- argv=[__file__] + argv,
- testRunner=None#xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
- )
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
##
# This function creates an install target for the plugin artifacts
##
-rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+rift_install_gobject_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py COMPONENT ${INSTALL_COMPONENT})
# Workaround RIFT-6485 - rpmbuild defaults to python2 for
# anything not in a site-packages directory so we have to
rift/tasklets/${TASKLET_NAME}/rwresmgr_config.py
rift/tasklets/${TASKLET_NAME}/rwresmgr_core.py
rift/tasklets/${TASKLET_NAME}/rwresmgr_events.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY)
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
self._parent = parent
self._cloud_sub = None
+ self._res_sub = None
+ self._project = parent._project
@asyncio.coroutine
def register(self):
yield from self.register_resource_pool_operational_data()
- self.register_cloud_account_config()
+ yield from self.register_cloud_account_config()
+
+ def deregister(self):
+ self._log.debug("De-register for project {}".format(self._project.name))
+ if self._cloud_sub:
+ self._cloud_sub.deregister()
+ self._cloud_sub = None
+
+ if self._res_sub:
+ self._res_sub.delete_element(
+ self._project.add_project(ResourceMgrConfig.XPATH_POOL_OPER_DATA))
+ self._res_sub.deregister()
+ self._res_sub = None
+ @asyncio.coroutine
def register_cloud_account_config(self):
def on_add_cloud_account_apply(account):
self._log.debug("Received on_add_cloud_account: %s", account)
)
self._cloud_sub = rift.mano.cloud.CloudAccountConfigSubscriber(
- self._dts, self._log, self._rwlog_hdl, cloud_callbacks
- )
- self._cloud_sub.register()
+ self._dts, self._log, self._rwlog_hdl,
+ self._project, cloud_callbacks
+ )
+ yield from self._cloud_sub.register()
@asyncio.coroutine
def register_resource_pool_operational_data(self):
@asyncio.coroutine
def on_prepare(xact_info, action, ks_path, msg):
self._log.debug("ResourceMgr providing resource-pool information")
- msg = RwResourceMgrYang.ResourcePoolRecords()
+ msg = RwResourceMgrYang.YangData_RwProject_Project_ResourcePoolRecords()
cloud_accounts = self._parent.get_cloud_account_names()
for cloud_account_name in cloud_accounts:
cloud_account_msg.records.append(pool_info)
xact_info.respond_xpath(rwdts.XactRspCode.ACK,
- ResourceMgrConfig.XPATH_POOL_OPER_DATA,
+ self._project.add_project(ResourceMgrConfig.XPATH_POOL_OPER_DATA),
msg=msg,)
- self._log.debug("Registering for Resource Mgr resource-pool-record using xpath: %s",
- ResourceMgrConfig.XPATH_POOL_OPER_DATA)
+ xpath = self._project.add_project(ResourceMgrConfig.XPATH_POOL_OPER_DATA)
+ self._log.debug("Registering for Resource Mgr resource-pool-record using xpath: {}".
+ format(xpath))
handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
- response = yield from self._dts.register(xpath=ResourceMgrConfig.XPATH_POOL_OPER_DATA,
- handler=handler,
- flags=rwdts.Flag.PUBLISHER)
-
+ self._res_sub = yield from self._dts.register(xpath=xpath,
+ handler=handler,
+ flags=rwdts.Flag.PUBLISHER)
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
"19.0.0.0/24",
"20.0.0.0/24",
"21.0.0.0/24",
- "22.0.0.0/24",]
+ "22.0.0.0/24",
+ "23.0.0.0/24",
+ "24.0.0.0/24",
+ "25.0.0.0/24",
+ "26.0.0.0/24",
+ "27.0.0.0/24",
+ "28.0.0.0/24",
+ "29.0.0.0/24",
+ "30.0.0.0/24",
+ "31.0.0.0/24",
+ "32.0.0.0/24",
+ "33.0.0.0/24",
+ "34.0.0.0/24",
+ "35.0.0.0/24",
+ "36.0.0.0/24",
+ "37.0.0.0/24",
+ "38.0.0.0/24"]
self._subnet_ptr = 0
+ self._boot_cache = {'compute': []}
+ self._lock = asyncio.Lock(loop=self._loop)
+
+ def get_cloud_account(self):
+ return self._account
def _select_link_subnet(self):
subnet = self._subnets[self._subnet_ptr]
@asyncio.coroutine
def create_virtual_network(self, req_params):
- #rc, rsp = self._rwcal.get_virtual_link_list(self._account)
- self._log.debug("Calling get_virtual_link_list API")
- rc, rsp = yield from self._loop.run_in_executor(self._executor,
- self._rwcal.get_virtual_link_list,
- self._account)
-
- assert rc == RwStatus.SUCCESS
-
- links = [vlink for vlink in rsp.virtual_link_info_list if vlink.name == req_params.name]
- if links:
- self._log.debug("Found existing virtual-network with matching name in cloud. Reusing the virtual-network with id: %s" %(links[0].virtual_link_id))
+ rc, link = yield from self._loop.run_in_executor(self._executor,
+ self._rwcal.get_virtual_link_by_name,
+ self._account,
+ req_params.name)
+ if link:
+ self._log.debug("Found existing virtual-network with matching name in cloud. Reusing the virtual-network with id: %s" %(link.virtual_link_id))
if req_params.vim_network_name:
resource_type = 'precreated'
else:
# This is case of realloc
resource_type = 'dynamic'
- return (resource_type, links[0].virtual_link_id)
+ return (resource_type, link.virtual_link_id)
elif req_params.vim_network_name:
self._log.error("Virtual-network-allocate operation failed for cloud account: %s Vim Network with name %s does not pre-exist",
self._account.name, req_params.vim_network_name)
raise ResMgrCALOperationFailure("Virtual-network allocate operation failed for cloud account: %s Vim Network with name %s does not pre-exist"
%(self._account.name, req_params.vim_network_name))
- params = RwcalYang.VirtualLinkReqParams()
+ params = RwcalYang.YangData_RwProject_Project_VirtualLinkReqParams()
params.from_dict(req_params.as_dict())
params.subnet = self._select_link_subnet()
#rc, rs = self._rwcal.create_virtual_link(self._account, params)
network_id)
raise ResMgrCALOperationFailure("Virtual-network release operation failed for cloud account: %s. ResourceId: %s" %(self._account.name, network_id))
- @asyncio.coroutine
+ @asyncio.coroutine
def get_virtual_network_info(self, network_id):
#rc, rs = self._rwcal.get_virtual_link(self._account, network_id)
self._log.debug("Calling get_virtual_link_info API with id: %s" %(network_id))
@asyncio.coroutine
def create_virtual_compute(self, req_params):
- #rc, rsp = self._rwcal.get_vdu_list(self._account)
- self._log.debug("Calling get_vdu_list API")
-
- rc, rsp = yield from self._loop.run_in_executor(self._executor,
- self._rwcal.get_vdu_list,
- self._account)
- assert rc == RwStatus.SUCCESS
- vdus = [vm for vm in rsp.vdu_info_list if vm.name == req_params.name]
+ if not self._boot_cache['compute']:
+ self._log.debug("Calling get_vdu_list API")
+ yield from self._lock.acquire()
+ try:
+ self._log.debug("Populating compute cache ")
+ rc, rsp = yield from self._loop.run_in_executor(self._executor,
+ self._rwcal.get_vdu_list,
+ self._account)
+
+ if rc.status != RwStatus.SUCCESS:
+ self._log.error("Virtual-compute-info operation failed for cloud account: %s - error_msg: %s, Traceback: %s",
+ self._account.name, rc.error_msg, rc.traceback)
+ raise ResMgrCALOperationFailure("Virtual-compute-info operation failed for cloud account: %s, Error (%s)"
+ % (self._account.name, rc.error_msg))
+ self._boot_cache['compute'] = rsp.vdu_info_list
+ finally:
+ self._lock.release()
+ else:
+ self._log.debug("!!!!!!!! Found compute cache ")
+
+ vdus = [vm for vm in self._boot_cache['compute'] if vm.name == req_params.name]
+
if vdus:
self._log.debug("Found existing virtual-compute with matching name in cloud. Reusing the virtual-compute element with id: %s" %(vdus[0].vdu_id))
return vdus[0].vdu_id
- params = RwcalYang.VDUInitParams()
+ params = RwcalYang.YangData_RwProject_Project_VduInitParams()
params.from_dict(req_params.as_dict())
if 'image_name' in req_params:
image_checksum = req_params.image_checksum if req_params.has_field("image_checksum") else None
params.image_id = yield from self.get_image_id_from_image_info(req_params.image_name, image_checksum)
- #rc, rs = self._rwcal.create_vdu(self._account, params)
self._log.debug("Calling create_vdu API with params %s" %(str(params)))
rc, rs = yield from self._loop.run_in_executor(self._executor,
self._rwcal.create_vdu,
self._log.error("Virtual-compute-modify operation failed for cloud account: %s", self._account.name)
raise ResMgrCALOperationFailure("Virtual-compute-modify operation failed for cloud account: %s" %(self._account.name))
- @asyncio.coroutine
+ @asyncio.coroutine
def delete_virtual_compute(self, compute_id):
#rc = self._rwcal.delete_vdu(self._account, compute_id)
self._log.debug("Calling delete_vdu API with id: %s" %(compute_id))
+ # Delete the cache
+ self._boot_cache['compute'] = list()
+
rc = yield from self._loop.run_in_executor(self._executor,
self._rwcal.delete_vdu,
self._account,
compute_id)
raise ResMgrCALOperationFailure("Virtual-compute-release operation failed for cloud account: %s. ResourceID: %s" %(self._account.name, compute_id))
- @asyncio.coroutine
- def get_virtual_compute_info(self, compute_id):
- #rc, rs = self._rwcal.get_vdu(self._account, compute_id)
+ @asyncio.coroutine
+ def get_virtual_compute_info(self, compute_id, mgmt_network=""):
+ #rc, rs = self._rwcal.get_vdu(self._account, compute_id, None)
self._log.debug("Calling get_vdu API with id: %s" %(compute_id))
rc, rs = yield from self._loop.run_in_executor(self._executor,
self._rwcal.get_vdu,
self._account,
- compute_id)
- if rc != RwStatus.SUCCESS:
- self._log.error("Virtual-compute-info operation failed for cloud account: %s. ResourceID: %s",
- self._account.name,
- compute_id)
- raise ResMgrCALOperationFailure("Virtual-compute-info operation failed for cloud account: %s. ResourceID: %s" %(self._account.name, compute_id))
+ compute_id,
+ mgmt_network)
+ if rc.status != RwStatus.SUCCESS:
+ self._log.error("Virtual-compute-info operation failed for cloud account: %s - error_msg: %s, Traceback: %s",
+ self._account.name, rc.error_msg, rc.traceback)
+ raise ResMgrCALOperationFailure("Virtual-compute-info operation failed for cloud account: %s, ResourceID: %s, Error (%s)"
+ %(self._account.name, compute_id, rc.error_msg))
return rs
@asyncio.coroutine
@asyncio.coroutine
def create_compute_flavor(self, request):
- flavor = RwcalYang.FlavorInfoItem()
+ flavor = RwcalYang.YangData_RwProject_Project_VimResources_FlavorinfoList()
flavor.name = str(uuid.uuid4())
epa_types = ['vm_flavor', 'guest_epa', 'host_epa', 'host_aggregate']
epa_dict = {k: v for k, v in request.as_dict().items() if k in epa_types}
return resource_info
def get_pool_info(self):
- info = RwResourceMgrYang.ResourceRecordInfo()
+ info = RwResourceMgrYang.YangData_RwProject_Project_ResourcePoolRecords_CloudAccount_Records()
self._log.info("Providing info for pool: %s", self.name)
info.name = self.name
if self.pool_type:
@asyncio.coroutine
def allocate_dynamic_resource(self, request):
resource_type, resource_id = yield from self._cal.create_virtual_network(request)
- if resource_id in self._all_resources:
- self._log.error("Resource with id %s name %s of type %s is already used", resource_id, request.name, resource_type)
- raise ResMgrNoResourcesAvailable("Resource with name %s of type network is already used" %(resource_id))
+ # Removing the following check (RIFT-15144 MANO fails to attach to existing VIM network)
+ #if resource_id in self._all_resources:
+ # self._log.error("Resource with id %s name %s of type %s is already used", resource_id, request.name, resource_type)
+ # raise ResMgrNoResourcesAvailable("Resource with name %s of type network is already used" %(resource_id))
resource = self._resource_class(resource_id, resource_type, request)
self._all_resources[resource_id] = resource
self._allocated_resources[resource_id] = resource
info = yield from self._cal.get_virtual_network_info(resource.resource_id)
self._log.info("Successfully retrieved virtual-network information from CAL with resource-id: %s. Info: %s",
resource.resource_id, str(info))
- response = RwResourceMgrYang.VirtualLinkEventData_ResourceInfo()
+ response = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData_ResourceInfo()
response.from_dict(info.as_dict())
response.pool_name = self.name
response.resource_state = 'active'
if resource is None:
raise ResMgrNoResourcesAvailable("No matching resource available for allocation from pool: %s" %(self.name))
- requested_params = RwcalYang.VDUInitParams()
+ requested_params = RwcalYang.YangData_RwProject_Project_VduInitParams()
requested_params.from_dict(request.as_dict())
resource.requested_params = requested_params
return resource
@asyncio.coroutine
def get_resource_info(self, resource):
- info = yield from self._cal.get_virtual_compute_info(resource.resource_id)
+ mgmt_network = ""
+ if resource.request.mgmt_network is not None:
+ mgmt_network = resource.request.mgmt_network
+ info = yield from self._cal.get_virtual_compute_info(resource.resource_id, mgmt_network=mgmt_network)
self._log.info("Successfully retrieved virtual-compute information from CAL with resource-id: %s. Info: %s",
resource.resource_id, str(info))
- response = RwResourceMgrYang.VDUEventData_ResourceInfo()
+ response = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData_ResourceInfo()
response.from_dict(info.as_dict())
response.pool_name = self.name
response.resource_state = self._get_resource_state(info, resource.requested_params)
info = yield from self._cal.get_virtual_compute_info(resource_id)
self._log.info("Successfully retrieved virtual-compute information from CAL with resource-id: %s. Info: %s",
resource_id, str(info))
- return info
+ return info
def _get_resource_state(self, resource_info, requested_params):
if resource_info.state == 'failed':
self._log.error("<Compute-Resource: %s> Reached failed state.",
resource_info.name)
+ self._log.error("<Compute-Resource: {}> info at the time of failure: {}".format(resource_info.name, str(resource_info)))
return 'failed'
if resource_info.state != 'active':
return 'pending'
if (requested_params.has_field('allocate_public_address')) and (requested_params.allocate_public_address == True):
- if not resource_info.has_field('public_ip'):
- self._log.warning("<Compute-Resource: %s> Management IP not assigned- waiting for public ip, %s",
- resource_info.name, requested_params)
- return 'pending'
+ if not resource_info.has_field('public_ip'):
+ self._log.warning("<Compute-Resource: %s> Management IP not assigned- waiting for public ip, %s",
+ resource_info.name, requested_params)
+ return 'pending'
if not conn_pts_len_equal():
self._log.warning("<Compute-Resource: %s> Waiting for requested number of ports to be assigned to virtual-compute, requested: %d, assigned: %d",
elif available.has_field('pcie_device'):
self._log.debug("Rejecting available flavor because pcie_device not required but available")
return False
-
-
+
+
if required.has_field('mempage_size'):
self._log.debug("Matching mempage_size")
if available.has_field('mempage_size') == False:
elif available.has_field('mempage_size'):
self._log.debug("Rejecting available flavor because mempage_size not required but available")
return False
-
+
if required.has_field('cpu_pinning_policy'):
self._log.debug("Matching cpu_pinning_policy")
if required.cpu_pinning_policy != 'ANY':
elif available.has_field('cpu_pinning_policy'):
self._log.debug("Rejecting available flavor because cpu_pinning_policy not required but available")
return False
-
+
if required.has_field('cpu_thread_pinning_policy'):
self._log.debug("Matching cpu_thread_pinning_policy")
if available.has_field('cpu_thread_pinning_policy') == False:
elif available.has_field('trusted_execution'):
self._log.debug("Rejecting available flavor because trusted_execution not required but available")
return False
-
+
if required.has_field('numa_node_policy'):
self._log.debug("Matching numa_node_policy")
if available.has_field('numa_node_policy') == False:
elif available.numa_node_policy.has_field('node_cnt'):
self._log.debug("Rejecting available flavor because numa node count not required but available")
return False
-
+
if required.numa_node_policy.has_field('mem_policy'):
self._log.debug("Matching numa_node_policy mem_policy")
if available.numa_node_policy.has_field('mem_policy') == False:
elif available.has_field('cpu_model'):
self._log.debug("Rejecting available flavor because cpu_model not required but available")
return False
-
+
if required.has_field('cpu_arch'):
self._log.debug("Matching CPU architecture")
if available.has_field('cpu_arch') == False:
elif available.has_field('cpu_arch'):
self._log.debug("Rejecting available flavor because cpu_arch not required but available")
return False
-
+
if required.has_field('cpu_vendor'):
self._log.debug("Matching CPU vendor")
if available.has_field('cpu_vendor') == False:
elif available.has_field('cpu_socket_count'):
self._log.debug("Rejecting available flavor because cpu_socket_count not required but available")
return False
-
+
if required.has_field('cpu_core_count'):
self._log.debug("Matching CPU core count")
if available.has_field('cpu_core_count') == False:
elif available.has_field('cpu_core_count'):
self._log.debug("Rejecting available flavor because cpu_core_count not required but available")
return False
-
+
if required.has_field('cpu_core_thread_count'):
self._log.debug("Matching CPU core thread count")
if available.has_field('cpu_core_thread_count') == False:
elif available.has_field('cpu_core_thread_count'):
self._log.debug("Rejecting available flavor because cpu_core_thread_count not required but available")
return False
-
+
if required.has_field('cpu_feature'):
self._log.debug("Matching CPU feature list")
if available.has_field('cpu_feature') == False:
elif available.has_field('cpu_feature'):
self._log.debug("Rejecting available flavor because cpu_feature not required but available")
return False
- self._log.info("Successful match for Host EPA attributes")
+ self._log.info("Successful match for Host EPA attributes")
return True
def _match_placement_group_inputs(self, required, available):
self._log.info("Matching Host aggregate attributes")
-
+
if not required and not available:
# Host aggregate not required and not available => success
self._log.info("Successful match for Host Aggregate attributes")
# - Host aggregate not required but available
self._log.debug("Rejecting available flavor because host Aggregate mismatch. Required: %s, Available: %s ", required, available)
return False
-
-
+
+
def match_image_params(self, resource_info, request_params):
return True
if result == False:
self._log.debug("Host Aggregate mismatched")
return False
-
+
return True
@asyncio.coroutine
def initialize_resource_in_cal(self, resource, request):
self._log.info("Initializing the compute-resource with id: %s in RW.CAL", resource.resource_id)
- modify_params = RwcalYang.VDUModifyParams()
+ modify_params = RwcalYang.YangData_RwProject_Project_VduModifyParams()
modify_params.vdu_id = resource.resource_id
modify_params.image_id = request.image_id
point.virtual_link_id = c_point.virtual_link_id
yield from self._cal.modify_virtual_compute(modify_params)
- @asyncio.coroutine
+ @asyncio.coroutine
def uninitialize_resource_in_cal(self, resource):
self._log.info("Un-initializing the compute-resource with id: %s in RW.CAL", resource.resource_id)
- modify_params = RwcalYang.VDUModifyParams()
+ modify_params = RwcalYang.YangData_RwProject_Project_VduModifyParams()
modify_params.vdu_id = resource.resource_id
resource_info = yield from self.get_resource_info(resource)
for c_point in resource_info.connection_points:
""" Returns a list of configured cloud account names """
return self._cloud_cals.keys()
+ def get_cloud_account_detail(self, account_name):
+ """ Returns the cloud detail message"""
+ cloud_account = self._cloud_cals[account_name]
+ return cloud_account.get_cloud_account()
+
def add_cloud_account(self, account):
self._log.debug("Received CAL account. Account Name: %s, Account Type: %s",
account.name, account.account_type)
self._log.info("Selected pool %s for resource allocation", pool.name)
r_id, r_info = yield from pool.allocate_resource(request)
-
+
self._resource_table[event_id] = (r_id, cloud_account_name, pool.name)
return r_info
self._resource_table[event_id] = (r_id, cloud_account_name, resource.pool_name)
new_resource = pool._resource_class(r_id, 'dynamic', request)
if resource_type == 'compute':
- requested_params = RwcalYang.VDUInitParams()
+ requested_params = RwcalYang.YangData_RwProject_Project_VduInitParams()
requested_params.from_dict(request.as_dict())
new_resource.requested_params = requested_params
pool._all_resources[r_id] = new_resource
#
import asyncio
+import gi
import sys
-import gi
gi.require_version('RwDts', '1.0')
gi.require_version('RwYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
RwLaunchpadYang,
RwcalYang,
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
from gi.repository.RwTypes import RwStatus
import rift.tasklets
self._dts = dts
self._loop = loop
self._parent = parent
+ self._project = parent._project
self._vdu_reg = None
self._link_reg = None
yield from asyncio.wait([self._link_reg_event.wait(), self._vdu_reg_event.wait()],
timeout=timeout, loop=self._loop)
- def create_record_dts(self, regh, xact, path, msg):
+ def _add_config_flag(self, xpath, config=False):
+ if xpath[0] == '/':
+ if config:
+ return 'C,' + xpath
+ else:
+ return 'D,' + xpath
+
+ return xpath
+
+ def create_record_dts(self, regh, xact, xpath, msg):
"""
Create a record in DTS with path and message
"""
+ path = self._add_config_flag(self._project.add_project(xpath))
self._log.debug("Creating Resource Record xact = %s, %s:%s",
xact, path, msg)
regh.create_element(path, msg)
- def delete_record_dts(self, regh, xact, path):
+ def delete_record_dts(self, regh, xact, xpath):
"""
Delete a VNFR record in DTS with path and message
"""
+ path = self._add_config_flag(self._project.add_project(xpath))
self._log.debug("Deleting Resource Record xact = %s, %s",
xact, path)
regh.delete_element(path)
+
@asyncio.coroutine
def register(self):
@asyncio.coroutine
"""
# wait for 3 seconds
yield from asyncio.sleep(3, loop=self._loop)
+
+ try:
+ response_info = yield from self._parent.reallocate_virtual_network(
+ link.event_id,
+ link.cloud_account,
+ link.request_info, link.resource_info,
+ )
+ except Exception as e:
+ self._log.error("Encoutered exception in reallocate_virtual_network")
+ self._log.exception(e)
+
- response_info = yield from self._parent.reallocate_virtual_network(link.event_id,
- link.cloud_account,
- link.request_info, link.resource_info,
- )
if (xact_event == rwdts.MemberEvent.INSTALL):
link_cfg = self._link_reg.elements
+ self._log.debug("onlink_event INSTALL event: {}".format(link_cfg))
+
for link in link_cfg:
self._loop.create_task(instantiate_realloc_vn(link))
+
+ self._log.debug("onlink_event INSTALL event complete")
+
return rwdts.MemberRspCode.ACTION_OK
@asyncio.coroutine
# wait for 3 seconds
yield from asyncio.sleep(3, loop=self._loop)
- response_info = yield from self._parent.allocate_virtual_compute(vdu.event_id,
- vdu.cloud_account,
- vdu.request_info
- )
+ try:
+ response_info = yield from self._parent.allocate_virtual_compute(
+ vdu.event_id,
+ vdu.cloud_account,
+ vdu.request_info
+ )
+ except Exception as e:
+ self._log.error("Encoutered exception in allocate_virtual_network")
+ self._log.exception(e)
+ raise e
+
+ response_xpath = "/rw-resource-mgr:resource-mgmt/rw-resource-mgr:vdu-event/rw-resource-mgr:vdu-event-data[rw-resource-mgr:event-id={}]/resource-info".format(
+ quoted_key(vdu.event_id.strip()))
+
+ cloud_account = self._parent.get_cloud_account_detail(cloud_account)
+ asyncio.ensure_future(monitor_vdu_state(response_xpath, vdu.event_id, cloud_account.vdu_instance_timeout), loop=self._loop)
+
if (xact_event == rwdts.MemberEvent.INSTALL):
- vdu_cfg = self._vdu_reg.elements
- for vdu in vdu_cfg:
- self._loop.create_task(instantiate_realloc_vdu(vdu))
- return rwdts.MemberRspCode.ACTION_OK
+ vdu_cfg = self._vdu_reg.elements
+ self._log.debug("onvdu_event INSTALL event: {}".format(vdu_cfg))
+
+ for vdu in vdu_cfg:
+ self._loop.create_task(instantiate_realloc_vdu(vdu))
+
+ self._log.debug("onvdu_event INSTALL event complete")
- def on_link_request_commit(xact_info):
- """ The transaction has been committed """
- self._log.debug("Received link request commit (xact_info: %s)", xact_info)
return rwdts.MemberRspCode.ACTION_OK
+ @asyncio.coroutine
+ def allocate_vlink_task(ks_path, event_id, cloud_account, request_info):
+ response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
+ schema = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData().schema()
+ pathentry = schema.keyspec_to_entry(ks_path)
+ try:
+ response_info = yield from self._parent.allocate_virtual_network(pathentry.key00.event_id,
+ cloud_account,
+ request_info)
+ except Exception as e:
+ self._log.error("Encountered exception: %s while creating virtual network", str(e))
+ self._log.exception(e)
+ response_info = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData_ResourceInfo()
+ response_info.resource_state = 'failed'
+ response_info.resource_errors = str(e)
+ yield from self._dts.query_update(response_xpath,
+ rwdts.XactFlag.ADVISE,
+ response_info)
+ else:
+ yield from self._dts.query_update(response_xpath,
+ rwdts.XactFlag.ADVISE,
+ response_info)
+
+
@asyncio.coroutine
def on_link_request_prepare(xact_info, action, ks_path, request_msg):
- self._log.debug("Received virtual-link on_prepare callback (xact_info: %s, action: %s): %s",
+ self._log.debug(
+ "Received virtual-link on_prepare callback (xact_info: %s, action: %s): %s",
xact_info, action, request_msg)
response_info = None
response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
- schema = RwResourceMgrYang.VirtualLinkEventData().schema()
+ schema = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData().schema()
pathentry = schema.keyspec_to_entry(ks_path)
if action == rwdts.QueryAction.CREATE:
try:
- response_info = yield from self._parent.allocate_virtual_network(pathentry.key00.event_id,
- request_msg.cloud_account,
- request_msg.request_info)
+ response_info = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData_ResourceInfo()
+ response_info.resource_state = 'pending'
+ request_msg.resource_info = response_info
+ self.create_record_dts(self._link_reg,
+ None,
+ ks_path.to_xpath(RwResourceMgrYang.get_schema()),
+ request_msg)
+
+ asyncio.ensure_future(allocate_vlink_task(ks_path,
+ pathentry.key00.event_id,
+ request_msg.cloud_account,
+ request_msg.request_info),
+ loop = self._loop)
except Exception as e:
- self._log.error("Encountered exception: %s while creating virtual network", str(e))
+ self._log.error(
+ "Encountered exception: %s while creating virtual network", str(e))
self._log.exception(e)
- response_info = RwResourceMgrYang.VirtualLinkEventData_ResourceInfo()
+ response_info = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData_ResourceInfo()
response_info.resource_state = 'failed'
response_info.resource_errors = str(e)
yield from self._dts.query_update(response_xpath,
rwdts.XactFlag.ADVISE,
response_info)
- else:
- request_msg.resource_info = response_info
- self.create_record_dts(self._link_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema()), request_msg)
elif action == rwdts.QueryAction.DELETE:
yield from self._parent.release_virtual_network(pathentry.key00.event_id)
- self.delete_record_dts(self._link_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema()))
+ self.delete_record_dts(self._link_reg, None,
+ ks_path.to_xpath(RwResourceMgrYang.get_schema()))
+
elif action == rwdts.QueryAction.READ:
- response_info = yield from self._parent.read_virtual_network_info(pathentry.key00.event_id)
+ # TODO: Check why we are getting null event id request
+ if pathentry.key00.event_id:
+ response_info = yield from self._parent.read_virtual_network_info(pathentry.key00.event_id)
+ else:
+ xact_info.respond_xpath(rwdts.XactRspCode.NA)
+ return
else:
- raise ValueError("Only read/create/delete actions available. Received action: %s" %(action))
+ raise ValueError(
+ "Only read/create/delete actions available. Received action: %s" %(action))
- self._log.debug("Responding with VirtualLinkInfo at xpath %s: %s.",
- response_xpath, response_info)
+ self._log.info("Responding with VirtualLinkInfo at xpath %s: %s.",
+ response_xpath, response_info)
xact_info.respond_xpath(rwdts.XactRspCode.ACK, response_xpath, response_info)
- def on_vdu_request_commit(xact_info):
- """ The transaction has been committed """
- self._log.debug("Received vdu request commit (xact_info: %s)", xact_info)
- return rwdts.MemberRspCode.ACTION_OK
- def monitor_vdu_state(response_xpath, pathentry):
+ def monitor_vdu_state(response_xpath, event_id, vdu_timeout):
self._log.info("Initiating VDU state monitoring for xpath: %s ", response_xpath)
- time_to_wait = 300
sleep_time = 2
- loop_cnt = int(time_to_wait/sleep_time)
+ loop_cnt = int(vdu_timeout/sleep_time)
+
for i in range(loop_cnt):
- self._log.debug("VDU state monitoring for xpath: %s. Sleeping for 2 second", response_xpath)
+ self._log.debug(
+ "VDU state monitoring for xpath: %s. Sleeping for 2 second", response_xpath)
yield from asyncio.sleep(2, loop = self._loop)
+
try:
- response_info = yield from self._parent.read_virtual_compute_info(pathentry.key00.event_id)
+ response_info = yield from self._parent.read_virtual_compute_info(event_id)
except Exception as e:
- self._log.info("VDU state monitoring: Received exception %s in VDU state monitoring for %s. Aborting monitoring",
- str(e),response_xpath)
- response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
+ self._log.info(
+ "VDU state monitoring: Received exception %s in VDU state monitoring for %s. Aborting monitoring", str(e),response_xpath)
+
+ response_info = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData_ResourceInfo()
response_info.resource_state = 'failed'
response_info.resource_errors = str(e)
yield from self._dts.query_update(response_xpath,
response_info)
else:
if response_info.resource_state == 'active' or response_info.resource_state == 'failed':
- self._log.info("VDU state monitoring: VDU reached terminal state. Publishing VDU info: %s at path: %s",
+ self._log.info("VDU state monitoring: VDU reached terminal state. " +
+ "Publishing VDU info: %s at path: %s",
response_info, response_xpath)
yield from self._dts.query_update(response_xpath,
rwdts.XactFlag.ADVISE,
return
else:
### End of loop. This is only possible if VDU did not reach active state
- err_msg = "VDU state monitoring: VDU at xpath :{} did not reached active state in {} seconds. Aborting monitoring".format(response_xpath, time_to_wait)
+ err_msg = ("VDU state monitoring: VDU at xpath :{} did not reached active " +
+ "state in {} seconds. Aborting monitoring".
+ format(response_xpath, time_to_wait))
self._log.info(err_msg)
- response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
+ response_info = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData_ResourceInfo()
response_info.resource_state = 'failed'
response_info.resource_errors = err_msg
yield from self._dts.query_update(response_xpath,
def allocate_vdu_task(ks_path, event_id, cloud_account, request_msg):
response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
- schema = RwResourceMgrYang.VDUEventData().schema()
+ response_xpath = self._add_config_flag(response_xpath)
+ schema = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData().schema()
pathentry = schema.keyspec_to_entry(ks_path)
try:
response_info = yield from self._parent.allocate_virtual_compute(event_id,
request_msg,)
except Exception as e:
self._log.error("Encountered exception : %s while creating virtual compute", str(e))
- response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
+ response_info = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData_ResourceInfo()
response_info.resource_state = 'failed'
response_info.resource_errors = str(e)
yield from self._dts.query_update(response_xpath,
rwdts.XactFlag.ADVISE,
response_info)
else:
+ cloud_account = self._parent.get_cloud_account_detail(cloud_account)
+ #RIFT-17719 - Set the resource state to active if no floating ip pool specified and is waiting for public ip.
+ if response_info.resource_state == 'pending' and cloud_account.has_field('openstack') \
+ and not (cloud_account.openstack.has_field('floating_ip_pool')) :
+ if (request_msg.has_field('allocate_public_address')) and (request_msg.allocate_public_address == True):
+ if not response_info.has_field('public_ip'):
+ response_info.resource_state = 'active'
+
if response_info.resource_state == 'failed' or response_info.resource_state == 'active' :
- self._log.info("Virtual compute create task completed. Publishing VDU info: %s at path: %s",
- response_info, response_xpath)
+ self._log.debug("Virtual compute create task completed. Publishing VDU info: %s at path: %s",
+ response_info, response_xpath)
yield from self._dts.query_update(response_xpath,
rwdts.XactFlag.ADVISE,
response_info)
else:
- asyncio.ensure_future(monitor_vdu_state(response_xpath, pathentry),
+ asyncio.ensure_future(monitor_vdu_state(response_xpath, pathentry.key00.event_id, cloud_account.vdu_instance_timeout),
loop = self._loop)
-
@asyncio.coroutine
def on_vdu_request_prepare(xact_info, action, ks_path, request_msg):
self._log.debug("Received vdu on_prepare callback (xact_info: %s, action: %s): %s",
xact_info, action, request_msg)
response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
- schema = RwResourceMgrYang.VDUEventData().schema()
+ response_xpath = self._add_config_flag(response_xpath)
+ schema = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData().schema()
pathentry = schema.keyspec_to_entry(ks_path)
if action == rwdts.QueryAction.CREATE:
- response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
+ response_info = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData_ResourceInfo()
response_info.resource_state = 'pending'
request_msg.resource_info = response_info
self.create_record_dts(self._vdu_reg,
yield from self._parent.release_virtual_compute(pathentry.key00.event_id)
self.delete_record_dts(self._vdu_reg, None, ks_path.to_xpath(RwResourceMgrYang.get_schema()))
elif action == rwdts.QueryAction.READ:
- response_info = yield from self._parent.read_virtual_compute_info(pathentry.key00.event_id)
+ # TODO: Check why we are getting null event id request
+ if pathentry.key00.event_id:
+ response_info = yield from self._parent.read_virtual_compute_info(pathentry.key00.event_id)
+ else:
+ xact_info.respond_xpath(rwdts.XactRspCode.NA)
+ return
else:
raise ValueError("Only create/delete actions available. Received action: %s" %(action))
link_handlers = rift.tasklets.Group.Handler(on_event=onlink_event,)
with self._dts.group_create(handler=link_handlers) as link_group:
- self._log.debug("Registering for Link Resource Request using xpath: %s",
- ResourceMgrEvent.VLINK_REQUEST_XPATH)
-
- self._link_reg = link_group.register(xpath=ResourceMgrEvent.VLINK_REQUEST_XPATH,
- handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
- on_commit=on_link_request_commit,
- on_prepare=on_link_request_prepare),
- flags=rwdts.Flag.PUBLISHER | rwdts.Flag.DATASTORE,)
-
+ xpath = self._project.add_project(ResourceMgrEvent.VLINK_REQUEST_XPATH)
+ self._log.debug("Registering for Link Resource Request using xpath: {}".
+ format(xpath))
+
+ self._link_reg = link_group.register(xpath=xpath,
+ handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
+ on_prepare=on_link_request_prepare),
+ flags=rwdts.Flag.PUBLISHER | rwdts.Flag.DATASTORE,)
+
vdu_handlers = rift.tasklets.Group.Handler(on_event=onvdu_event, )
with self._dts.group_create(handler=vdu_handlers) as vdu_group:
+
+ xpath = self._project.add_project(ResourceMgrEvent.VDU_REQUEST_XPATH)
+ self._log.debug("Registering for VDU Resource Request using xpath: {}".
+ format(xpath))
+
+ self._vdu_reg = vdu_group.register(xpath=xpath,
+ handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
+ on_prepare=on_vdu_request_prepare),
+ flags=rwdts.Flag.PUBLISHER | rwdts.Flag.DATASTORE,)
+
- self._log.debug("Registering for VDU Resource Request using xpath: %s",
- ResourceMgrEvent.VDU_REQUEST_XPATH)
+ def deregister(self):
+ self._log.debug("De-register for project {}".format(self._project.name))
- self._vdu_reg = vdu_group.register(xpath=ResourceMgrEvent.VDU_REQUEST_XPATH,
- handler=rift.tasklets.DTS.RegistrationHandler(on_ready=on_request_ready,
- on_commit=on_vdu_request_commit,
- on_prepare=on_vdu_request_prepare),
- flags=rwdts.Flag.PUBLISHER | rwdts.Flag.DATASTORE,)
+ if self._vdu_reg:
+ self._vdu_reg.deregister()
+ self._vdu_reg = None
+ if self._link_reg:
+ self._link_reg.deregister()
+ self._link_reg = None
)
import rift.tasklets
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+)
from . import rwresmgr_core as Core
from . import rwresmgr_config as Config
class ResourceManager(object):
- def __init__(self, log, log_hdl, loop, dts):
+ def __init__(self, log, log_hdl, loop, dts, project):
self._log = log
self._log_hdl = log_hdl
self._loop = loop
self._dts = dts
+ self._project = project
+
self.config_handler = Config.ResourceMgrConfig(self._dts, self._log, self._log_hdl, self._loop, self)
self.event_handler = Event.ResourceMgrEvent(self._dts, self._log, self._loop, self)
self.core = Core.ResourceMgrCore(self._dts, self._log, self._log_hdl, self._loop, self)
yield from self.config_handler.register()
yield from self.event_handler.register()
+ def deregister(self):
+ self.event_handler.deregister()
+ self.config_handler.deregister()
+
def add_cloud_account_config(self, account):
self._log.debug("Received Cloud-Account add config event for account: %s", account.name)
self.core.add_cloud_account(account)
cloud_account_names = self.core.get_cloud_account_names()
return cloud_account_names
+ def get_cloud_account_detail(self, account_name):
+ return self.core.get_cloud_account_detail(account_name)
+
def pool_add(self, cloud_account_name, pool):
self._log.debug("Received Pool add event for cloud account %s pool: %s",
cloud_account_name, pool.name)
return info
+class ResMgrProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(ResMgrProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
+ self._resource_manager = None
+
+ @asyncio.coroutine
+ def register (self):
+ self._log.debug("Initializing the Resource Manager tasklet for project {}".
+ format(self.name))
+ self._resource_manager = ResourceManager(self._log,
+ self._log_hdl,
+ self._loop,
+ self._dts,
+ self,)
+ yield from self._resource_manager.register()
+
+ def deregister(self):
+ self._log.debug("De-registering project {}".format(self.name))
+ self._resource_manager.deregister()
+
+
class ResMgrTasklet(rift.tasklets.Tasklet):
def __init__(self, *args, **kwargs):
super(ResMgrTasklet, self).__init__(*args, **kwargs)
self.rwlog.set_category("rw-resource-mgr-log")
self._dts = None
- self._resource_manager = None
+ self._project_handler = None
+ self.projects = {}
+
+ @property
+ def dts(self):
+ return self._dts
def start(self):
super(ResMgrTasklet, self).start()
- self.log.info("Starting ResMgrTasklet")
+ self.log.debug("Starting ResMgrTasklet")
self.log.debug("Registering with dts")
@asyncio.coroutine
def init(self):
- self._log.info("Initializing the Resource Manager tasklet")
- self._resource_manager = ResourceManager(self.log,
- self.log_hdl,
- self.loop,
- self._dts)
- yield from self._resource_manager.register()
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, ResMgrProject)
+ self.project_handler.register()
@asyncio.coroutine
def run(self):
#!/usr/bin/env python3
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# limitations under the License.
#
-
import asyncio
+import gi
import logging
import os
+import random
import sys
import types
import unittest
import uuid
-import random
-
import xmlrunner
-import gi
gi.require_version('CF', '1.0')
gi.require_version('RwDts', '1.0')
gi.require_version('RwMain', '1.0')
gi.require_version('RwTypes', '1.0')
gi.require_version('RwCal', '1.0')
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
import gi.repository.CF as cf
import gi.repository.RwDts as rwdts
resource_requests = {'compute': {}, 'network': {}}
###### mycompute-0
- msg = rmgryang.VDUEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData_RequestInfo()
msg.image_id = str(uuid.uuid3(uuid.NAMESPACE_DNS, 'image-0'))
msg.vm_flavor.vcpu_count = 4
msg.vm_flavor.memory_mb = 8192
resource_requests['compute']['mycompute-0'] = msg
###### mycompute-1
- msg = rmgryang.VDUEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData_RequestInfo()
msg.image_id = str(uuid.uuid3(uuid.NAMESPACE_DNS, 'image-1'))
msg.vm_flavor.vcpu_count = 2
msg.vm_flavor.memory_mb = 8192
resource_requests['compute']['mycompute-1'] = msg
####### mynet-0
- msg = rmgryang.VirtualLinkEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData_RequestInfo()
resource_requests['network']['mynet-0'] = msg
####### mynet-1
- msg = rmgryang.VirtualLinkEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData_RequestInfo()
resource_requests['network']['mynet-1'] = msg
return resource_requests
resource_requests = {'compute': {}, 'network': {}}
###### mycompute-0
- msg = rmgryang.VDUEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData_RequestInfo()
msg.image_id = "1"
msg.vm_flavor.vcpu_count = 4
msg.vm_flavor.memory_mb = 8192
resource_requests['compute']['mycompute-0'] = msg
###### mycompute-1
- msg = rmgryang.VDUEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData_RequestInfo()
msg.image_id = "1"
msg.vm_flavor.vcpu_count = 2
msg.vm_flavor.memory_mb = 8192
resource_requests['compute']['mycompute-1'] = msg
####### mynet-0
- msg = rmgryang.VirtualLinkEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData_RequestInfo()
resource_requests['network']['mynet-0'] = msg
####### mynet-1
- msg = rmgryang.VirtualLinkEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData_RequestInfo()
resource_requests['network']['mynet-1'] = msg
return resource_requests
resource_requests = {'compute': {}, 'network': {}}
###### mycompute-0
- msg = rmgryang.VDUEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData_RequestInfo()
msg.image_id = str(uuid.uuid3(uuid.NAMESPACE_DNS, 'image-0'))
msg.vm_flavor.vcpu_count = 4
msg.vm_flavor.memory_mb = 8192
resource_requests['compute']['mycompute-0'] = msg
###### mycompute-1
- msg = rmgryang.VDUEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData_RequestInfo()
msg.image_id = str(uuid.uuid3(uuid.NAMESPACE_DNS, 'image-1'))
msg.vm_flavor.vcpu_count = 2
msg.vm_flavor.memory_mb = 8192
resource_requests['compute']['mycompute-1'] = msg
####### mynet-0
- msg = rmgryang.VirtualLinkEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData_RequestInfo()
resource_requests['network']['mynet-0'] = msg
####### mynet-1
- msg = rmgryang.VirtualLinkEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData_RequestInfo()
resource_requests['network']['mynet-1'] = msg
return resource_requests
resource_requests = {'compute': {}, 'network': {}}
###### mycompute-0
- msg = rmgryang.VDUEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData_RequestInfo()
msg.image_id = openstack_info['image_id']
msg.vm_flavor.vcpu_count = 4
msg.vm_flavor.memory_mb = 8192
resource_requests['compute']['mycompute-0'] = msg
###### mycompute-1
- msg = rmgryang.VDUEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData_RequestInfo()
msg.image_id = openstack_info['image_id']
msg.vm_flavor.vcpu_count = 2
msg.vm_flavor.memory_mb = 4096
resource_requests['compute']['mycompute-1'] = msg
####### mynet-0
- msg = rmgryang.VirtualLinkEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData_RequestInfo()
msg.provider_network.physical_network = 'PHYSNET1'
msg.provider_network.overlay_type = 'VLAN'
msg.provider_network.segmentation_id = 17
resource_requests['network']['mynet-0'] = msg
####### mynet-1
- msg = rmgryang.VirtualLinkEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData_RequestInfo()
msg.provider_network.physical_network = 'PHYSNET1'
msg.provider_network.overlay_type = 'VLAN'
msg.provider_network.segmentation_id = 18
resource_requests = {'compute': {}, 'network': {}}
###### mycompute-0
- msg = rmgryang.VDUEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData_RequestInfo()
msg.image_id = openstack_info['image_id']
msg.vm_flavor.vcpu_count = 2
msg.vm_flavor.memory_mb = 4096
resource_requests['compute']['mycompute-0'] = msg
###### mycompute-1
- msg = rmgryang.VDUEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData_RequestInfo()
msg.image_id = openstack_info['image_id']
msg.vm_flavor.vcpu_count = 4
msg.vm_flavor.memory_mb = 8192
resource_requests['compute']['mycompute-1'] = msg
####### mynet-0
- msg = rmgryang.VirtualLinkEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData_RequestInfo()
#msg.provider_network.overlay_type = 'VXLAN'
#msg.provider_network.segmentation_id = 71
resource_requests['network']['mynet-0'] = msg
####### mynet-1
- msg = rmgryang.VirtualLinkEventData_RequestInfo()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData_RequestInfo()
#msg.provider_network.overlay_type = 'VXLAN'
#msg.provider_network.segmentation_id = 73
resource_requests['network']['mynet-1'] = msg
def get_cal_account(account_type):
"""
- Creates an object for class RwcalYang.CloudAccount()
+ Creates an object for class RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
"""
- account = RwcalYang.CloudAccount()
+ account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
if account_type == 'mock':
account.name = 'mock_account'
account.account_type = "mock"
return 360
def get_cloud_account_msg(self, acct_type):
- cloud_account = RwCloudYang.CloudAccount()
+ cloud_account = RwCloudYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
acct = get_cal_account(acct_type)
cloud_account.from_dict(acct.as_dict())
cloud_account.name = acct.name
return cloud_account
def get_compute_pool_msg(self, name, pool_type, cloud_type):
- pool_config = rmgryang.ResourcePools()
+ pool_config = rmgryang.YangData_RwProject_Project_ResourceMgrConfig_ResourcePools()
pool = pool_config.pools.add()
pool.name = name
pool.resource_type = "compute"
return pool_config
def get_network_pool_msg(self, name, pool_type, cloud_type):
- pool_config = rmgryang.ResourcePools()
+ pool_config = rmgryang.YangData_RwProject_Project_ResourceMgrConfig_ResourcePools()
pool = pool_config.pools.add()
pool.name = name
pool.resource_type = "network"
def get_network_reserve_msg(self, name, cloud_type, xpath):
event_id = str(uuid.uuid4())
- msg = rmgryang.VirtualLinkEventData()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData()
msg.event_id = event_id
msg.request_info.name = name
attributes = ['physical_network', 'name', 'overlay_type', 'segmentation_id']
setattr(msg.request_info.provider_network, attr,
getattr(resource_requests[cloud_type]['network'][name].provider_network ,attr))
- return msg, xpath.format(event_id)
+ return msg, xpath.format(quoted_key(event_id))
def get_compute_reserve_msg(self, name, cloud_type, xpath, vlinks):
event_id = str(uuid.uuid4())
- msg = rmgryang.VDUEventData()
+ msg = rmgryang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData()
msg.event_id = event_id
msg.request_info.name = name
msg.request_info.image_id = resource_requests[cloud_type]['compute'][name].image_id
c1.virtual_link_id = link
self.log.info("Sending message :%s", msg)
- return msg, xpath.format(event_id)
+ return msg, xpath.format(quoted_key(event_id))
@asyncio.coroutine
def configure_cloud_account(self, dts, acct_type):
- account_xpath = "C,/rw-cloud:cloud/account"
+ account_xpath = "C,/rw-project:project/rw-cloud:cloud/account"
msg = self.get_cloud_account_msg(acct_type)
self.log.info("Configuring cloud-account: %s",msg)
yield from dts.query_create(account_xpath,
@asyncio.coroutine
def configure_compute_resource_pools(self, dts, resource_type, cloud_type):
- pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
+ pool_xpath = "C,/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
msg = self.get_compute_pool_msg("virtual-compute", resource_type, cloud_type)
self.log.info("Configuring compute-resource-pool: %s",msg)
yield from dts.query_create(pool_xpath,
@asyncio.coroutine
def configure_network_resource_pools(self, dts, resource_type, cloud_type):
- pool_xpath = "C,/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
+ pool_xpath = "C,/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools"
msg = self.get_network_pool_msg("virtual-network", resource_type, cloud_type)
self.log.info("Configuring network-resource-pool: %s",msg)
yield from dts.query_create(pool_xpath,
@asyncio.coroutine
def verify_resource_pools_config(self, dts):
- pool_records_xpath = "D,/rw-resource-mgr:resource-pool-records"
+ pool_records_xpath = "D,/rw-project:project/rw-resource-mgr:resource-pool-records"
self.log.debug("Verifying test_create_resource_pools results")
res_iter = yield from dts.query_read(pool_records_xpath,)
for result in res_iter:
@asyncio.coroutine
def reserve_network_resources(self, name, dts, cloud_type):
- network_xpath = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data[event-id='{}']"
+ network_xpath = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data[event-id={}]"
msg,xpath = self.get_network_reserve_msg(name, cloud_type, network_xpath)
self.log.debug("Sending create event to network-event xpath %s with msg: %s" % (xpath, msg))
yield from dts.query_create(xpath, 0, msg)
@asyncio.coroutine
def reserve_compute_resources(self, name, dts, cloud_type, vlinks = []):
- compute_xpath = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id='{}']"
+ compute_xpath = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data[event-id={}]"
msg,xpath = self.get_compute_reserve_msg(name, cloud_type, compute_xpath, vlinks)
self.log.debug("Sending create event to compute-event xpath %s with msg: %s" % (xpath, msg))
yield from dts.query_create(xpath, 0, msg)
##
# This function creates an install target for the plugin artifacts
##
-rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+rift_install_gobject_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py COMPONENT ${INSTALL_COMPONENT})
# Workaround RIFT-6485 - rpmbuild defaults to python2 for
rift/tasklets/${TASKLET_NAME}/store/file_store.py
rift/tasklets/${TASKLET_NAME}/model/__init__.py
rift/tasklets/${TASKLET_NAME}/model/staging_area.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY)
-rift_add_subdirs(test)
\ No newline at end of file
+rift_add_subdirs(test)
def __init__(self, model=None):
self._model = model
if not self._model:
- self._model = RwStagingMgmtYang.StagingArea.from_dict({})
+ self._model = RwStagingMgmtYang.YangData_RwProject_Project_StagingAreas_StagingArea.from_dict({})
@property
def area_id(self):
def model(self):
return self._model
+ @property
+ def project_name(self):
+ return self._model.project_name
+
@property
def has_expired(self):
current_time = time.time()
#
import asyncio
+import gi
import uuid
from gi.repository import (RwDts as rwdts)
import rift.mano.dts as mano_dts
import rift.tasklets
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
from ..protocol import StagingStoreProtocol
class StagingStorePublisher(mano_dts.DtsHandler, StagingStoreProtocol):
- def __init__(self, log, dts, loop):
- super().__init__(log, dts, loop)
+ def __init__(self, project):
+ super().__init__(project.log, project.dts, project.loop, project)
self.delegate = None
def xpath(self, area_id=None):
- return ("D,/rw-staging-mgmt:staging-areas/rw-staging-mgmt:staging-area" +
- ("[area-id='{}']".format(area_id) if area_id else ""))
+ return self.project.add_project("D,/rw-staging-mgmt:staging-areas/rw-staging-mgmt:staging-area" +
+ ("[area-id={}]".format(quoted_key(area_id)) if area_id else ""))
@asyncio.coroutine
def register(self):
assert self.reg is not None
+ def deregister(self):
+ self._log.debug("Project {}: de-register staging store handler".
+ format(self._project.name))
+ if self.reg:
+ self.reg.deregister()
+
def on_staging_area_create(self, store):
self.reg.update_element(self.xpath(store.area_id), store)
import gi
gi.require_version('RwDts', '1.0')
gi.require_version('RwStagingMgmtYang', '1.0')
+gi.require_version('rwlib', '1.0')
+
from gi.repository import (
RwDts as rwdts,
RwStagingMgmtYang)
import rift.tasklets
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+)
+import gi.repository.rwlib as rwlib
from . import rpc
from . import store
from .publisher import StagingStorePublisher
+class StagingManagerProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(StagingManagerProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
+ self.publisher = StagingStorePublisher(self)
+ # For recovery
+ self.publisher.delegate = tasklet.store
+
+ @asyncio.coroutine
+ def register (self):
+ yield from self.publisher.register()
+
+ def deregister(self):
+ self.publisher.deregister()
+
+
class StagingManagerTasklet(rift.tasklets.Tasklet):
"""Tasklet to handle all staging related operations
"""
def __init__(self, *args, **kwargs):
try:
super().__init__(*args, **kwargs)
+ self._project_handler = None
+ self.projects = {}
+
except Exception as e:
- self.log.exception(e)
+ self.log.exception("Staging Manager tasklet init: {}".
+ format(e))
def start(self):
super().start()
@asyncio.coroutine
def init(self):
- self.store = store.StagingFileStore(log=self.log)
- self.publisher = StagingStorePublisher(self.log, self.dts, self.loop)
- # Fore recovery
- self.publisher.delegate = self.store
- # For create and delete events
- self.store.delegate = self.publisher
- yield from self.publisher.register()
-
+ self.store = store.StagingFileStore(self)
io_loop = rift.tasklets.tornado.TaskletAsyncIOLoop(asyncio_loop=self.loop)
- self.app = StagingApplication(self.store)
+ self.app = StagingApplication(self.store, self.loop)
manifest = self.tasklet_info.get_pb_manifest()
ssl_cert = manifest.bootstrap_phase.rwsecurity.cert
self.dts,
self.loop,
self.store)
-
yield from self.create_stg_rpc.register()
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, StagingManagerProject)
+ self.project_handler.register()
+
@asyncio.coroutine
def run(self):
- self.server.listen(self.app.PORT)
+ address = rwlib.getenv("RWVM_INTERNAL_IPADDR")
+ if (address is None):
+ address=""
+ self.server.listen(self.app.PORT, address=address)
+ self.server.listen(self.app.PORT, address="127.0.0.1")
@asyncio.coroutine
def on_dts_state_change(self, state):
MAX_BODY_SIZE = 1 * MB # Max. size loaded into memory!
PORT = 4568
- def __init__(self, store, cleanup_interval=60):
+ def __init__(self, store, loop, cleanup_interval=60):
self.store = store
+ self.loop = loop
- self.cleaner = CleanupThread(self.store, cleanup_interval=cleanup_interval)
+ assert self.loop is not None
+
+ self.cleaner = CleanupThread(self.store, loop=self.loop, cleanup_interval=cleanup_interval)
self.cleaner.start()
super(StagingApplication, self).__init__([
class CleanupThread(threading.Thread):
"""Daemon thread that clean up the staging area
"""
- def __init__(self, store, log=None, cleanup_interval=60):
+ def __init__(self, store, loop, log=None, cleanup_interval=60):
"""
Args:
- store : A compatible store object
+ store: A compatible store object
log (None, optional): Log handle
cleanup_interval (int, optional): Cleanup interval in secs
+ loop: Tasklet main loop
"""
super().__init__()
- self.log = log or logging.getLogger()
- self.store = store
+ self.log = log or logging.getLogger()
+ self.store = store
self._cleaner = CleanUpStaging(store, log)
self.cleanup_interval = cleanup_interval
- self.daemon = True
+ self.daemon = True
+ self.loop = loop
+
+ assert self.loop is not None
def run(self):
try:
while True:
- self._cleaner.cleanup()
+ self.loop.call_soon_threadsafe(self._cleaner.cleanup, )
time.sleep(self.cleanup_interval)
except Exception as e:
class StoreStreamerPart(multipart_streamer.MultiPartStreamer):
"""
Create a Part streamer with a custom temp directory. Using the default
- tmp directory and trying to move the file to $RIFT_ARTIFACTS occasionally
+ tmp directory and trying to move the file to $RIFT_VAR_ROOT occasionally
causes link errors. So create a temp directory within the staging area.
"""
def __init__(self, store, *args, **kwargs):
self.store = store
def create_part(self, headers):
+ #RIFT-18071: tmp directory was not getting created - throwing an error in the system test cases in HA failover.
+ if not os.path.exists(self.store.tmp_dir):
+ os.makedirs(self.store.tmp_dir)
return multipart_streamer.TemporaryFileStreamedPart(self, headers, tmp_dir=self.store.tmp_dir)
gi.require_version("RwStagingMgmtYang", "1.0")
from gi.repository import RwStagingMgmtYang
import rift.mano.dts as mano_dts
+from rift.mano.utils.project import DEFAULT_PROJECT
from .. import model
from ..protocol import StagingStorePublisherProtocol
META_YAML = "meta.yaml"
DEFAULT_EXPIRY = 60 * 60
- def __init__(self, log=None, root_dir=None):
+ def __init__(self, tasklet, root_dir=None):
default_path = os.path.join(
- os.getenv('RIFT_ARTIFACTS'),
+ os.getenv('RIFT_VAR_ROOT'),
"launchpad/staging")
self.root_dir = root_dir or default_path
if not os.path.isdir(self.root_dir):
os.makedirs(self.root_dir)
- self.log = log or logging.getLogger()
+ self.log = tasklet.log
self.tmp_dir = tempfile.mkdtemp(dir=self.root_dir)
self._cache = {}
- self.delegate = None
+ self.tasklet = tasklet
def on_recovery(self, staging_areas):
for area in staging_areas:
return self._cache[area_id]
+ def get_delegate(self, project_name):
+ if not project_name:
+ project_name = DEFAULT_PROJECT
+
+ try:
+ proj = self.tasklet.projects[project_name]
+ except Exception as e:
+ err = "Project or project name not found {}: {}". \
+ format(msg.as_dict(), e)
+ self.log.error (err)
+ raise Exception (err)
+
+ return proj.publisher
+
def create_staging_area(self, staging_area_config):
"""Create the staging area
Args:
Raises:
StagingAreaExists: if the staging area already exists
"""
+ delegate = self.get_delegate(staging_area_config.project_name)
+
area_id = str(uuid.uuid4())
container_path = os.path.join(self.root_dir, str(area_id))
"path": container_path
})
- staging_area = RwStagingMgmtYang.StagingArea.from_dict(config_dict)
+ staging_area = RwStagingMgmtYang.YangData_RwProject_Project_StagingAreas_StagingArea.from_dict(config_dict)
staging_area = model.StagingArea(staging_area)
self._cache[area_id] = staging_area
try:
- if self.delegate:
- self.delegate.on_staging_area_create(staging_area.model)
+ if delegate:
+ delegate.on_staging_area_create(staging_area.model)
except Exception as e:
- self.log.exception(str(e))
+ self.log.exception(e)
return staging_area
if type(staging_area) is str:
staging_area = self.get_staging_area(staging_area)
+ delegate = self.get_delegate(staging_area.project_name)
+
if os.path.isdir(staging_area.model.path):
shutil.rmtree(staging_area.model.path)
staging_area.model.status = "EXPIRED"
try:
- if self.delegate:
- self.delegate.on_staging_area_delete(staging_area.model)
+ if delegate:
+ delegate.on_staging_area_delete(staging_area.model)
except Exception as e:
- self.log.exception(str(e))
+ self.log.exception(e)
import argparse
import asyncio
+import gi
import logging
import os
import sys
import uuid
import xmlrunner
-import gi
gi.require_version('RwDts', '1.0')
gi.require_version('RwStagingMgmtYang', '1.0')
from gi.repository import (
)
import rift.tasklets.rwstagingmgr.publisher as publisher
import rift.test.dts
+from rift.mano.utils.project import ManoProject
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+class TestProject(ManoProject):
+ def __init__(self, log, dts, loop):
+ super().__init__(log)
+ self._dts = dts
+ self._loop = loop
class TestCase(rift.test.dts.AbstractDTSTest):
self.log.debug("STARTING - %s", test_id)
self.tinfo = self.new_tinfo(str(test_id))
self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+ self.project = TestProject(self.log, self.dts, self.loop)
- self.job_handler = publisher.StagingStorePublisher(self.log, self.dts, self.loop)
+ self.job_handler = publisher.StagingStorePublisher(self.project)
def tearDown(self):
super().tearDown()
yield from asyncio.sleep(2, loop=self.loop)
published_xpaths = yield from self.get_published_xpaths()
assert self.job_handler.xpath() in published_xpaths
+ self.job_handler.deregister()
@rift.test.dts.async_test
def test_publish(self):
"""
yield from self.job_handler.register()
- mock_msg = RwStagingMgmtYang.StagingArea.from_dict({
+ mock_msg = RwStagingMgmtYang.YangData_RwProject_Project_StagingAreas_StagingArea.from_dict({
"area_id": "123"})
self.job_handler.on_staging_area_create(mock_msg)
yield from asyncio.sleep(5, loop=self.loop)
- itr = yield from self.dts.query_read("/staging-areas/staging-area[area-id='{}']".format(
- mock_msg.area_id))
+ xpath = self.project.add_project("/staging-areas/staging-area[area-id={}]".
+ format(quoted_key(mock_msg.area_id)))
+ itr = yield from self.dts.query_read(xpath)
result = None
print (result)
assert result == mock_msg
+ self.job_handler.deregister()
def main():
runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
import unittest
import xmlrunner
+#Setting RIFT_VAR_ROOT if not already set for unit test execution
+if "RIFT_VAR_ROOT" not in os.environ:
+ os.environ['RIFT_VAR_ROOT'] = os.path.join(os.environ['RIFT_INSTALL'], 'var/rift/unittest')
+
from rift.tasklets.rwstagingmgr.store import StagingFileStore
+from rift.mano.utils.project import ManoProject, DEFAULT_PROJECT
import gi
gi.require_version('RwStagingMgmtYang', '1.0')
RwStagingMgmtYang,
)
+class MockTasklet(object):
+ def __init__(self):
+ self.log = logging.getLogger()
+ self.projects = {}
+ project = ManoProject(self.log, name=DEFAULT_PROJECT)
+ project.publisher = None
+ self.projects[project.name] = project
+
+ def set_delegate(self, store):
+ self.projects[DEFAULT_PROJECT].publisher = store
+
+
class TestSerializer(unittest.TestCase):
def test_staging_area_create(self):
"""
tmp_dir = tempfile.mkdtemp()
- store = StagingFileStore(root_dir=tmp_dir)
+ tasklet = MockTasklet()
+ store = StagingFileStore(tasklet, root_dir=tmp_dir)
- mock_model = RwStagingMgmtYang.StagingArea.from_dict({})
+ mock_model = RwStagingMgmtYang.YangData_RwProject_Project_StagingAreas_StagingArea.from_dict({})
stg = store.create_staging_area(mock_model)
mock_id = stg.model.area_id
"""
tmp_dir = tempfile.mkdtemp()
- store = StagingFileStore(root_dir=tmp_dir)
+ tasklet = MockTasklet()
+ store = StagingFileStore(tasklet, root_dir=tmp_dir)
- mock_model = RwStagingMgmtYang.StagingArea.from_dict({})
+ mock_model = RwStagingMgmtYang.YangData_RwProject_Project_StagingAreas_StagingArea.from_dict({})
# get the wrapped mock model
mock_model = store.create_staging_area(mock_model)
mock_id = mock_model.model.area_id
self.staging_id = str(uuid.uuid4())
self.staging_dir = os.path.join(self.staging_dir_tmp, self.staging_id)
os.makedirs(self.staging_dir)
- mock_model = RwStagingMgmtYang.StagingArea.from_dict({
+ mock_model = RwStagingMgmtYang.YangData_RwProject_Project_StagingAreas_StagingArea.from_dict({
'path': self.staging_dir,
"validity_time": int(time.time()) + 5
})
def get_app(self):
self.store, self.mock_model = self.create_mock_store()
- return StagingApplication(self.store, cleanup_interval=5)
+ return StagingApplication(self.store, self._loop, cleanup_interval=5)
def test_file_upload_and_download(self):
"""
headers={"Content-Type": "multipart/form-data"})
assert response.code == 200
+
assert os.path.isfile(os.path.join(
self.staging_dir,
os.path.basename(temp_file)))
print (self.get_url('/'))
print (self.staging_dir)
time.sleep(5)
+
+ self.store.remove_staging_area(self.mock_model)
self.store.remove_staging_area.assert_called_once_with(self.mock_model)
def tearDown(self):
##
# This function creates an install target for the plugin artifacts
##
-rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+rift_install_gobject_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py COMPONENT ${INSTALL_COMPONENT})
# Workaround RIFT-6485 - rpmbuild defaults to python2 for
# anything not in a site-packages directory so we have to
FILES
rift/tasklets/${TASKLET_NAME}/__init__.py
rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
- COMPONENT ${PKG_LONG_NAME}
+ rift/tasklets/${TASKLET_NAME}/subscriber.py
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY)
-#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
import asyncio
import collections
import enum
+import gi
import logging
-import uuid
-import time
import os.path
import re
import shutil
import sys
+import time
+import uuid
+import yaml
-import gi
gi.require_version('RwDts', '1.0')
gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('VnfrYang', '1.0')
gi.require_version('RwVnfmYang', '1.0')
+gi.require_version('RwVnfdYang', '1.0')
gi.require_version('RwVlrYang', '1.0')
gi.require_version('RwManifestYang', '1.0')
gi.require_version('RwBaseYang', '1.0')
from gi.repository import (
RwDts as rwdts,
RwVnfrYang,
+ RwVnfdYang,
+ VnfdYang,
RwVnfmYang,
RwVlrYang,
VnfrYang,
RwBaseYang,
RwResourceMgrYang,
ProtobufC,
+ RwTypes
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
import rift.tasklets
import rift.package.store
import rift.package.cloud_init
import rift.package.script
import rift.mano.dts as mano_dts
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+ )
import rift.mano.utils.short_name as mano_short_name
+from . import subscriber
+VCP_FIELDS = ['name', 'id', 'connection_point_id', 'type_yang', 'ip_address', 'mac_address']
class VMResourceError(Exception):
""" VM resource Error"""
class VNFMPlacementGroupError(Exception):
+ """ VNF placement group Error """
pass
+
+class VlrError(Exception):
+ """ Virtual Link Record Error """
+ pass
+
+
class VirtualNetworkFunctionRecordState(enum.Enum):
""" VNFR state """
+ PRE_INIT = 0
INIT = 1
VL_INIT_PHASE = 2
VM_INIT_PHASE = 3
TERMINATED = 6
FAILED = 10
-
-class VcsComponent(object):
- """ VCS Component within the VNF descriptor """
- def __init__(self, dts, log, loop, cluster_name, vcs_handler, component, mangled_name):
- self._dts = dts
- self._log = log
- self._loop = loop
- self._component = component
- self._cluster_name = cluster_name
- self._vcs_handler = vcs_handler
- self._mangled_name = mangled_name
-
- @staticmethod
- def mangle_name(component_name, vnf_name, vnfd_id):
- """ mangled component name """
- return vnf_name + ":" + component_name + ":" + vnfd_id
-
- @property
- def name(self):
- """ name of this component"""
- return self._mangled_name
-
- @property
- def path(self):
- """ The path for this object """
- return("D,/rw-manifest:manifest" +
- "/rw-manifest:operational-inventory" +
- "/rw-manifest:component" +
- "[rw-manifest:component-name = '{}']").format(self.name)
-
- @property
- def instance_xpath(self):
- """ The path for this object """
- return("D,/rw-base:vcs" +
- "/instances" +
- "/instance" +
- "[instance-name = '{}']".format(self._cluster_name))
-
- @property
- def start_comp_xpath(self):
- """ start component xpath """
- return (self.instance_xpath +
- "/child-n[instance-name = 'START-REQ']")
-
- def get_start_comp_msg(self, ip_address):
- """ start this component """
- start_msg = RwBaseYang.VcsInstance_Instance_ChildN()
- start_msg.instance_name = 'START-REQ'
- start_msg.component_name = self.name
- start_msg.admin_command = "START"
- start_msg.ip_address = ip_address
-
- return start_msg
-
- @property
- def msg(self):
- """ Returns the message for this vcs component"""
-
- vcs_comp_dict = self._component.as_dict()
-
- def mangle_comp_names(comp_dict):
- """ mangle component name with VNF name, id"""
- for key, val in comp_dict.items():
- if isinstance(val, dict):
- comp_dict[key] = mangle_comp_names(val)
- elif isinstance(val, list):
- i = 0
- for ent in val:
- if isinstance(ent, dict):
- val[i] = mangle_comp_names(ent)
- else:
- val[i] = ent
- i += 1
- elif key == "component_name":
- comp_dict[key] = VcsComponent.mangle_name(val,
- self._vnfd_name,
- self._vnfd_id)
- return comp_dict
-
- mangled_dict = mangle_comp_names(vcs_comp_dict)
- msg = RwManifestYang.OpInventory_Component.from_dict(mangled_dict)
- return msg
-
- @asyncio.coroutine
- def publish(self, xact):
- """ Publishes the VCS component """
- self._log.debug("Publishing the VcsComponent %s, path = %s comp = %s",
- self.name, self.path, self.msg)
- yield from self._vcs_handler.publish(xact, self.path, self.msg)
-
- @asyncio.coroutine
- def start(self, xact, parent, ip_addr=None):
- """ Starts this VCS component """
- # ATTN RV - replace with block add
- start_msg = self.get_start_comp_msg(ip_addr)
- self._log.debug("starting component %s %s",
- self.start_comp_xpath, start_msg)
- yield from self._dts.query_create(self.start_comp_xpath,
- 0,
- start_msg)
- self._log.debug("started component %s, %s",
- self.start_comp_xpath, start_msg)
-
-
class VirtualDeploymentUnitRecord(object):
""" Virtual Deployment Unit Record """
def __init__(self,
dts,
log,
loop,
+ project,
vdud,
vnfr,
nsr_config,
mgmt_intf,
mgmt_network,
- cloud_account_name,
+ datacenter_name,
vnfd_package_store,
vdur_id=None,
placement_groups=[]):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._vdud = vdud
self._vnfr = vnfr
self._nsr_config = nsr_config
self._mgmt_intf = mgmt_intf
- self._cloud_account_name = cloud_account_name
+ self._datacenter_name = datacenter_name
self._vnfd_package_store = vnfd_package_store
self._mgmt_network = mgmt_network
self._rm_regh = None
self._vm_resp = None
self._vdud_cloud_init = None
- self._vdur_console_handler = VnfrConsoleOperdataDtsHandler(dts, log, loop, self._vnfr._vnfm, self._vnfr.vnfr_id, self._vdur_id,self.vdu_id)
+ self._vdur_console_handler = VnfrConsoleOperdataDtsHandler(
+ dts, log, loop, self._vnfr._vnfm, self._vnfr.vnfr_id, self._vdur_id,self.vdu_id)
+
@asyncio.coroutine
def vdu_opdata_register(self):
yield from self._vdur_console_handler.register()
- def cp_ip_addr(self, cp_name):
- """ Find ip address by connection point name """
+ def vm_cp_info(self, cp_name):
+ """ Find the VM Connection info by connection point name """
if self._vm_resp is not None:
for conn_point in self._vm_resp.connection_points:
if conn_point.name == cp_name:
- return conn_point.ip_address
- return "0.0.0.0"
+ return conn_point
+ return None
+
+ def cp_ip_addr(self, cp_name):
+ """ Find ip address by connection point name """
+ vm_cp_info = self.vm_cp_info(cp_name)
+ if vm_cp_info:
+ return vm_cp_info.ip_address
+ else:
+ return "0.0.0.0"
def cp_mac_addr(self, cp_name):
""" Find mac address by connection point name """
- if self._vm_resp is not None:
- for conn_point in self._vm_resp.connection_points:
- if conn_point.name == cp_name:
- return conn_point.mac_addr
- return "00:00:00:00:00:00"
+ vm_cp_info = self.vm_cp_info(cp_name)
+ if vm_cp_info:
+ return vm_cp_info.mac_addr
+ else:
+ return "00:00:00:00:00:00"
def cp_id(self, cp_name):
""" Find connection point id by connection point name """
- if self._vm_resp is not None:
- for conn_point in self._vm_resp.connection_points:
- if conn_point.name == cp_name:
- return conn_point.connection_point_id
- return ''
+ vm_cp_info = self.vm_cp_info(cp_name)
+ if vm_cp_info:
+ return vm_cp_info.connection_point_id
+ else:
+ return str()
+
@property
def vdu_id(self):
""" Return this VDUR's unique short name """
# Impose these restrictions on Unique name
# Max 64
- # - Max 10 of NSR name (remove all specialcharacters, only numbers and alphabets)
- # - 6 chars of shortened name
- # - Max 10 of VDU name (remove all specialcharacters, only numbers and alphabets)
+ # - Max trailing 10 chars of NSR name (remove all specialcharacters, only numbers and alphabets)
+ # - 9 chars of shortened name
+ # - Max trailing 10 of VDU name (remove all specialcharacters, only numbers and alphabets)
#
def _restrict_tag(input_str):
# Exclude all characters except a-zA-Z0-9
return shortstr
@property
- def cloud_account_name(self):
+ def datacenter_name(self):
""" Cloud account this VDU should be created in """
- return self._cloud_account_name
+ return self._datacenter_name
@property
def image_name(self):
"vswitch_epa",
"hypervisor_epa",
"host_epa",
- "volumes",
+ "volumes"
]
+
vdu_copy_dict = {k: v for k, v in
self._vdud.as_dict().items() if k in vdu_fields}
vdur_dict = {"id": self._vdur_id,
"unique_short_name": self.unique_short_name
}
+
if self.vm_resp is not None:
vdur_dict.update({"vim_id": self.vm_resp.vdu_id,
"flavor_id": self.vm_resp.flavor_id
if self._vm_resp.has_field('image_id'):
vdur_dict.update({ "image_id": self.vm_resp.image_id })
- if self.management_ip is not None:
+ if self.management_ip:
vdur_dict["management_ip"] = self.management_ip
- if self.vm_management_ip is not None:
+ if self.vm_management_ip:
vdur_dict["vm_management_ip"] = self.vm_management_ip
vdur_dict.update(vdu_copy_dict)
+
if self.vm_resp is not None:
if self._vm_resp.has_field('volumes'):
for opvolume in self._vm_resp.volumes:
vdur_dict['supplemental_boot_data']['boot_data_drive'] = self._vm_resp.supplemental_boot_data.boot_data_drive
if self._vm_resp.supplemental_boot_data.has_field('custom_meta_data'):
metadata_list = list()
+
+ # supplemental_boot_data below is returned by Openstack.
+ # The self._vm_resp version of supplemental data is defaulting to CLOUD_METADATA
+ # as Openstack does not repond with 'destination' attribute of custom meta data elements.
+ # Therefore the vdur when published does not specify the destination of the custom-meta-data.
+ # Should we add this field (destination) explicitly here by comparig the keys with the already obtained
+ # details in self._vdud ?
+
for metadata_item in self._vm_resp.supplemental_boot_data.custom_meta_data:
- metadata_list.append(metadata_item.as_dict())
+ metadata_list.append(metadata_item.as_dict())
vdur_dict['supplemental_boot_data']['custom_meta_data'] = metadata_list
+
if self._vm_resp.supplemental_boot_data.has_field('config_file'):
file_list = list()
for file_item in self._vm_resp.supplemental_boot_data.config_file:
for intf, cp_id, vlr in self._int_intf:
cp = self.find_internal_cp_by_cp_id(cp_id)
- icp_list.append({"name": cp.name,
- "id": cp.id,
- "type_yang": "VPORT",
- "ip_address": self.cp_ip_addr(cp.id),
- "mac_address": self.cp_mac_addr(cp.id)})
+ cp_info = dict(name=cp.name,
+ id=cp.id,
+ type_yang='VPORT',
+ ip_address=self.cp_ip_addr(cp.name),
+ mac_address=self.cp_mac_addr(cp.name),
+ connection_point_id=self.cp_id(cp.name))
+
+ virtual_cps = [ vcp for vcp in vlr._vlr.virtual_connection_points
+ if [ True for cp_ref in vcp.associated_cps if cp.name == cp_ref ]]
+
+ if virtual_cps:
+ for vcp in virtual_cps:
+ cp_info['virtual_cps'] = [ {k:v for k,v in vcp.as_dict().items() if k in VCP_FIELDS}
+ for vcp in virtual_cps ]
+
+ icp_list.append(cp_info)
+
+ ii_dict = {"name": intf.name,
+ "internal_connection_point_ref": cp.id,
+ "virtual_interface": {}}
- ii_list.append({"name": intf.name,
- "vdur_internal_connection_point_ref": cp.id,
- "virtual_interface": {}})
+ if "position" in intf.as_dict():
+ ii_dict["position"] = intf.position
+
+ ii_list.append(ii_dict)
vdur_dict["internal_connection_point"] = icp_list
self._log.debug("internal_connection_point:%s", vdur_dict["internal_connection_point"])
- vdur_dict["internal_interface"] = ii_list
+
ei_list = []
for intf, cp, vlr in self._ext_intf:
- ei_list.append({"name": cp.name,
- "vnfd_connection_point_ref": cp.name,
- "virtual_interface": {}})
+ ei_dict = {"name": intf.name,
+ "external_connection_point_ref": cp.name,
+ "virtual_interface": {}}
+ if "position" in intf.as_dict():
+ ei_dict["position"] = intf.position
+
+ ei_list.append(ei_dict)
+
+ virtual_cps = [ vcp for vcp in vlr.virtual_connection_points
+ if [ True for cp_ref in vcp.associated_cps if cp.name == cp_ref ]]
+
+ if virtual_cps:
+ for vcp in virtual_cps:
+ virtual_cp_info = [ {k:v for k,v in vcp.as_dict().items() if k in VCP_FIELDS}
+ for vcp in virtual_cps ]
+ else:
+ virtual_cp_info = []
+
self._vnfr.update_cp(cp.name,
self.cp_ip_addr(cp.name),
self.cp_mac_addr(cp.name),
- self.cp_id(cp.name))
+ self.cp_id(cp.name),
+ virtual_cp_info)
- vdur_dict["external_interface"] = ei_list
+ vdur_dict["interface"] = ei_list + ii_list
- placement_groups = []
- for group in self._placement_groups:
- placement_groups.append(group.as_dict())
- vdur_dict['placement_groups_info'] = placement_groups
- return RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur.from_dict(vdur_dict)
+ vdur_dict['placement_groups_info'] = [group.as_dict()
+ for group in self._placement_groups]
+
+ return RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur.from_dict(vdur_dict)
@property
def resmgr_path(self):
""" path for resource-mgr"""
- return ("D,/rw-resource-mgr:resource-mgmt" +
- "/vdu-event" +
- "/vdu-event-data[event-id='{}']".format(self._request_id))
+ xpath = self._project.add_project("D,/rw-resource-mgr:resource-mgmt" +
+ "/vdu-event" +
+ "/vdu-event-data[event-id={}]".format(quoted_key(self._request_id)))
+ return xpath
@property
def vm_flavor_msg(self):
def vdud_cloud_init(self):
""" Return the cloud-init contents for the VDU """
if self._vdud_cloud_init is None:
- self._vdud_cloud_init = self.cloud_init()
+ ci = self.cloud_init()
+
+ # VNFR ssh public key, if available
+ if self._vnfr.public_key:
+ if not ci:
+ ci = "#cloud-config"
+ self._vdud_cloud_init = """{}
+ssh_authorized_keys:
+ - {}""". \
+ format(ci, self._vnfr.public_key)
+ else:
+ self._vdud_cloud_init = ci
+
+ self._log.debug("Cloud init: {}".format(self._vdud_cloud_init))
return self._vdud_cloud_init
""" Populate cloud_init with cloud-config script from
either the inline contents or from the file provided
"""
+ cloud_init_msg = None
if self._vdud.cloud_init is not None:
self._log.debug("cloud_init script provided inline %s", self._vdud.cloud_init)
- return self._vdud.cloud_init
+ cloud_init_msg = self._vdud.cloud_init
elif self._vdud.cloud_init_file is not None:
# Get cloud-init script contents from the file provided in the cloud_init_file param
self._log.debug("cloud_init script provided in file %s", self._vdud.cloud_init_file)
stored_package = self._vnfd_package_store.get_package(self._vnfr.vnfd_id)
cloud_init_extractor = rift.package.cloud_init.PackageCloudInitExtractor(self._log)
try:
- return cloud_init_extractor.read_script(stored_package, filename)
+ cloud_init_msg = cloud_init_extractor.read_script(stored_package, filename)
except rift.package.cloud_init.CloudInitExtractionError as e:
self.instantiation_failed(str(e))
raise VirtualDeploymentUnitRecordError(e)
else:
- self._log.debug("VDU Instantiation: cloud-init script not provided")
+ if not self._vnfr._vnfr_msg.cloud_config.key_pair and not self._vnfr._vnfr_msg.cloud_config.user:
+ self._log.debug("VDU Instantiation: cloud-init script not provided")
+ return
+
+ self._log.debug("Current cloud init msg is {}".format(cloud_init_msg))
+ if not self._vnfr._vnfr_msg.cloud_config.key_pair and not self._vnfr._vnfr_msg.cloud_config.user:
+ return cloud_init_msg
+
+ cloud_init_dict = {}
+ if cloud_init_msg:
+ try:
+ cloud_init_dict = yaml.load(cloud_init_msg)
+ except Exception as e:
+ self._log.exception(e)
+ self._log.error("Error loading cloud init Yaml file with exception %s", str(e))
+ return cloud_init_msg
+
+ self._log.debug("Current cloud init dict is {}".format(cloud_init_dict))
+
+ for key_pair in self._vnfr._vnfr_msg.cloud_config.key_pair:
+ if "ssh_authorized_keys" not in cloud_init_dict:
+ cloud_init_dict["ssh_authorized_keys"] = list()
+ cloud_init_dict["ssh_authorized_keys"].append(key_pair.key)
+
+ users = list()
+ for user_entry in self._vnfr._vnfr_msg.cloud_config.user:
+ if "users" not in cloud_init_dict:
+ cloud_init_dict["users"] = list()
+ user = {}
+ user["name"] = user_entry.name
+ user["gecos"] = user_entry.user_info
+ user["sudo"] = "ALL=(ALL) NOPASSWD:ALL"
+ user["ssh-authorized-keys"] = list()
+ for ssh_key in user_entry.key_pair:
+ user["ssh-authorized-keys"].append(ssh_key.key)
+ cloud_init_dict["users"].append(user)
+
+ cloud_msg = yaml.safe_dump(cloud_init_dict,width=1000,default_flow_style=False)
+ cloud_init = "#cloud-config\n"+cloud_msg
+ self._log.debug("Cloud init msg is {}".format(cloud_init))
+ return cloud_init
def process_openstack_placement_group_construct(self, vm_create_msg_dict):
host_aggregates = []
if availability_zones:
if len(availability_zones) > 1:
- self._log.error("Can not launch VDU: %s in multiple availability zones. Requested Zones: %s", self.name, availability_zones)
- raise VNFMPlacementGroupError("Can not launch VDU: {} in multiple availability zones. Requsted Zones".format(self.name, availability_zones))
+ self._log.error("Can not launch VDU: %s in multiple availability zones. " +
+ "Requested Zones: %s", self.name, availability_zones)
+ raise VNFMPlacementGroupError("Can not launch VDU: {} in multiple availability" +
+ " zones. Requsted Zones".format(self.name, availability_zones))
else:
vm_create_msg_dict['availability_zone'] = availability_zones[0]
if server_groups:
if len(server_groups) > 1:
- self._log.error("Can not launch VDU: %s in multiple Server Group. Requested Groups: %s", self.name, server_groups)
- raise VNFMPlacementGroupError("Can not launch VDU: {} in multiple Server Groups. Requsted Groups".format(self.name, server_groups))
+ self._log.error("Can not launch VDU: %s in multiple Server Group. " +
+ "Requested Groups: %s", self.name, server_groups)
+ raise VNFMPlacementGroupError("Can not launch VDU: {} in multiple " +
+ "Server Groups. Requsted Groups".format(self.name, server_groups))
else:
vm_create_msg_dict['server_group'] = server_groups[0]
# Find source file in scripts dir of VNFD
self._log.debug("Checking for source config file at %s", source)
try:
- source_file_str = cloud_init_extractor.read_script(stored_package, source)
+ try:
+ source_file_str = cloud_init_extractor.read_script(stored_package, source)
+ file_item['source'] = source_file_str
+ except rift.package.package.PackageError as e:
+ self._log.info("Invalid package with Package descriptor id")
+
except rift.package.cloud_init.CloudInitExtractionError as e:
raise VirtualDeploymentUnitRecordError(e)
# Update source file location with file contents
- file_item['source'] = source_file_str
return
"volumes",
"supplemental_boot_data"]
+ def make_resmgr_cp_args(intf, cp, vlr):
+ cp_info = dict(name = cp.name,
+ virtual_link_id = vlr.network_id,
+ type_yang = intf.virtual_interface.type_yang)
+
+ if vlr.network_id is None:
+ raise VlrError("Unresolved virtual link id for vlr id:%s, name:%s",
+ (vlr.id, vlr.name))
+
+ if cp.has_field('port_security_enabled'):
+ cp_info["port_security_enabled"] = cp.port_security_enabled
+
+ try:
+ if intf.static_ip_address:
+ cp_info["static_ip_address"] = intf.static_ip_address
+ except AttributeError as e:
+ ### This can happen because of model difference between OSM and RIFT. Ignore exception
+ self._log.debug(str(e))
+
+ if (intf.virtual_interface.has_field('vpci') and
+ intf.virtual_interface.vpci is not None):
+ cp_info["vpci"] = intf.virtual_interface.vpci
+
+ if (vlr.has_field('ip_profile_params')) and (vlr.ip_profile_params.has_field('security_group')):
+ cp_info['security_group'] = vlr.ip_profile_params.security_group
+
+ if vlr.has_field('virtual_connection_points'):
+ virtual_cps = [ vcp for vcp in vlr.virtual_connection_points
+ if [ True for cp_ref in vcp.associated_cps if cp.name == cp_ref ]]
+ if virtual_cps:
+ fields = ['connection_point_id', 'name', 'ip_address', 'mac_address']
+ cp_info['virtual_cps'] = [ {k:v for k,v in vcp.as_dict().items() if k in fields}
+ for vcp in virtual_cps ]
+
+ # Adding Port Sequence Information to cp_info
+ intf_dict = intf.as_dict()
+ if "position" in intf_dict:
+ cp_info["port_order"] = intf.position
+
+ self._log.debug("CP info {}".format(cp_info))
+ return cp_info
+
self._log.debug("Creating params based on VDUD: %s", self._vdud)
vdu_copy_dict = {k: v for k, v in self._vdud.as_dict().items() if k in vdu_fields}
if self._mgmt_network:
vm_create_msg_dict['mgmt_network'] = self._mgmt_network
- cp_list = []
+ cp_list = list()
for intf, cp, vlr in self._ext_intf:
- cp_info = { "name": cp.name,
- "virtual_link_id": vlr.network_id,
- "type_yang": intf.virtual_interface.type_yang }
-
- if cp.has_field('port_security_enabled'):
- cp_info["port_security_enabled"] = cp.port_security_enabled
-
- if (intf.virtual_interface.has_field('vpci') and
- intf.virtual_interface.vpci is not None):
- cp_info["vpci"] = intf.virtual_interface.vpci
-
- if (vlr.has_field('ip_profile_params')) and (vlr.ip_profile_params.has_field('security_group')):
- cp_info['security_group'] = vlr.ip_profile_params.security_group
+ cp_list.append(make_resmgr_cp_args(intf, cp, vlr))
- cp_list.append(cp_info)
-
- for intf, cp, vlr in self._int_intf:
- if (intf.virtual_interface.has_field('vpci') and
- intf.virtual_interface.vpci is not None):
- cp_list.append({"name": cp,
- "virtual_link_id": vlr.network_id,
- "type_yang": intf.virtual_interface.type_yang,
- "vpci": intf.virtual_interface.vpci})
- else:
- if cp.has_field('port_security_enabled'):
- cp_list.append({"name": cp,
- "virtual_link_id": vlr.network_id,
- "type_yang": intf.virtual_interface.type_yang,
- "port_security_enabled": cp.port_security_enabled})
- else:
- cp_list.append({"name": cp,
- "virtual_link_id": vlr.network_id,
- "type_yang": intf.virtual_interface.type_yang})
+ for intf, cp_id, vlr in self._int_intf:
+ cp = self.find_internal_cp_by_cp_id(cp_id)
+ cp_list.append(make_resmgr_cp_args(intf, cp, vlr.msg()))
vm_create_msg_dict["connection_points"] = cp_list
self.process_placement_groups(vm_create_msg_dict)
if 'supplemental_boot_data' in vm_create_msg_dict:
- self.process_custom_bootdata(vm_create_msg_dict)
+ self.process_custom_bootdata(vm_create_msg_dict)
- msg = RwResourceMgrYang.VDUEventData()
+ msg = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData()
msg.event_id = self._request_id
- msg.cloud_account = self.cloud_account_name
+ msg.cloud_account = self.datacenter_name
+
msg.request_info.from_dict(vm_create_msg_dict)
+ for volume in self._vdud.volumes:
+ v = msg.request_info.volumes.add()
+ v.from_dict(volume.as_dict())
+
return msg
@asyncio.coroutine
self._rm_regh = None
if self._vdur_console_handler is not None:
- self._log.error("Deregistering vnfr vdur registration handle")
+ self._log.debug("Deregistering vnfr vdur console registration handle")
self._vdur_console_handler._regh.deregister()
self._vdur_console_handler._regh = None
cp_name)
return cp
- def find_internal_vlr_by_cp_name(cp_name):
- """ Find the VLR corresponding to the connection point name"""
- cp = None
-
- self._log.debug("find_internal_vlr_by_cp_name(%s) called",
- cp_name)
-
- for int_cp in self._vdud.internal_connection_point:
- self._log.debug("Checking for int cp %s in internal connection points",
- int_cp.id)
- if int_cp.id == cp_name:
- cp = int_cp
- break
+ def find_internal_vlr_by_cp_id(cp_id):
+ self._log.debug("find_internal_vlr_by_cp_id(%s) called",
+ cp_id)
- if cp is None:
- self._log.debug("Failed to find cp %s in internal connection points",
- cp_name)
- msg = "Failed to find cp %s in internal connection points" % cp_name
- raise VduRecordError(msg)
+ # Validate the cp
+ cp = self.find_internal_cp_by_cp_id(cp_id)
# return the VLR associated with the connection point
- return vnfr.find_vlr_by_cp(cp_name)
+ return vnfr.find_vlr_by_cp(cp_id)
- block = xact.block_create()
- self._log.debug("Executing vm request id: %s, action: create",
- self._request_id)
-
- # Resolve the networks associated external interfaces
- for ext_intf in self._vdud.external_interface:
- self._log.debug("Resolving external interface name [%s], cp[%s]",
- ext_intf.name, ext_intf.vnfd_connection_point_ref)
- cp = find_cp_by_name(ext_intf.vnfd_connection_point_ref)
+ def add_external_interface(interface):
+ # Add an external interface from vdu interface list
+ cp = find_cp_by_name(interface.external_connection_point_ref)
if cp is None:
self._log.debug("Failed to find connection point - %s",
- ext_intf.vnfd_connection_point_ref)
- continue
+ interface.external_connection_point_ref)
+ return
+
self._log.debug("Connection point name [%s], type[%s]",
cp.name, cp.type_yang)
vlr = vnfr.ext_vlr_by_id(cp.vlr_ref)
- etuple = (ext_intf, cp, vlr)
+ etuple = (interface, cp, vlr)
self._ext_intf.append(etuple)
self._log.debug("Created external interface tuple : %s", etuple)
- # Resolve the networks associated internal interfaces
- for intf in self._vdud.internal_interface:
- cp_id = intf.vdu_internal_connection_point_ref
+ @asyncio.coroutine
+ def add_internal_interface(interface):
+ # Add an internal interface from vdu interface list
+ cp_id = interface.internal_connection_point_ref
self._log.debug("Resolving internal interface name [%s], cp[%s]",
- intf.name, cp_id)
-
+ interface.name, cp_id)
+
+ if cp_id is None:
+ msg = "The Internal Interface : %s is not mapped to an internal connection point." % (interface.name)
+ self._log.error(msg)
+ raise VduRecordError(msg)
+
try:
- vlr = find_internal_vlr_by_cp_name(cp_id)
+ vlr = find_internal_vlr_by_cp_id(cp_id)
+ iter = yield from self._dts.query_read(vlr.vlr_path())
+ for itr in iter:
+ vlr._vlr = (yield from itr).result
except Exception as e:
self._log.debug("Failed to find cp %s in internal VLR list", cp_id)
msg = "Failed to find cp %s in internal VLR list, e = %s" % (cp_id, e)
raise VduRecordError(msg)
- ituple = (intf, cp_id, vlr)
+ ituple = (interface, cp_id, vlr)
self._int_intf.append(ituple)
self._log.debug("Created internal interface tuple : %s", ituple)
+
+ block = xact.block_create()
+
+ self._log.debug("Executing vm request id: %s, action: create",
+ self._request_id)
+
+ # Resolve the networks associated with interfaces ( both internal and external)
+
+ for intf in self._vdud.interface:
+ if intf.type_yang == 'EXTERNAL':
+ self._log.debug("Resolving external interface name [%s], cp[%s]",
+ intf.name, intf.external_connection_point_ref)
+ try:
+ add_external_interface(intf)
+ except Exception as e:
+ msg = "Failed to add external interface %s from vdu interface list, e = %s" % (intf.name, e)
+ self._log.error(msg)
+ raise VduRecordError(msg)
+ elif intf.type_yang == 'INTERNAL':
+ self._log.debug("Resolving internal interface name [%s], cp[%s]",
+ intf.name, intf.internal_connection_point_ref)
+ try:
+ yield from add_internal_interface(intf)
+ except Exception as e:
+ msg = "Failed to add internal interface %s from vdu interface list, e = %s" % (intf.name, e)
+ self._log.error(msg)
+ raise VduRecordError(msg)
+
+
+
resmgr_path = self.resmgr_path
resmgr_msg = self.resmgr_msg(config)
#self._vm_resp = resp.resource_info
return resp.resource_info
-
- @asyncio.coroutine
- def start_component(self):
- """ This VDUR is active """
- self._log.debug("Starting component %s for vdud %s vdur %s",
- self._vdud.vcs_component_ref,
- self._vdud,
- self._vdur_id)
- yield from self._vnfr.start_component(self._vdud.vcs_component_ref,
- self.vm_resp.management_ip)
-
@property
def active(self):
""" Is this VDU active """
self._log.debug("VDUR id %s in VNFR %s is active", self._vdur_id, self._vnfr.vnfr_id)
- if self._vdud.vcs_component_ref is not None:
- yield from self.start_component()
-
self._state = VDURecordState.READY
if self._vnfr.all_vdus_active():
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
try:
+ #Check if resource orchestrator is not rift so that resource manager tasklet is not invoked
+ if self._nsr_config.resource_orchestrator is not None:
+ return
+
reg_event = asyncio.Event(loop=self._loop)
@asyncio.coroutine
class InternalVirtualLinkRecord(object):
""" Internal Virtual Link record """
- def __init__(self, dts, log, loop, ivld_msg, vnfr_name, cloud_account_name, ip_profile=None):
+ def __init__(self, dts, log, loop, project, vnfm,
+ ivld_msg, vnfr_name, datacenter_name, ip_profile=None):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
+ self._vnfm = vnfm
self._ivld_msg = ivld_msg
self._vnfr_name = vnfr_name
- self._cloud_account_name = cloud_account_name
+ self._datacenter_name = datacenter_name
self._ip_profile = ip_profile
self._vlr_req = self.create_vlr()
self._vlr = None
+ self._network_id = None
self._state = VlRecordState.INIT
+ self._state_details = ""
@property
def vlr_id(self):
@property
def network_id(self):
""" Find VLR by id """
- return self._vlr.network_id if self._vlr else None
+ return self._network_id
+
+ @network_id.setter
+ def network_id(self, network_id):
+ """ network id setter"""
+ self._network_id = network_id
+
+ @property
+ def active(self):
+ """ """
+ return self._state == VlRecordState.ACTIVE
+
+ @property
+ def state(self):
+ """ state for this VLR """
+ return self._state
+
+ @property
+ def state_details(self):
+ """ state details for this VLR """
+ return self._state_details
def vlr_path(self):
""" VLR path for this VLR instance"""
- return "D,/vlr:vlr-catalog/vlr:vlr[vlr:id = '{}']".format(self.vlr_id)
+ return self._project.add_project("D,/vlr:vlr-catalog/vlr:vlr[vlr:id={}]".
+ format(quoted_key(self.vlr_id)))
def create_vlr(self):
""" Create the VLR record which will be instantiated """
vlr_dict = {"id": str(uuid.uuid4()),
"name": self.name,
- "cloud_account": self._cloud_account_name,
+ "datacenter": self._datacenter_name,
}
if self._ip_profile and self._ip_profile.has_field('ip_profile_params'):
vlr_dict.update(vld_copy_dict)
- vlr = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.from_dict(vlr_dict)
+ vlr = RwVlrYang.YangData_RwProject_Project_VlrCatalog_Vlr.from_dict(vlr_dict)
+
+ if self._ivld_msg.has_field('virtual_connection_points'):
+ for cp in self._ivld_msg.virtual_connection_points:
+ vcp = vlr.virtual_connection_points.add()
+ vcp.from_dict(cp.as_dict())
+
return vlr
@asyncio.coroutine
self._log.debug("Create VL with xpath %s and vlr %s",
self.vlr_path(), self._vlr_req)
- with self._dts.transaction(flags=0) as xact:
- block = xact.block_create()
- block.add_query_create(xpath=self.vlr_path(), msg=self._vlr_req)
- self._log.debug("Executing VL create path:%s msg:%s",
- self.vlr_path(), self._vlr_req)
-
- res_iter = None
- try:
- res_iter = yield from block.execute()
- except Exception:
+ try:
+ with self._dts.transaction(flags=0) as xact:
+ block = xact.block_create()
+ block.add_query_create(xpath=self.vlr_path(), msg=self._vlr_req)
+ self._log.debug("Executing VL create path:%s msg:%s",
+ self.vlr_path(), self._vlr_req)
+
+ self._state = VlRecordState.INSTANTIATION_PENDING
+ self._state_details = "Oustanding VL create request:%s".format(self.vlr_path())
+ res_iter = None
+ try:
+ res_iter = yield from block.execute()
+ except Exception as e:
+ self._state = VlRecordState.FAILED
+ self._state_details = str(e)
+ self._log.exception("Caught exception while instantial VL")
+ raise
+
+ for ent in res_iter:
+ res = yield from ent
+ self._vlr = res.result
+
+ if self._vlr.operational_status == 'failed':
+ self._log.debug("VL creation failed for vlr id %s", self._vlr.id)
self._state = VlRecordState.FAILED
- self._log.exception("Caught exception while instantial VL")
- raise
+ self._state_details = self._vlr.operational_status_details
+ raise VnfrInstantiationFailed("instantiation due to VL failure %s" % (self._vlr.id))
- for ent in res_iter:
- res = yield from ent
- self._vlr = res.result
-
- if self._vlr.operational_status == 'failed':
- self._log.debug("VL creation failed for vlr id %s", self._vlr.id)
- self._state = VlRecordState.FAILED
- raise VnfrInstantiationFailed("instantiation due to VL failure %s" % (self._vlr.id))
+ except Exception as e:
+ self._log.error("Caught exception while instantiating VL:%s:%s, e:%s",
+ self.vlr_id, self._vlr.name, e)
+ self._state_details = str(e)
+ raise
self._log.info("Created VL with xpath %s and vlr %s",
self.vlr_path(), self._vlr)
else:
yield from instantiate_vlr()
- self._state = VlRecordState.ACTIVE
def vlr_in_vns(self):
""" Is there a VLR record in VNS """
if (self._state == VlRecordState.ACTIVE or
- self._state == VlRecordState.INSTANTIATION_PENDING or
- self._state == VlRecordState.FAILED):
+ self._state == VlRecordState.INSTANTIATION_PENDING or
+ self._state == VlRecordState.FAILED):
return True
return False
self._log.debug("Terminating VL with path %s", self.vlr_path())
self._state = VlRecordState.TERMINATE_PENDING
+ self._state_details = "VL Terminate pending"
block = xact.block_create()
block.add_query_delete(self.vlr_path())
yield from block.execute(flags=0, now=True)
self._state = VlRecordState.TERMINATED
+ self._state_details = "VL Terminated"
self._log.debug("Terminated VL with path %s", self.vlr_path())
+ def set_state_from_op_status(self, operational_status, operational_status_details):
+ """ Set the state of this VL based on operational_status"""
+
+ self._state_details = operational_status_details
+
+ if operational_status == 'running':
+ self._log.info("VL %s moved to active state", self.vlr_id)
+ self._state = VlRecordState.ACTIVE
+ elif operational_status == 'failed':
+ self._log.info("VL %s moved to failed state", self.vlr_id)
+ self._state = VlRecordState.FAILED
+ elif operational_status == 'vl_alloc_pending':
+ self._log.debug("VL %s is in alloc pending state", self.vlr_id)
+ self._state = VlRecordState.INSTANTIATION_PENDING
+ else:
+ raise VirtualLinkRecordError("Unknown operational_status %s" % (operational_status))
+
+ def msg(self):
+ """ Get a proto corresponding to this VLR """
+ msg = self._vlr
+ return msg
+
class VirtualNetworkFunctionRecord(object):
""" Virtual Network Function Record """
- def __init__(self, dts, log, loop, cluster_name, vnfm, vcs_handler, vnfr_msg, mgmt_network=None):
+ def __init__(self, dts, log, loop, cluster_name, vnfm, vnfr_msg,
+ mgmt_network=None, external_ro=False):
self._dts = dts
self._log = log
- self._loop = loop
+ self._loop = loop###
+ self._project = vnfm._project
self._cluster_name = cluster_name
self._vnfr_msg = vnfr_msg
self._vnfr_id = vnfr_msg.id
self._vnfd_id = vnfr_msg.vnfd.id
self._vnfm = vnfm
- self._vcs_handler = vcs_handler
self._vnfr = vnfr_msg
self._mgmt_network = mgmt_network
self._state = VirtualNetworkFunctionRecordState.INIT
self._state_failed_reason = None
self._ext_vlrs = {} # The list of external virtual links
- self._vlrs = [] # The list of internal virtual links
+ self._vlrs = {} # The list of internal virtual links
self._vdus = [] # The list of vdu
self._vlr_by_cp = {}
self._cprs = []
self._create_time = int(time.time())
self._vnf_mon = None
self._config_status = vnfr_msg.config_status
- self._vnfd_package_store = rift.package.store.VnfdPackageFilesystemStore(self._log)
+ self._vnfd_package_store = rift.package.store.VnfdPackageFilesystemStore(self._log, project=self._project.name)
self._rw_vnfd = None
self._vnfd_ref_count = 0
+ self._ssh_pub_key = None
+ self._ssh_key_file = None
+ self._task = None
+ # Create an asyncio loop to know when the virtual links are ready
+ self._vls_ready = asyncio.Event(loop=self._loop)
+
+ # Counter for pre-init VNFR State Update DTS Query
+ self._init = False
+ self._external_ro = external_ro
+
def _get_vdur_from_vdu_id(self, vdu_id):
self._log.debug("Finding vdur for vdu_id %s", vdu_id)
self._log.debug("Searching through vdus: %s", self._vdus)
@property
def operational_status(self):
""" Operational status of this VNFR """
- op_status_map = {"INIT": "init",
+ op_status_map = {"PRE_INIT": "pre_init",
+ "INIT": "init",
"VL_INIT_PHASE": "vl_init_phase",
"VM_INIT_PHASE": "vm_init_phase",
"READY": "running",
@staticmethod
def vnfd_xpath(vnfd_id):
""" VNFD xpath associated with this VNFR """
- return "C,/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id = '{}']".format(vnfd_id)
+ return ("C,/project-vnfd:vnfd-catalog/project-vnfd:vnfd[project-vnfd:id={}]".
+ format(quoted_key(vnfd_id)))
+
+ @property
+ def external_ro(self):
+ return self._external_ro
+
+ @property
+ def task(self):
+ return self._task
+
+ @task.setter
+ def task(self, task):
+ self._task = task
@property
def vnfd_ref_count(self):
return self._vnfr.name
@property
- def cloud_account_name(self):
+ def datacenter_name(self):
""" Name of the cloud account this VNFR is instantiated in """
- return self._vnfr.cloud_account
+ return self._vnfr.datacenter
@property
def vnfd_id(self):
""" Config agent status for this VNFR """
return self._config_status
- def component_by_name(self, component_name):
- """ Find a component by name in the inventory list"""
- mangled_name = VcsComponent.mangle_name(component_name,
- self.vnf_name,
- self.vnfd_id)
- return self._inventory[mangled_name]
-
-
+ @property
+ def public_key(self):
+ return self._ssh_pub_key
@asyncio.coroutine
def get_nsr_config(self):
### Need access to NS instance configuration for runtime resolution.
### This shall be replaced when deployment flavors are implemented
- xpath = "C,/nsr:ns-instance-config"
+ xpath = self._project.add_project("C,/nsr:ns-instance-config")
results = yield from self._dts.query_read(xpath, rwdts.XactFlag.MERGE)
for result in results:
return None
@asyncio.coroutine
- def start_component(self, component_name, ip_addr):
- """ Start a component in the VNFR by name """
- comp = self.component_by_name(component_name)
- yield from comp.start(None, None, ip_addr)
+ def get_nsr_opdata(self):
+ """ NSR opdata associated with this VNFR """
+ xpath = self._project.add_project(
+ "D,/nsr:ns-instance-opdata/nsr:nsr" \
+ "[nsr:ns-instance-config-ref={}]". \
+ format(quoted_key(self._vnfr_msg.nsr_id_ref)))
+
+ results = yield from self._dts.query_read(xpath, rwdts.XactFlag.MERGE)
+
+ for result in results:
+ entry = yield from result
+ nsr_op = entry.result
+ return nsr_op
+
+ return None
+
def cp_ip_addr(self, cp_name):
""" Get ip address for connection point """
vnfd_fields = ["short_name", "vendor", "description", "version"]
vnfd_copy_dict = {k: v for k, v in self.vnfd.as_dict().items() if k in vnfd_fields}
- mgmt_intf = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MgmtInterface()
+ mgmt_intf = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MgmtInterface()
ip_address, port = self.mgmt_intf_info()
- if ip_address is not None:
+ if ip_address:
mgmt_intf.ip_address = ip_address
if port is not None:
mgmt_intf.port = port
+ if self._ssh_pub_key:
+ mgmt_intf.ssh_key.public_key = self._ssh_pub_key
+ mgmt_intf.ssh_key.private_key_file = self._ssh_key_file
+
vnfr_dict = {"id": self._vnfr_id,
"nsr_id_ref": self._vnfr_msg.nsr_id_ref,
"name": self.name,
"member_vnf_index_ref": self.member_vnf_index,
"operational_status": self.operational_status,
"operational_status_details": self._state_failed_reason,
- "cloud_account": self.cloud_account_name,
+ "datacenter": self.datacenter_name,
"config_status": self._config_status
}
vnfr_dict.update(vnfd_copy_dict)
- vnfr_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
- vnfr_msg.vnfd = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vnfd.from_dict(self.vnfd.as_dict())
+ vnfr_msg = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.from_dict(vnfr_dict)
+ vnfr_msg.vnfd = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vnfd.from_dict(self.vnfd.as_dict())
vnfr_msg.create_time = self._create_time
vnfr_msg.uptime = int(time.time()) - self._create_time
vnfr_msg.mgmt_interface = mgmt_intf
# Add all the VLRs to VNFR
- for vlr in self._vlrs:
+ for vlr_id, vlr in self._vlrs.items():
ivlr = vnfr_msg.internal_vlr.add()
ivlr.vlr_ref = vlr.vlr_id
- # Add all the VDURs to VDUR
+ # Add all the VDUs to VDUR
if self._vdus is not None:
for vdu in self._vdus:
vdur = vnfr_msg.vdur.add()
vnfr_msg.dashboard_url = self.dashboard_url
for cpr in self._cprs:
- new_cp = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr.as_dict())
+ new_cp = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr.as_dict())
vnfr_msg.connection_point.append(new_cp)
if self._vnf_mon is not None:
for monp in self._vnf_mon.msg:
vnfr_msg.monitoring_param.append(
- VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_MonitoringParam.from_dict(monp.as_dict()))
+ VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_MonitoringParam.from_dict(monp.as_dict()))
if self._vnfr.vnf_configuration is not None:
vnfr_msg.vnf_configuration.from_dict(self._vnfr.vnf_configuration.as_dict())
- if (ip_address is not None and
- vnfr_msg.vnf_configuration.config_access.mgmt_ip_address is None):
- vnfr_msg.vnf_configuration.config_access.mgmt_ip_address = ip_address
for group in self._vnfr_msg.placement_groups_info:
- group_info = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_PlacementGroupsInfo()
+ group_info = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_PlacementGroupsInfo()
group_info.from_dict(group.as_dict())
vnfr_msg.placement_groups_info.append(group_info)
return vnfr_msg
+ @asyncio.coroutine
+ def update_config(self, msg, xact):
+ self._log.debug("VNFM vnf config: {}".
+ format(msg.vnf_configuration.as_dict()))
+ self._config_status = msg.config_status
+ self._vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.from_dict(
+ msg.as_dict())
+ self._log.debug("VNFR msg config: {}".
+ format(self._vnfr.as_dict()))
+
+ yield from self.publish(xact)
+
+ @asyncio.coroutine
+ def update_vnfr_after_substitution(self, msg, xact):
+ self._log.debug("Updating VNFR after Input Param Substitution: {}".
+ format(msg.as_dict()))
+ self._state = VirtualNetworkFunctionRecordState.INIT
+ self._vnfd = msg.vnfd
+ msg.operational_status = 'init'
+ self._vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.from_dict(
+ msg.as_dict())
+
+ self._log.debug("VNFR updated: {}".
+ format(self._vnfr.as_dict()))
+ yield from self.publish(xact)
+
@property
def dashboard_url(self):
ip, cfg_port = self.mgmt_intf_info()
@property
def xpath(self):
""" path for this VNFR """
- return("D,/vnfr:vnfr-catalog"
- "/vnfr:vnfr[vnfr:id='{}']".format(self.vnfr_id))
+ return self._project.add_project("D,/vnfr:vnfr-catalog"
+ "/vnfr:vnfr[vnfr:id={}]".format(quoted_key(self.vnfr_id)))
@asyncio.coroutine
def publish(self, xact):
vlr = InternalVirtualLinkRecord(dts=self._dts,
log=self._log,
loop=self._loop,
+ project=self._project,
+ vnfm=self._vnfm,
ivld_msg=ivld_msg,
vnfr_name=self.name,
- cloud_account_name=self.cloud_account_name,
+ datacenter_name=self.datacenter_name,
ip_profile=self.resolve_vld_ip_profile(self.vnfd, ivld_msg)
)
- self._vlrs.append(vlr)
+ self._vlrs[vlr.vlr_id] = vlr
+ self._vnfm.add_vlr_id_vnfr_map(vlr.vlr_id, self)
for int_cp in ivld_msg.internal_connection_point:
if int_cp.id_ref in self._vlr_by_cp:
self._log.debug("Instantiating Internal Virtual Links for vnfd id: %s",
self.vnfd_id)
- for vlr in self._vlrs:
+ for vlr_id, vlr in self._vlrs.items():
self._log.debug("Instantiating VLR %s", vlr)
yield from vlr.instantiate(xact, restart_mode)
+ # Wait for the VLs to be ready before yielding control out
+ if self._vlrs:
+ self._log.debug("VNFR id:%s, name:%s - Waiting for %d VLs to be ready",
+ self.vnfr_id, self.name, len(self._vlrs))
+ yield from self._vls_ready.wait()
+ else:
+ self._log.debug("VNFR id:%s, name:%s, No virtual links found",
+ self.vnfr_id, self.name)
+ self._vls_ready.set()
+
def find_vlr_by_cp(self, cp_name):
""" Find the VLR associated with the cp name """
return self._vlr_by_cp[cp_name]
for group_info in nsr_config.vnfd_placement_group_maps:
if group_info.placement_group_ref == input_group.name and \
group_info.vnfd_id_ref == self.vnfd_id:
- group = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_PlacementGroupsInfo()
+ group = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_PlacementGroupsInfo()
group_dict = {k:v for k,v in
group_info.as_dict().items()
if (k != 'placement_group_ref' and k !='vnfd_id_ref')}
placement_groups = []
### Step-1: Get VNF level placement groups
for group in self._vnfr_msg.placement_groups_info:
- #group_info = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_PlacementGroupsInfo()
+ #group_info = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_PlacementGroupsInfo()
#group_info.from_dict(group.as_dict())
placement_groups.append(group)
group_info = self.resolve_placement_group_cloud_construct(group,
nsr_config)
if group_info is None:
- self._log.info("Could not resolve cloud-construct for placement group: %s", group.name)
- ### raise VNFMPlacementGroupError("Could not resolve cloud-construct for placement group: {}".format(group.name))
+ self._log.info("Could not resolve cloud-construct for " +
+ "placement group: %s", group.name)
else:
- self._log.info("Successfully resolved cloud construct for placement group: %s for VDU: %s in VNF: %s (Member Index: %s)",
+ self._log.info("Successfully resolved cloud construct for " +
+ "placement group: %s for VDU: %s in VNF: %s (Member Index: %s)",
str(group_info),
vdu.name,
self.vnf_name,
return placement_groups
+ @asyncio.coroutine
+ def substitute_vdu_input_parameters(self, vdu):
+ result = vdu
+ for vdu_vnfr in self.vnfd.vdu:
+ if vdu["id"] == vdu_vnfr.id:
+ result = vdu_vnfr.as_dict()
+ break
+
+ return RwVnfdYang.YangData_Vnfd_VnfdCatalog_Vnfd_Vdu.from_dict(result)
+
+
@asyncio.coroutine
def vdu_cloud_init_instantiation(self):
[vdu.vdud_cloud_init for vdu in self._vdus]
[ group.name for group in placement_groups],
vdur_id)
+ # Update VDU Info from VNFR (It contains the input parameter for VDUs as well)
+ vdu_updated = yield from self.substitute_vdu_input_parameters(vdu.as_dict())
+
vdur = VirtualDeploymentUnitRecord(
dts=self._dts,
log=self._log,
loop=self._loop,
- vdud=vdu,
+ project = self._project,
+ vdud=vdu_updated,
vnfr=vnfr,
nsr_config=nsr_config,
mgmt_intf=self.has_mgmt_interface(vdu),
mgmt_network=self._mgmt_network,
- cloud_account_name=self.cloud_account_name,
+ datacenter_name=self.datacenter_name,
vnfd_package_store=self._vnfd_package_store,
vdur_id=vdur_id,
placement_groups = placement_groups,
VirtualDeploymentUnitRecordError is raised.
"""
+
for dependency in dependencies[vdu.vdu_id]:
self._log.debug("{}: waiting for {}".format(vdu.vdu_id, dependency.vdu_id))
# Substitute any variables contained in the cloud config script
config = str(vdu.vdud_cloud_init) if vdu.vdud_cloud_init is not None else ""
-
+
parts = re.split("\{\{ ([^\}]+) \}\}", config)
+
if len(parts) > 1:
# Extract the variable names
# Iterate of the variables and substitute values from the
# datastore.
+
for variable in variables:
# Handle a reference to a VDU by ID
config = config.replace("{{ %s }}" % variable, value)
continue
+ # Handle a reference to Cloud Init Variables: Start with 'CI'
+ if variable.startswith('CI'):
+ custom_meta_data = datastore.get('vdu[{}]'.format(vdu.vdu_id) + ".custom_meta_data")
+ try:
+ for meta_data in custom_meta_data:
+ if meta_data.destination == 'CLOUD_INIT':
+ if meta_data.name == variable:
+ config = config.replace("{{ %s }}" % variable, meta_data.value)
+ except Exception:
+ raise ValueError("Unrecognized Cloud Init Variable")
+
+ continue
+
# Handle unrecognized variables
msg = 'unrecognized cloud-config variable: {}'
raise ValueError(msg.format(variable))
def vlr_xpath(self, vlr_id):
""" vlr xpath """
- return(
- "D,/vlr:vlr-catalog/"
- "vlr:vlr[vlr:id = '{}']".format(vlr_id))
+ return self._project.add_project("D,/vlr:vlr-catalog/"
+ "vlr:vlr[vlr:id={}]".format(quoted_key(vlr_id)))
def ext_vlr_by_id(self, vlr_id):
""" find ext vlr by id """
return self._ext_vlrs[vlr_id]
- @asyncio.coroutine
- def publish_inventory(self, xact):
- """ Publish the inventory associated with this VNF """
- self._log.debug("Publishing inventory for VNFR id: %s", self._vnfr_id)
-
- for component in self._rw_vnfd.component:
- self._log.debug("Creating inventory component %s", component)
- mangled_name = VcsComponent.mangle_name(component.component_name,
- self.vnf_name,
- self.vnfd_id
- )
- comp = VcsComponent(dts=self._dts,
- log=self._log,
- loop=self._loop,
- cluster_name=self._cluster_name,
- vcs_handler=self._vcs_handler,
- component=component,
- mangled_name=mangled_name,
- )
- if comp.name in self._inventory:
- self._log.debug("Duplicate entries in inventory %s for vnfr %s",
- component, self._vnfd_id)
- return
- self._log.debug("Adding component %s for vnrf %s",
- comp.name, self._vnfr_id)
- self._inventory[comp.name] = comp
- yield from comp.publish(xact)
-
def all_vdus_active(self):
""" Are all VDUS in this VNFR active? """
for vdu in self._vdus:
# Update the VNFR with the changed status
yield from self.publish(None)
- def update_cp(self, cp_name, ip_address, mac_addr, cp_id):
+ def update_cp(self, cp_name, ip_address, mac_addr, cp_id, virtual_cps = list()):
"""Updated the connection point with ip address"""
for cp in self._cprs:
if cp.name == cp_name:
cp.ip_address = ip_address
cp.mac_address = mac_addr
cp.connection_point_id = cp_id
+ if virtual_cps:
+ cp.virtual_cps = [VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_ConnectionPoint_VirtualCps.from_dict(v) for v in virtual_cps]
return
err = "No connection point %s found in VNFR id %s" % (cp.name, self._vnfr_id)
@asyncio.coroutine
def instantiate(self, xact, restart_mode=False):
""" instantiate this VNF """
+ self._log.info("Instantiate VNF {}: {}".format(self._vnfr_id, self._state))
self.set_state(VirtualNetworkFunctionRecordState.VL_INIT_PHASE)
self._rw_vnfd = yield from self._vnfm.fetch_vnfd(self._vnfd_id)
+ nsr_op = yield from self.get_nsr_opdata()
+ if nsr_op:
+ self._ssh_key_file = nsr_op.ssh_key_generated.private_key_file
+ self._ssh_pub_key = nsr_op.ssh_key_generated.public_key
+
@asyncio.coroutine
def fetch_vlrs():
""" Fetch VLRs """
def cpr_from_cp(cp):
""" Creates a record level connection point from the desciptor cp"""
- cp_fields = ["name", "image", "vm-flavor", "port_security_enabled"]
+ cp_fields = ["name", "image", "vm-flavor", "port_security_enabled", "type_yang"]
cp_copy_dict = {k: v for k, v in cp.as_dict().items() if k in cp_fields}
cpr_dict = {}
cpr_dict.update(cp_copy_dict)
- return VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr_dict)
+ return VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_ConnectionPoint.from_dict(cpr_dict)
self._log.debug("Fetching VLRs for VNFR id = %s, cps = %s",
self._vnfr_id, self._vnfr.connection_point)
vlr_path = self.vlr_xpath(cp.vlr_ref)
self._log.debug("Fetching VLR with path = %s", vlr_path)
- res_iter = yield from self._dts.query_read(self.vlr_xpath(cp.vlr_ref),
+ res_iter = yield from self._dts.query_read(vlr_path,
rwdts.XactFlag.MERGE)
for i in res_iter:
r = yield from i
self._log.debug("VNFR-ID %s: Fetching vlrs", self._vnfr_id)
yield from fetch_vlrs()
- # Publish inventory
- self._log.debug("VNFR-ID %s: Publishing Inventory", self._vnfr_id)
- yield from self.publish_inventory(xact)
-
- # Publish inventory
+ # Publish VLs
self._log.debug("VNFR-ID %s: Creating VLs", self._vnfr_id)
yield from self.create_vls()
# publish the VNFR
- self._log.debug("VNFR-ID %s: Publish VNFR", self._vnfr_id)
+ self._log.debug("Publish VNFR {}: {}".format(self._vnfr_id, self._state))
yield from self.publish(xact)
yield from self.instantiation_failed(str(e))
return
+ vl_state, failed_vl = self.vl_instantiation_state()
+ if vl_state == VlRecordState.FAILED:
+ self._log.error("VL Instantiation failed for one or more of the internal virtual links, vl:%s",failed_vl)
+ yield from self.instantiation_failed(failed_vl.state_details)
+ return
+
self.set_state(VirtualNetworkFunctionRecordState.VM_INIT_PHASE)
# instantiate VDUs
yield from self.publish(xact)
# publish the VNFR
- self._log.debug("VNFR-ID %s: Publish VNFR", self._vnfr_id)
+ self._log.debug("VNFR {}: Publish VNFR with state {}".
+ format(self._vnfr_id, self._state))
yield from self.publish(xact)
# instantiate VDUs
# ToDo: Check if this should be prevented during restart
- self._log.debug("VNFR-ID %s: Instantiate VDUs", self._vnfr_id)
+ self._log.debug("Instantiate VDUs {}: {}".format(self._vnfr_id, self._state))
_ = self._loop.create_task(self.instantiate_vdus(xact, self))
# publish the VNFR
self._log.debug("VNFR-ID %s: Instantiation Done", self._vnfr_id)
- # create task updating uptime for this vnfr
- self._log.debug("VNFR-ID %s: Starting task to update uptime", self._vnfr_id)
- self._loop.create_task(self.vnfr_uptime_update(xact))
-
@asyncio.coroutine
def terminate(self, xact):
""" Terminate this virtual network function """
+ if self._task:
+ self._log.debug("Canceling scheduled tasks for VNFR %s", self._vnfr_id)
+ self._task.cancel()
+
self._log.debug("Terminatng VNF id %s", self.vnfr_id)
self.set_state(VirtualNetworkFunctionRecordState.TERMINATE)
@asyncio.coroutine
def terminate_vls():
""" Terminate VLs in this VNF """
- for vl in self._vlrs:
+ for vlr_id, vl in self._vlrs.items():
+ self._vnfm.remove_vlr_id_vnfr_map(vlr_id)
yield from vl.terminate(xact)
@asyncio.coroutine
self._log.debug("Terminated VNF id %s", self.vnfr_id)
self.set_state(VirtualNetworkFunctionRecordState.TERMINATED)
- @asyncio.coroutine
- def vnfr_uptime_update(self, xact):
- while True:
- # Return when vnfr state is FAILED or TERMINATED etc
- if self._state not in [VirtualNetworkFunctionRecordState.INIT,
- VirtualNetworkFunctionRecordState.VL_INIT_PHASE,
- VirtualNetworkFunctionRecordState.VM_INIT_PHASE,
- VirtualNetworkFunctionRecordState.READY]:
- return
- yield from self.publish(xact)
- yield from asyncio.sleep(2, loop=self._loop)
+ # Unref the VNFD
+ self.vnfd_unref()
+
+ def vl_instantiation_state(self):
+ """ Get the state of VL instantiation of this VNF """
+ failed_vl = None
+ for vl_id, vlr in self._vlrs.items():
+ if vlr.state == VlRecordState.ACTIVE:
+ continue
+ elif vlr.state == VlRecordState.FAILED:
+ failed_vl = vlr
+ return VlRecordState.FAILED, failed_vl
+ elif vlr.state == VlRecordState.INSTANTIATION_PENDING:
+ failed_vl = vlr, failed_vl
+ return VlRecordState.INSTANTIATION_PENDING, failed_vl
+ else:
+ self._log.debug("vlr %s still in state %s", vlr, vlr.state)
+ raise VlRecordError("Invalid state %s", vlr.state)
+ return VlRecordState.ACTIVE, failed_vl
+
+ def vl_instantiation_successful(self):
+ """ Mark that all VLs in this VNF are active """
+ if self._vls_ready.is_set():
+ self._log.debug("VNFR id %s, vls_ready is already set", self.id)
+
+ vl_state, failed_vl = self.vl_instantiation_state()
+
+ if vl_state == VlRecordState.ACTIVE:
+ self._log.info("VNFR id:%s name:%s has all Virtual Links in active state, Ready to orchestrate VDUs",
+ self.vnfr_id, self.name)
+ self._vls_ready.set()
+
+ elif vl_state == VlRecordState.FAILED:
+ self._log.error("VNFR id:%s name:%s One of the Virtual Links failed to reach active state.Failed to orchestrate VNF",
+ self.vnfr_id, self.name)
+ self.instantiation_failed("VNFR id %s: failed since VL %s did not come up".format(self.vnfr_id, failed_vl.name))
+ self._vls_ready.set()
+
+ def find_vlr(self, vlr_id):
+ """ Find VLR matching the passed VLR id """
+ if vlr_id in self._vlrs:
+ return self._vlrs[vlr_id]
+ return None
+
+ def vlr_event(self, vlr, action):
+ self._log.debug("Received VLR %s with action:%s", vlr, action)
+
+ vlr_local = self.find_vlr(vlr.id)
+ if vlr_local is None:
+ self._log.error("VLR %s:%s received for unknown id, state:%s ignoring event",
+ vlr.id, vlr.name, vlr.state)
+ return
+
+ if action == rwdts.QueryAction.CREATE or action == rwdts.QueryAction.UPDATE:
+ if vlr.operational_status == 'running':
+ vlr_local.set_state_from_op_status(vlr.operational_status, vlr.operational_status_details)
+ self._log.info("VLR %s:%s moving to active state",
+ vlr.id, vlr.name)
+ elif vlr.operational_status == 'failed':
+ vlr_local.set_state_from_op_status(vlr.operational_status, vlr.operational_status_details)
+ self._log.info("VLR %s:%s moving to failed state",
+ vlr.id, vlr.name)
+ else:
+ self._log.warning("VLR %s:%s received state:%s",
+ vlr.id, vlr.name, vlr.operational_status)
+
+ if vlr.has_field('network_id'):
+ vlr_local.network_id = vlr.network_id
+
+ # Check if vl instantiation successful for this VNFR
+ self.vl_instantiation_successful()
class VnfdDtsHandler(object):
""" DTS handler for VNFD config changes """
- XPATH = "C,/vnfd:vnfd-catalog/vnfd:vnfd"
+ XPATH = "C,/project-vnfd:vnfd-catalog/project-vnfd:vnfd"
def __init__(self, dts, log, loop, vnfm):
self._dts = dts
self._loop = loop
self._vnfm = vnfm
self._regh = None
+ self._reg_ready = 0
@asyncio.coroutine
def regh(self):
""" DTS registration handle """
return self._regh
+ def deregister(self):
+ '''De-register from DTS'''
+ self._log.debug("De-register VNFD DTS handler for project {}".
+ format(self._vnfm._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Register for VNFD configuration"""
+ @asyncio.coroutine
def on_apply(dts, acg, xact, action, scratch):
"""Apply the configuration"""
self._log.debug("Got VNFM VNFD apply (xact: %s) (action: %s)(scr: %s)",
xact, action, scratch)
is_recovery = xact.xact is None and action == rwdts.AppconfAction.INSTALL
+ # Create/Update a VNFD record
+ if self._regh:
+ for cfg in self._regh.get_xact_elements(xact):
+ # Only interested in those VNFD cfgs whose ID was received in prepare callback
+ if cfg.id in scratch.get('vnfds', []) or is_recovery:
+ self._vnfm.update_vnfd(cfg)
+ else:
+ self._log.warning("Reg handle none for {} in project {}".
+ format(self.__class__, self._vnfm._project))
+
+ scratch.pop('vnfds', None)
+
+ if is_recovery:
+ #yield from self._vnfm.vnfr_handler.register()
+ #yield from self._vnfm.vnfr_ref_handler.register()
+ self._reg_ready = 1
@asyncio.coroutine
def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
""" on prepare callback """
- self._log.debug("Got on prepare for VNFD (path: %s) (action: %s)",
- ks_path.to_xpath(RwVnfmYang.get_schema()), msg)
+ xpath = ks_path.to_xpath(RwVnfmYang.get_schema())
+ self._log.debug("Got on prepare for VNFD (path: %s) (action: %s) (msg: %s)",
+ xpath,
+ xact_info.query_action, msg)
fref = ProtobufC.FieldReference.alloc()
fref.goto_whole_message(msg.to_pbcm())
self._log.debug("Deleting VNFD with id %s", msg.id)
if self._vnfm.vnfd_in_use(msg.id):
self._log.debug("Cannot delete VNFD in use - %s", msg)
- err = "Cannot delete a VNFD in use - %s" % msg
- raise VirtualNetworkFunctionDescriptorRefCountExists(err)
+ err_msg = "Cannot delete a VNFD in use - %s" % msg
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE, xpath, err_msg)
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK, xpath)
+ return
# Delete a VNFD record
yield from self._vnfm.delete_vnfd(msg.id)
- xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ try:
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ except rift.tasklets.dts.ResponseError as e:
+ self._log.warning(
+ "VnfdDtsHandler in project {} with path {} for action {} failed: {}".
+ format(self._vnfm._project, xpath, xact_info.query_action, e))
+
+ xpath = self._vnfm._project.add_project(VnfdDtsHandler.XPATH)
+ self._log.debug("Registering for VNFD config using xpath: {}".
+ format(xpath))
- self._log.debug(
- "Registering for VNFD config using xpath: %s",
- VnfdDtsHandler.XPATH,
- )
acg_hdl = rift.tasklets.AppConfGroup.Handler(on_apply=on_apply)
with self._dts.appconf_group_create(handler=acg_hdl) as acg:
self._regh = acg.register(
- xpath=VnfdDtsHandler.XPATH,
+ xpath=xpath,
flags=rwdts.Flag.SUBSCRIBER | rwdts.Flag.DELTA_READY,
on_prepare=on_prepare)
-
-class VcsComponentDtsHandler(object):
- """ Vcs Component DTS handler """
- XPATH = ("D,/rw-manifest:manifest" +
- "/rw-manifest:operational-inventory" +
- "/rw-manifest:component")
-
- def __init__(self, dts, log, loop, vnfm):
- self._dts = dts
- self._log = log
- self._loop = loop
- self._regh = None
- self._vnfm = vnfm
-
- @property
- def regh(self):
- """ DTS registration handle """
- return self._regh
-
- @asyncio.coroutine
- def register(self):
- """ Registers VCS component dts publisher registration"""
- self._log.debug("VCS Comp publisher DTS handler registering path %s",
- VcsComponentDtsHandler.XPATH)
-
- hdl = rift.tasklets.DTS.RegistrationHandler()
- handlers = rift.tasklets.Group.Handler()
- with self._dts.group_create(handler=handlers) as group:
- self._regh = group.register(xpath=VcsComponentDtsHandler.XPATH,
- handler=hdl,
- flags=(rwdts.Flag.PUBLISHER |
- rwdts.Flag.NO_PREP_READ |
- rwdts.Flag.DATASTORE),)
-
- @asyncio.coroutine
- def publish(self, xact, path, msg):
- """ Publishes the VCS component """
- self._log.debug("Publishing the VcsComponent xact = %s, %s:%s",
- xact, path, msg)
- self.regh.create_element(path, msg)
- self._log.debug("Published the VcsComponent to %s xact = %s, %s:%s",
- VcsComponentDtsHandler.XPATH, xact, path, msg)
-
class VnfrConsoleOperdataDtsHandler(object):
- """ registers 'D,/vnfr:vnfr-console/vnfr:vnfr[id]/vdur[id]' and handles CRUD from DTS"""
+ """
+ Registers 'D,/rw-project:project/vnfr:vnfr-console/vnfr:vnfr[id]/vdur[id]'
+ and handles CRUD from DTS
+ """
+
@property
def vnfr_vdu_console_xpath(self):
""" path for resource-mgr"""
- return ("D,/rw-vnfr:vnfr-console/rw-vnfr:vnfr[rw-vnfr:id='{}']/rw-vnfr:vdur[vnfr:id='{}']".format(self._vnfr_id,self._vdur_id))
+ return self._project.add_project(
+ "D,/rw-vnfr:vnfr-console/rw-vnfr:vnfr[rw-vnfr:id={}]".format(quoted_key(self._vnfr_id)) +
+ "/rw-vnfr:vdur[vnfr:id={}]".format(quoted_key(self._vdur_id)))
def __init__(self, dts, log, loop, vnfm, vnfr_id, vdur_id, vdu_id):
self._dts = dts
self._vdur_id = vdur_id
self._vdu_id = vdu_id
+ self._project = vnfm._project
+
+ def deregister(self):
+ '''De-register from DTS'''
+ self._log.debug("De-register VNFR console DTS handler for project {}".
+ format(self._project))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Register for VNFR VDU Operational Data read from dts """
)
if action == rwdts.QueryAction.READ:
- schema = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur.schema()
+ schema = RwVnfrYang.YangData_RwProject_Project_VnfrConsole_Vnfr_Vdur.schema()
path_entry = schema.keyspec_to_entry(ks_path)
self._log.debug("VDU Opdata path is {}".format(path_entry.key00.id))
try:
return
with self._dts.transaction() as new_xact:
resp = yield from vdur.read_resource(new_xact)
- vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
+ vdur_console = RwVnfrYang.YangData_RwProject_Project_VnfrConsole_Vnfr_Vdur()
vdur_console.id = self._vdur_id
if resp.console_url:
vdur_console.console_url = resp.console_url
self._log.debug("Recevied console URL for vdu {} is {}".format(self._vdu_id,vdur_console))
except Exception:
self._log.exception("Caught exception while reading VDU %s", self._vdu_id)
- vdur_console = RwVnfrYang.YangData_RwVnfr_VnfrConsole_Vnfr_Vdur()
+ vdur_console = RwVnfrYang.YangData_RwProject_Project_VnfrConsole_Vnfr_Vdur()
vdur_console.id = self._vdur_id
vdur_console.console_url = 'none'
xact_info.respond_xpath(rsp_code=rwdts.XactRspCode.ACK,
- xpath=self.vnfr_vdu_console_xpath,
- msg=vdur_console)
+ xpath=self.vnfr_vdu_console_xpath,
+ msg=vdur_console)
else:
#raise VnfRecordError("Not supported operation %s" % action)
self._log.error("Not supported operation %s" % action)
class VnfrDtsHandler(object):
- """ registers 'D,/vnfr:vnfr-catalog/vnfr:vnfr' and handles CRUD from DTS"""
+ """ registers 'D,/rw-project:project/vnfr:vnfr-catalog/vnfr:vnfr' and handles CRUD from DTS"""
XPATH = "D,/vnfr:vnfr-catalog/vnfr:vnfr"
def __init__(self, dts, log, loop, vnfm):
self._vnfm = vnfm
self._regh = None
+ self._project = vnfm._project
@property
def regh(self):
""" Return VNF manager instance """
return self._vnfm
+ def deregister(self):
+ '''De-register from DTS'''
+ self._log.debug("De-register VNFR DTS handler for project {}".
+ format(self._project))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Register for vnfr create/update/delete/read requests from dts """
- def on_commit(xact_info):
- """ The transaction has been committed """
- self._log.debug("Got vnfr commit (xact_info: %s)", xact_info)
- return rwdts.MemberRspCode.ACTION_OK
-
- def on_abort(*args):
- """ Abort callback """
- self._log.debug("VNF transaction got aborted")
@asyncio.coroutine
def on_event(dts, g_reg, xact, xact_event, scratch_data):
yield from vnfr.instantiate(None, restart_mode=True)
+ self._log.debug("Got on_event in vnfm: {}".format(xact_event))
+
if xact_event == rwdts.MemberEvent.INSTALL:
curr_cfg = self.regh.elements
for cfg in curr_cfg:
- vnfr = self.vnfm.create_vnfr(cfg)
- self._loop.create_task(instantiate_realloc_vnfr(vnfr))
+ try:
+ vnfr = self.vnfm.create_vnfr(cfg, restart_mode = True)
+ if vnfr is None:
+ self._log.error("Not Creating VNFR {} as corresponding NS is terminated".format(cfg.id))
+ else:
+ self._log.debug("Creating VNFR {}".format(vnfr.vnfr_id))
+ except Exception as e:
+ self._log.exception(e)
+ raise e
- self._log.debug("Got on_event in vnfm")
+ self._loop.create_task(instantiate_realloc_vnfr(vnfr))
return rwdts.MemberRspCode.ACTION_OK
xact_info, action, msg
)
+ @asyncio.coroutine
+ def create_vnf(vnfr):
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ if msg.operational_status == 'pre_init':
+ vnfr.set_state(VirtualNetworkFunctionRecordState.PRE_INIT)
+ yield from vnfr.publish(None)
+
+ if vnfr.external_ro:
+ return
+
+ if msg.operational_status == 'init':
+ vnfr._init = True
+ def on_instantiate_done(fut):
+ # If the do_instantiate fails, then publish NSR with failed result
+ e = fut.exception()
+ if e is not None:
+ import traceback, sys
+ print(traceback.format_exception(None,e, e.__traceback__), file=sys.stderr, flush=True)
+ self._log.exception("VNFR instantiation failed for VNFR id %s: %s", vnfr.vnfr_id, str(e))
+ self._loop.create_task(vnfr.instantiation_failed(failed_reason=str(e)))
+
+ try:
+ # RIFT-9105: Unable to add a READ query under an existing transaction
+ # xact = xact_info.xact
+ assert vnfr.task is None
+ vnfr.task = self._loop.create_task(vnfr.instantiate(None))
+ vnfr.task.add_done_callback(on_instantiate_done)
+
+
+ except Exception as e:
+ self._log.exception(e)
+ self._log.error("Error while instantiating vnfr:%s", vnfr.vnfr_id)
+ vnfr.set_state(VirtualNetworkFunctionRecordState.FAILED)
+ yield from vnfr.publish(None)
+
+ return
+
if action == rwdts.QueryAction.CREATE:
if not msg.has_field("vnfd"):
err = "Vnfd not provided"
self._log.error(err)
raise VnfRecordError(err)
-
vnfr = self.vnfm.create_vnfr(msg)
- try:
- # RIFT-9105: Unable to add a READ query under an existing transaction
- # xact = xact_info.xact
- yield from vnfr.instantiate(None)
- except Exception as e:
- self._log.exception(e)
- self._log.error("Error while instantiating vnfr:%s", vnfr.vnfr_id)
- vnfr.set_state(VirtualNetworkFunctionRecordState.FAILED)
- yield from vnfr.publish(None)
+ if vnfr is None:
+ self._log.error("Not Creating VNFR {} as corresponding NS is terminated".format(msg.id))
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ else:
+ yield from create_vnf(vnfr)
+ return
+
elif action == rwdts.QueryAction.DELETE:
- schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema()
+ schema = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.schema()
path_entry = schema.keyspec_to_entry(ks_path)
vnfr = self._vnfm.get_vnfr(path_entry.key00.id)
if vnfr is None:
- self._log.debug("VNFR id %s not found for delete", path_entry.key00.id)
- raise VirtualNetworkFunctionRecordNotFound(
- "VNFR id %s", path_entry.key00.id)
+ self._log.error("VNFR id %s not found for delete", path_entry.key00.id)
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ return
+ # Preventing exception here if VNFR id is not found. This means delete is
+ # invoked before Creation.
+ # raise VirtualNetworkFunctionRecordNotFound(
+ # "VNFR id %s", path_entry.key00.id)
try:
- yield from vnfr.terminate(xact_info.xact)
- # Unref the VNFD
- vnfr.vnfd_unref()
+ if not vnfr.external_ro:
+ yield from vnfr.terminate(xact_info.xact)
yield from self._vnfm.delete_vnfr(xact_info.xact, vnfr)
except Exception as e:
self._log.exception(e)
self._log.error("Caught exception while deleting vnfr %s", path_entry.key00.id)
elif action == rwdts.QueryAction.UPDATE:
- schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr.schema()
+ schema = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr.schema()
path_entry = schema.keyspec_to_entry(ks_path)
vnfr = None
try:
vnfr = self._vnfm.get_vnfr(path_entry.key00.id)
+
+ if vnfr is None:
+ # This means one of two things : The VNFR has been deleted or its a Launchpad restart.
+ if msg.id in self._vnfm._deleted_vnfrs:
+ # VNFR is deleted.
+ self._log.error("Not Creating VNFR {} as corresponding NS is terminated".format(msg.id))
+ return
+
+ self._log.debug("Launchpad Restart - Recreating VNFR - %s", msg.id)
+ vnfr = self.vnfm.create_vnfr(msg)
+ if vnfr is None:
+ self._log.error("Not Creating VNFR {} as corresponding NS is terminated".format(msg.id))
+ else:
+ yield from create_vnf(vnfr)
+
+ return
+
except Exception as e:
- self._log.debug("No vnfr found with id %s", path_entry.key00.id)
+ self._log.error("Exception in VNFR Update : %s", str(e))
xact_info.respond_xpath(rwdts.XactRspCode.NA)
return
- if vnfr is None:
- self._log.debug("VNFR id %s not found for update", path_entry.key00.id)
- xact_info.respond_xpath(rwdts.XactRspCode.NA)
+ if vnfr.external_ro:
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ return
+
+ if (msg.operational_status == 'pre_init' and not vnfr._init):
+ # Creating VNFR INSTANTIATION TASK
+ self._log.debug("VNFR {} update after substitution {} (operational_status {})".
+ format(vnfr.name, msg.vnfd, msg.operational_status))
+ yield from vnfr.update_vnfr_after_substitution(msg, xact_info)
+ yield from create_vnf(vnfr)
return
- self._log.debug("VNFR {} update config status {} (current {})".
- format(vnfr.name, msg.config_status, vnfr.config_status))
- # Update the config status and publish
- vnfr._config_status = msg.config_status
- yield from vnfr.publish(None)
+ else:
+ self._log.debug("VNFR {} update config status {} (current {})".
+ format(vnfr.name, msg.config_status, vnfr.config_status))
+ # Update the config and publish
+ yield from vnfr.update_config(msg, xact_info)
else:
raise NotImplementedError(
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
- self._log.debug("Registering for VNFR using xpath: %s",
- VnfrDtsHandler.XPATH,)
+ xpath = self._project.add_project(VnfrDtsHandler.XPATH)
+ self._log.debug("Registering for VNFR using xpath: {}".
+ format(xpath))
- hdl = rift.tasklets.DTS.RegistrationHandler(on_commit=on_commit,
- on_prepare=on_prepare,)
+ hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
handlers = rift.tasklets.Group.Handler(on_event=on_event,)
with self._dts.group_create(handler=handlers) as group:
- self._regh = group.register(xpath=VnfrDtsHandler.XPATH,
+ self._regh = group.register(xpath=xpath,
handler=hdl,
flags=(rwdts.Flag.PUBLISHER |
+ rwdts.Flag.SHARED |
rwdts.Flag.NO_PREP_READ |
- rwdts.Flag.CACHE |
rwdts.Flag.DATASTORE),)
@asyncio.coroutine
- def create(self, xact, path, msg):
+ def create(self, xact, xpath, msg):
"""
Create a VNFR record in DTS with path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Creating VNFR xact = %s, %s:%s",
xact, path, msg)
xact, path, msg)
@asyncio.coroutine
- def update(self, xact, path, msg):
+ def update(self, xact, xpath, msg, flags=rwdts.XactFlag.REPLACE):
"""
Update a VNFR record in DTS with path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Updating VNFR xact = %s, %s:%s",
xact, path, msg)
- self.regh.update_element(path, msg)
+ self.regh.update_element(path, msg, flags)
self._log.debug("Updated VNFR xact = %s, %s:%s",
xact, path, msg)
@asyncio.coroutine
- def delete(self, xact, path):
+ def delete(self, xact, xpath):
"""
Delete a VNFR record in DTS with path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Deleting VNFR xact = %s, %s", xact, path)
self.regh.delete_element(path)
self._log.debug("Deleted VNFR xact = %s, %s", xact, path)
""" Return the NS manager instance """
return self._vnfm
+ def deregister(self):
+ '''De-register from DTS'''
+ self._log.debug("De-register VNFD Ref DTS handler for project {}".
+ format(self._vnfm._project))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Register for VNFD ref count read from dts """
)
if action == rwdts.QueryAction.READ:
- schema = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount.schema()
+ schema = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_VnfdRefCount.schema()
path_entry = schema.keyspec_to_entry(ks_path)
vnfd_list = yield from self._vnfm.get_vnfd_refcount(path_entry.key00.vnfd_id_ref)
for xpath, msg in vnfd_list:
hdl = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
with self._dts.group_create() as group:
- self._regh = group.register(xpath=VnfdRefCountDtsHandler.XPATH,
+ self._regh = group.register(xpath=self._vnfm._project.add_project(
+ VnfdRefCountDtsHandler.XPATH),
handler=hdl,
flags=rwdts.Flag.PUBLISHER,
)
set_if_not_none('mgmt.ip', vdur.vm_management_ip)
# The below can be used for hostname
set_if_not_none('vdur_name', vdur.unique_short_name)
-
+ set_if_not_none('custom_meta_data', vdur._vdud.supplemental_boot_data.custom_meta_data)
+
def update(self, vdur):
"""Update the VDUR information in the datastore
set_or_delete('mgmt.ip', vdur.vm_management_ip)
# The below can be used for hostname
set_or_delete('vdur_name', vdur.unique_short_name)
+ set_or_delete('custom_meta_data', vdur._vdud.supplemental_boot_data.custom_meta_data)
def remove(self, vdur_id):
"""Remove all of the data associated with specified VDUR
The requested data or None
"""
+
result = self._pattern.match(expr)
if result is None:
raise ValueError('data expression not recognized ({})'.format(expr))
class VnfManager(object):
""" The virtual network function manager class """
- def __init__(self, dts, log, loop, cluster_name):
+ def __init__(self, dts, log, loop, project, cluster_name):
self._dts = dts
self._log = log
self._loop = loop
+ self._project = project
self._cluster_name = cluster_name
- self._vcs_handler = VcsComponentDtsHandler(dts, log, loop, self)
- self._vnfr_handler = VnfrDtsHandler(dts, log, loop, self)
+ # This list maintains a list of all the deleted vnfrs' ids. This is done to be able to determine
+ # if the vnfr is not found because of restart or simply because it was deleted. In the first case we
+ # recreate the vnfr while in the latter we do not.
+ self._deleted_vnfrs = []
+
+ self._vnfr_handler = VnfrDtsHandler(dts, log, loop, self)
+ self._vnfd_handler = VnfdDtsHandler(dts, log, loop, self)
self._vnfr_ref_handler = VnfdRefCountDtsHandler(dts, log, loop, self)
- self._nsr_handler = mano_dts.NsInstanceConfigSubscriber(log, dts, loop, callback=self.handle_nsr)
+ self._nsr_handler = mano_dts.NsInstanceConfigSubscriber(
+ log, dts, loop, project, callback=self.handle_nsr)
+ self._vlr_handler = subscriber.VlrSubscriberDtsHandler(log, dts, loop, project,
+ callback=self.vlr_event)
- self._dts_handlers = [VnfdDtsHandler(dts, log, loop, self),
+ self._dts_handlers = [self._vnfd_handler,
self._vnfr_handler,
- self._vcs_handler,
self._vnfr_ref_handler,
- self._nsr_handler]
+ self._nsr_handler,
+ self._vlr_handler
+ ]
self._vnfrs = {}
self._vnfds_to_vnfr = {}
self._nsrs = {}
+ self._vnfr_for_vlr = {}
@property
def vnfr_handler(self):
return self._vnfr_handler
@property
- def vcs_handler(self):
- """ VCS dts handler """
- return self._vcs_handler
+ def vnfr_ref_handler(self):
+ """ VNFR dts handler """
+ return self._vnfr_ref_handler
@asyncio.coroutine
def register(self):
for hdl in self._dts_handlers:
yield from hdl.register()
+ def deregister(self):
+ self._log.debug("De-register VNFM project {}".format(self._project.name))
+ for hdl in self._dts_handlers:
+ hdl.deregister()
+
@asyncio.coroutine
def run(self):
""" Run this VNFM instance """
yield from self.register()
def handle_nsr(self, nsr, action):
- if action in [rwdts.QueryAction.CREATE]:
+ if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
self._nsrs[nsr.id] = nsr
elif action == rwdts.QueryAction.DELETE:
if nsr.id in self._nsrs:
del self._nsrs[nsr.id]
- def get_linked_mgmt_network(self, vnfr):
+ def get_nsr_config(self, nsr_id):
+ """
+ Gets the NSR config from the DTS cache.
+ Called in recovery mode only.
+ """
+ if nsr_id in self._nsrs:
+ return self._nsrs[nsr_id]
+
+ if len(self._nsrs):
+ self._log.error("VNFR with id {} not found".format(nsr_id))
+ return None
+
+ curr_cfgs = list(self._nsr_handler.reg.elements)
+ key_map = { getattr(cfg, self._nsr_handler.key_name()): cfg for cfg in curr_cfgs }
+ curr_cfgs = [key_map[key] for key in key_map]
+
+ for cfg in curr_cfgs:
+ self._nsrs[cfg.id] = cfg
+
+ if nsr_id in self._nsrs:
+ return self._nsrs[nsr_id]
+
+ self._log.error("VNFR with id {} not found in DTS cache".format(nsr_id))
+ return None
+
+
+ def get_linked_mgmt_network(self, vnfr, restart_mode=False):
"""For the given VNFR get the related mgmt network from the NSD, if
available.
"""
vnfd_id = vnfr.vnfd.id
nsr_id = vnfr.nsr_id_ref
+ if restart_mode:
+ self._nsrs[nsr_id] = self.get_nsr_config(vnfr.nsr_id_ref)
+
# for the given related VNFR, get the corresponding NSR-config
nsr_obj = None
try:
# network
for vld in nsr_obj.nsd.vld:
if vld.mgmt_network:
- return vld.name
+ for vnfd in vld.vnfd_connection_point_ref:
+ if vnfd.vnfd_id_ref == vnfd_id:
+ if vld.vim_network_name is not None:
+ mgmt_net = vld.vim_network_name
+ else:
+ mgmt_net = self._project.name + "." + nsr_obj.name + "." + vld.name
+ return mgmt_net
return None
""" get VNFR by vnfr id """
if vnfr_id not in self._vnfrs:
- raise VnfRecordError("VNFR id %s not found", vnfr_id)
+ self._log.error("VNFR id {} not found".format(vnfr_id))
+ return None
+ # Returning None to prevent exception here. The caller raises the exception.
+ # raise VnfRecordError("VNFR id %s not found", vnfr_id)
return self._vnfrs[vnfr_id]
- def create_vnfr(self, vnfr):
+ def create_vnfr(self, vnfr, restart_mode=False):
+ # Check if NSR is present. This is a situation where the NS has been deleted before
+ # VNFR Create starts.
+ if vnfr.nsr_id_ref not in self._nsrs:
+ return None
+
""" Create a VNFR instance """
if vnfr.id in self._vnfrs:
msg = "Vnfr id %s already exists" % vnfr.id
vnfr.id,
vnfr.vnfd.id)
- mgmt_network = self.get_linked_mgmt_network(vnfr)
+ try:
+ mgmt_network = self.get_linked_mgmt_network(vnfr, restart_mode)
+ except Exception as e:
+ self._log.exception(e)
+ raise e
+
+ # Identify if we are using Rift RO or external RO
+ external_ro = False
+ nsr = self._nsrs[vnfr.nsr_id_ref]
+ if (nsr.resource_orchestrator and
+ nsr.resource_orchestrator != 'rift'):
+ self._log.debug("VNFR {} using external RO".
+ format(vnfr.name))
+ external_ro = True
self._vnfrs[vnfr.id] = VirtualNetworkFunctionRecord(
- self._dts, self._log, self._loop, self._cluster_name, self, self.vcs_handler, vnfr,
- mgmt_network=mgmt_network
+ self._dts, self._log, self._loop, self._cluster_name, self, vnfr,
+ mgmt_network=mgmt_network, external_ro=external_ro,
)
#Update ref count
self._vnfds_to_vnfr[vnfr.vnfd.id] -= 1
del self._vnfrs[vnfr.vnfr_id]
+ self._deleted_vnfrs.append(vnfr.vnfr_id)
@asyncio.coroutine
def fetch_vnfd(self, vnfd_id):
""" Fetch VNFDs based with the vnfd id"""
- vnfd_path = VirtualNetworkFunctionRecord.vnfd_xpath(vnfd_id)
+ vnfd_path = self._project.add_project(
+ VirtualNetworkFunctionRecord.vnfd_xpath(vnfd_id))
self._log.debug("Fetch vnfd with path %s", vnfd_path)
vnfd = None
- res_iter = yield from self._dts.query_read(vnfd_path, rwdts.XactFlag.MERGE)
+ res_iter = yield from self._dts.query_read(vnfd_path,
+ rwdts.XactFlag.MERGE)
for ent in res_iter:
res = yield from ent
del self._vnfds_to_vnfr[vnfd_id]
- # Remove any files uploaded with VNFD and stored under $RIFT_ARTIFACTS/libs/<id>
- try:
- rift_artifacts_dir = os.environ['RIFT_ARTIFACTS']
- vnfd_dir = os.path.join(rift_artifacts_dir, 'launchpad/libs', vnfd_id)
- if os.path.exists(vnfd_dir):
- shutil.rmtree(vnfd_dir, ignore_errors=True)
- except Exception as e:
- self._log.error("Exception in cleaning up VNFD {}: {}".
- format(self._vnfds_to_vnfr[vnfd_id].vnfd.name, e))
- self._log.exception(e)
-
-
def vnfd_refcount_xpath(self, vnfd_id):
""" xpath for ref count entry """
- return (VnfdRefCountDtsHandler.XPATH +
- "[rw-vnfr:vnfd-id-ref = '{}']").format(vnfd_id)
+ return self._project.add_project(VnfdRefCountDtsHandler.XPATH +
+ "[rw-vnfr:vnfd-id-ref={}]").format(quoted_key(vnfd_id))
@asyncio.coroutine
def get_vnfd_refcount(self, vnfd_id):
vnfd_list = []
if vnfd_id is None or vnfd_id == "":
for vnfd in self._vnfds_to_vnfr.keys():
- vnfd_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount()
+ vnfd_msg = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_VnfdRefCount()
vnfd_msg.vnfd_id_ref = vnfd
vnfd_msg.instance_ref_count = self._vnfds_to_vnfr[vnfd]
vnfd_list.append((self.vnfd_refcount_xpath(vnfd), vnfd_msg))
elif vnfd_id in self._vnfds_to_vnfr:
- vnfd_msg = RwVnfrYang.YangData_Vnfr_VnfrCatalog_VnfdRefCount()
+ vnfd_msg = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_VnfdRefCount()
vnfd_msg.vnfd_id_ref = vnfd_id
vnfd_msg.instance_ref_count = self._vnfds_to_vnfr[vnfd_id]
vnfd_list.append((self.vnfd_refcount_xpath(vnfd_id), vnfd_msg))
return vnfd_list
+ def add_vlr_id_vnfr_map(self, vlr_id, vnfr):
+ """ Add a mapping for vlr_id into VNFR """
+ self._vnfr_for_vlr[vlr_id] = vnfr
+
+ def remove_vlr_id_vnfr_map(self, vlr_id):
+ """ Remove a mapping for vlr_id into VNFR """
+ del self._vnfr_for_vlr[vlr_id]
+
+ def find_vnfr_for_vlr_id(self, vlr_id):
+ """ Find VNFR for VLR id """
+ vnfr = None
+ if vlr_id in self._vnfr_for_vlr:
+ vnfr = self._vnfr_for_vlr[vlr_id]
+
+ def vlr_event(self, vlr, action):
+ """ VLR event handler """
+ self._log.debug("VnfManager: Received VLR %s with action:%s", vlr, action)
+
+ if vlr.id not in self._vnfr_for_vlr:
+ self._log.warning("VLR %s:%s received for unknown id; %s",
+ vlr.id, vlr.name, vlr)
+ return
+ vnfr = self._vnfr_for_vlr[vlr.id]
+
+ vnfr.vlr_event(vlr, action)
+
+
+class VnfmProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(VnfmProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
+ self._vnfm = None
+
+ @asyncio.coroutine
+ def register (self):
+ try:
+ vm_parent_name = self._tasklet.tasklet_info.get_parent_vm_parent_instance_name()
+ assert vm_parent_name is not None
+ self._vnfm = VnfManager(self._dts, self.log, self.loop, self, vm_parent_name)
+ yield from self._vnfm.run()
+ except Exception:
+ print("Caught Exception in VNFM init:", sys.exc_info()[0])
+ raise
+
+ def deregister(self):
+ self._log.debug("De-register project {} for VnfmProject".
+ format(self.name))
+ self._vnfm.deregister()
+
+ @asyncio.coroutine
+ def delete_prepare(self):
+ if self._vnfm and self._vnfm._vnfrs:
+ delete_msg = "Project has VNFR associated with it. Delete all Project NSR and try again."
+ return False, delete_msg
+ return True, "True"
class VnfmTasklet(rift.tasklets.Tasklet):
""" VNF Manager tasklet class """
self.rwlog.set_subcategory("vnfm")
self._dts = None
- self._vnfm = None
+ self._project_handler = None
+ self.projects = {}
+
+ @property
+ def dts(self):
+ return self._dts
def start(self):
try:
self.log.debug("Created DTS Api GI Object: %s", self._dts)
except Exception:
- print("Caught Exception in VNFM start:", sys.exc_info()[0])
+ self._log.error("Caught Exception in VNFM start:", sys.exc_info()[0])
raise
def on_instance_started(self):
try:
self._dts.deinit()
except Exception:
- print("Caught Exception in VNFM stop:", sys.exc_info()[0])
+ self._log.error("Caught Exception in VNFM stop:", sys.exc_info()[0])
raise
@asyncio.coroutine
def init(self):
""" Task init callback """
- try:
- vm_parent_name = self.tasklet_info.get_parent_vm_parent_instance_name()
- assert vm_parent_name is not None
- self._vnfm = VnfManager(self._dts, self.log, self.loop, vm_parent_name)
- yield from self._vnfm.run()
- except Exception:
- print("Caught Exception in VNFM init:", sys.exc_info()[0])
- raise
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, VnfmProject)
+ self.project_handler.register()
@asyncio.coroutine
def run(self):
--- /dev/null
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import rift.mano.dts as mano_dts
+import asyncio
+
+from gi.repository import (
+ RwDts as rwdts,
+ RwTypes,
+ RwVlrYang,
+ RwYang
+ )
+import rift.tasklets
+
+import requests
+
+
+class VlrSubscriberDtsHandler(mano_dts.AbstractOpdataSubscriber):
+ """ VLR DTS handler """
+ XPATH = "D,/vlr:vlr-catalog/vlr:vlr"
+
+ def __init__(self, log, dts, loop, project, callback=None):
+ super().__init__(log, dts, loop, project, callback)
+
+ def get_xpath(self):
+ return ("D,/vlr:vlr-catalog/vlr:vlr")
##
# This function creates an install target for the plugin artifacts
##
-rift_install_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py)
+rift_install_gobject_python_plugin(${TASKLET_NAME} ${TASKLET_NAME}.py COMPONENT ${INSTALL_COMPONENT})
# Workaround RIFT-6485 - rpmbuild defaults to python2 for
# anything not in a site-packages directory so we have to
rift/topmgr/sdnsim.py
rift/tasklets/${TASKLET_NAME}/__init__.py
rift/tasklets/${TASKLET_NAME}/${TASKLET_NAME}.py
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
PYTHON3_ONLY)
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
)
import rift.tasklets
+from rift.mano.utils.project import (
+ ManoProject,
+ ProjectHandler,
+)
import rift.mano.sdn
from rift.vlmgr import (
VlrDtsHandler,
VldDtsHandler,
VirtualLinkRecord,
+ VirtualLinkEventListener
)
from rift.topmgr import (
class SDNAccountHandlers(object):
- def __init__(self, dts, log, log_hdl, acctstore, loop):
+ def __init__(self, dts, log, log_hdl, acctstore, loop, project):
self._log = log
self._log_hdl = log_hdl
self._dts = dts
self._loop = loop
self._acctstore = acctstore
+ self._project = project
self._log.debug("Creating SDN account config handler")
self.sdn_cfg_handler = rift.mano.sdn.SDNAccountConfigSubscriber(
- self._dts, self._log, self._log_hdl,
+ self._dts, self._log, project, self._log_hdl,
rift.mano.sdn.SDNAccountConfigCallbacks(
on_add_apply=self.on_sdn_account_added,
on_delete_apply=self.on_sdn_account_deleted,
self._log.debug("Creating SDN account opdata handler")
self.sdn_operdata_handler = rift.mano.sdn.SDNAccountDtsOperdataHandler(
- self._dts, self._log, self._loop,
+ self._dts, self._log, self._loop, project,
)
def on_sdn_account_deleted(self, account_name):
self.sdn_cfg_handler.register()
yield from self.sdn_operdata_handler.register()
+ def deregister(self):
+ self.sdn_cfg_handler.deregister()
+ self.sdn_operdata_handler.deregister()
+
class VnsManager(object):
""" The Virtual Network Service Manager """
- def __init__(self, dts, log, log_hdl, loop):
+ def __init__(self, dts, log, log_hdl, loop, project):
self._dts = dts
self._log = log
self._log_hdl = log_hdl
self._loop = loop
+ self._project = project
self._acctstore = {}
self._vlr_handler = VlrDtsHandler(dts, log, loop, self)
self._vld_handler = VldDtsHandler(dts, log, loop, self)
- self._sdn_handlers = SDNAccountHandlers(dts, log, log_hdl, self._acctstore, loop)
+ self._sdn_handlers = SDNAccountHandlers(dts, log, log_hdl, self._acctstore, loop, self._project)
self._nwtopdata_store = NwtopDataStore(log)
- self._nwtopdiscovery_handler = NwtopDiscoveryDtsHandler(dts, log, loop, self._acctstore, self._nwtopdata_store)
- self._nwtopstatic_handler = NwtopStaticDtsHandler(dts, log, loop, self._acctstore, self._nwtopdata_store)
+ self._nwtopdiscovery_handler = NwtopDiscoveryDtsHandler(dts, log, loop, project,
+ self._acctstore, self._nwtopdata_store)
+ self._nwtopstatic_handler = NwtopStaticDtsHandler(dts, log, loop, project,
+ self._acctstore, self._nwtopdata_store)
+ self._vl_event_listener = VirtualLinkEventListener(dts, log, loop, self)
self._vlrs = {}
@asyncio.coroutine
self._log.debug("Registering discovery-based DTS NW topology handler")
yield from self._nwtopdiscovery_handler.register()
+ @asyncio.coroutine
+ def register_vl_event_listener(self):
+ """ Register Virtual Link related events DTS handler """
+ self._log.debug("Registering Virtual Link Event listener")
+ yield from self._vl_event_listener.register()
+
@asyncio.coroutine
def register(self):
""" Register all static DTS handlers"""
yield from self.register_vld_handler()
yield from self.register_nwtopstatic_handler()
yield from self.register_nwtopdiscovery_handler()
+ yield from self.register_vl_event_listener()
+
+ def deregister(self):
+ self._vl_event_listener.deregister()
+ self._nwtopdiscovery_handler.deregister()
+ self._nwtopstatic_handler.deregister()
+ self._vld_handler.deregister()
+ self._vlr_handler.deregister()
+ self._sdn_handlers.deregister()
def create_vlr(self, msg):
""" Create VLR """
self._loop,
self,
msg,
- msg.res_id
)
return self._vlrs[msg.id]
del self._vlrs[vlr_id]
self._log.info("Deleted virtual link id %s", vlr_id)
- def find_vlr_by_vld_id(self, vld_id):
+ def find_vlR_by_vld_id(self, vld_id):
""" Find a VLR matching the VLD Id """
for vlr in self._vlrs.values():
if vlr.vld_id == vld_id:
return False
@asyncio.coroutine
- def publish_vlr(self, xact, path, msg):
+ def publish_vlr(self, xact, xpath, msg):
""" Publish a VLR """
+ path = self._project.add_project(xpath)
self._log.debug("Publish vlr called with path %s, msg %s",
path, msg)
yield from self._vlr_handler.update(xact, path, msg)
@asyncio.coroutine
- def unpublish_vlr(self, xact, path):
+ def unpublish_vlr(self, xact, xpath):
""" Publish a VLR """
+ path = self._project.add_project(xpath)
self._log.debug("Unpublish vlr called with path %s", path)
yield from self._vlr_handler.delete(xact, path)
+ def create_virual_link_event(self, event_id, event_msg):
+ """ Update Virtual Link Event """
+ self._log.debug("Creating Virtual Link Event id [%s], msg [%s]",
+ event_id, event_msg)
+
+ @asyncio.coroutine
+ def update_virual_link_event(self, event_id, event_msg):
+ """ Update Virtual Link Event """
+ self._log.debug("Updating Virtual Link Event id [%s], msg [%s]",
+ event_id, event_msg)
+ # event id and vlr_id are the same.
+ # Use event id to look up the VLR and update and publish state change
+ vlr = None
+
+ if event_id in self._vlrs:
+ vlr = self._vlrs[event_id]
+
+ if vlr is None:
+ self._log.error("Received VLR Event notifcation for unknown VLR - event-id:%s",
+ event_id)
+ return
+
+ if event_msg.resource_info.resource_state == 'active':
+ with self._dts.transaction(flags=0) as xact:
+ yield from vlr.ready(event_msg, xact)
+ elif event_msg.resource_info.resource_state == 'failed':
+ with self._dts.transaction(flags=0) as xact:
+ if event_msg.resource_info.resource_errors:
+ vlr._state_failed_reason = str(event_msg.resource_info.resource_errors)
+ yield from vlr.failed(event_msg, xact)
+ else:
+ self._log.warning("Receieved unknown resource state %s for event id %s vlr:%s",
+ event_msg.resource_info.resource_state, event_id, vlr.name)
+
+ def delete_virual_link_event(self, event_id):
+ """ Delete Virtual Link Event """
+ self._log.debug("Deleting Virtual Link Event id [%s]",
+ event_id)
+
+
+class VnsProject(ManoProject):
+
+ def __init__(self, name, tasklet, **kw):
+ super(VnsProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
+ self._vlr_handler = None
+ self._vnsm = None
+ # A mapping of instantiated vlr_id's to VirtualLinkRecord objects
+ self._vlrs = {}
+
+ @asyncio.coroutine
+ def register (self):
+ try:
+ self._vnsm = VnsManager(dts=self._dts,
+ log=self.log,
+ log_hdl=self.log_hdl,
+ loop=self.loop,
+ project=self)
+ yield from self._vnsm.run()
+ except Exception as e:
+ self.log.exception("VNS Task failed to run", e)
+
+ def deregister(self):
+ self._log.debug("De-register project {}".format(self.name))
+ self._vnsm.deregister()
+
class VnsTasklet(rift.tasklets.Tasklet):
""" The VNS tasklet class """
self.rwlog.set_subcategory("vns")
self._dts = None
- self._vlr_handler = None
+ self._project_handler = None
+ self.projects = {}
- self._vnsm = None
- # A mapping of instantiated vlr_id's to VirtualLinkRecord objects
- self._vlrs = {}
+ @property
+ def dts(self):
+ return self._dts
def start(self):
super(VnsTasklet, self).start()
self.log.info("Starting VnsTasklet")
self.log.debug("Registering with dts")
- self._dts = rift.tasklets.DTS(self.tasklet_info,
- RwVnsYang.get_schema(),
- self.loop,
- self.on_dts_state_change)
+ try:
+ self._dts = rift.tasklets.DTS(self.tasklet_info,
+ RwVnsYang.get_schema(),
+ self.loop,
+ self.on_dts_state_change)
+ except Exception:
+ self.log.exception("Caught Exception in VNS start:", e)
self.log.debug("Created DTS Api GI Object: %s", self._dts)
@asyncio.coroutine
def init(self):
""" task init callback"""
- self._vnsm = VnsManager(dts=self._dts,
- log=self.log,
- log_hdl=self.log_hdl,
- loop=self.loop)
- yield from self._vnsm.run()
-
- # NSM needs to detect VLD deletion that has active VLR
- # self._vld_handler = VldDescriptorConfigDtsHandler(
- # self._dts, self.log, self.loop, self._vlrs,
- # )
- # yield from self._vld_handler.register()
+ self.log.debug("creating project handler")
+ self.project_handler = ProjectHandler(self, VnsProject)
+ self.project_handler.register()
@asyncio.coroutine
def run(self):
-
#
# Copyright 2016 RIFT.IO Inc
#
#
import asyncio
-
import gi
+
gi.require_version('RwDts', '1.0')
gi.require_version('RwcalYang', '1.0')
gi.require_version('RwTypes', '1.0')
from gi.repository.RwTypes import RwStatus
import rift.tasklets
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
class NwtopDiscoveryDtsHandler(object):
""" Handles DTS interactions for the Discovered Topology registration """
DISC_XPATH = "D,/nd:network"
- def __init__(self, dts, log, loop, acctstore, nwdatastore):
+ def __init__(self, dts, log, loop, project, acctmgr, nwdatastore):
self._dts = dts
self._log = log
self._loop = loop
- self._acctstore = acctstore
+ self._project = project
+ self._acctmgr = acctmgr
self._nwdatastore = nwdatastore
self._regh = None
""" The registration handle associated with this Handler"""
return self._regh
+ def deregister(self):
+ self._log.debug("De-register Topology discovery handler for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Register for the Discovered Topology path """
nw.server_provided = False
nw.network_id = name + ':' + nw.network_id
self._log.debug("...Network id %s", nw.network_id)
- nw_xpath = ("D,/nd:network[network-id=\'{}\']").format(nw.network_id)
+ nw_xpath = ("D,/nd:network[network-id={}]").format(quoted_key(nw.network_id))
xact_info.respond_xpath(rwdts.XactRspCode.MORE,
nw_xpath, nw)
on_prepare=on_prepare,
)
- yield from self._dts.register(
+ self._regh = yield from self._dts.register(
NwtopDiscoveryDtsHandler.DISC_XPATH,
flags=rwdts.Flag.PUBLISHER,
handler=handler
""" Handles DTS interactions for the Static Topology registration """
STATIC_XPATH = "C,/nd:network"
- def __init__(self, dts, log, loop, acctstore, nwdatastore):
+ def __init__(self, dts, log, loop, project, acctmgr, nwdatastore):
self._dts = dts
self._log = log
self._loop = loop
- self._acctstore = acctstore
+ self._project = project
+ self._acctmgr = acctmgr
self._regh = None
self.pending = {}
def regh(self):
""" The registration handle associated with this Handler"""
return self._regh
-
-
+
+ def deregister(self):
+ self._log.debug("De-register Topology static handler for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
def register(self):
""" Register for the Static Topology path """
on_apply=apply_nw_config)
with self._dts.appconf_group_create(handler=handler) as acg:
- acg.register(xpath = NwtopStaticDtsHandler.STATIC_XPATH,
- flags = rwdts.Flag.SUBSCRIBER,
- on_prepare=prepare_nw_cfg)
-
-
+ self._regh = acg.register(xpath = NwtopStaticDtsHandler.STATIC_XPATH,
+ flags = rwdts.Flag.SUBSCRIBER,
+ on_prepare=prepare_nw_cfg)
# limitations under the License.
#
-from . import core
+import gi
import logging
+from . import core
+
import xml.etree.ElementTree as etree
from gi.repository import RwTopologyYang as RwTl
-import gi
gi.require_version('RwYang', '1.0')
from gi.repository import RwYang
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
logger = logging.getLogger(__name__)
class SdnSim(core.Topology):
def __init__(self):
super(SdnSim, self).__init__()
- self._model = RwYang.Model.create_libncx()
+ self._model = RwYang.Model.create_libyang()
self._model.load_schema_ypbc(RwTl.get_schema())
def get_network_list(self, account):
for nw in nwtop.network:
nw.server_provided = False
logger.debug("...Network id %s", nw.network_id)
- #nw_xpath = ("D,/nd:network[network-id=\'{}\']").format(nw.network_id)
+ #nw_xpath = ("D,/nd:network[network-id={}]").format(quoted_key(nw.network_id))
#xact_info.respond_xpath(rwdts.XactRspCode.MORE,
# nw_xpath, nw)
elif 'xml' in topology_source:
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
#
# Author(s): Ravi Chamarty
# Creation Date: 9/2/2015
-#
+#
from .rwvlmgr import (
VirtualLinkRecordState,
VirtualLinkRecord,
VlrDtsHandler,
VldDtsHandler,
+ VirtualLinkEventListener,
)
-
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
import asyncio
import enum
-import uuid
+import gi
import time
+import uuid
-import gi
gi.require_version('RwVlrYang', '1.0')
gi.require_version('RwDts', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
RwDts as rwdts,
RwResourceMgrYang,
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
import rift.tasklets
"""
Virtual Link Record object
"""
- def __init__(self, dts, log, loop, vnsm, vlr_msg, req_id=None):
+ def __init__(self, dts, log, loop, vnsm, vlr_msg):
self._dts = dts
self._log = log
self._loop = loop
self._vnsm = vnsm
self._vlr_msg = vlr_msg
+ self._vlr_id = self._vlr_msg.id
+ self._project = vnsm._project
self._network_id = None
self._network_pool = None
self._assigned_subnet = None
+ self._virtual_cps = list()
self._create_time = int(time.time())
- if req_id == None:
- self._request_id = str(uuid.uuid4())
- else:
- self._request_id = req_id
self._state = VirtualLinkRecordState.INIT
self._state_failed_reason = None
+ self._name = self._vlr_msg.name
@property
def vld_xpath(self):
""" VLD xpath associated with this VLR record """
- return "C,/vld:vld-catalog/vld:vld[id='{}']".format(self.vld_id)
+ return self._project.add_project("C,/vld:vld-catalog/vld:vld[id={}]".
+ format(quoted_key(self.vld_id)))
@property
def vld_id(self):
@property
def vlr_id(self):
""" VLR id associated with this VLR record """
- return self._vlr_msg.id
+ return self._vlr_id
@property
def xpath(self):
""" path for this VLR """
- return("D,/vlr:vlr-catalog"
- "/vlr:vlr[vlr:id='{}']".format(self.vlr_id))
+ return self._project.add_project("D,/vlr:vlr-catalog"
+ "/vlr:vlr[vlr:id={}]".format(quoted_key(self.vlr_id)))
@property
def name(self):
""" Name of this VLR """
- return self._vlr_msg.name
+ return self._name
+
+ @property
+ def datacenter(self):
+ """ RO Account to instantiate the virtual link on """
+ return self._vlr_msg.datacenter
@property
- def cloud_account_name(self):
- """ Cloud Account to instantiate the virtual link on """
- return self._vlr_msg.cloud_account
+ def event_id(self):
+ """ Event Identifier for this virtual link """
+ return self._vlr_id
@property
def resmgr_path(self):
""" path for resource-mgr"""
- return ("D,/rw-resource-mgr:resource-mgmt" +
- "/vlink-event/vlink-event-data[event-id='{}']".format(self._request_id))
+ return self._project.add_project("D,/rw-resource-mgr:resource-mgmt" +
+ "/vlink-event/vlink-event-data[event-id={}]".format(quoted_key(self.event_id)))
@property
def operational_status(self):
@property
def msg(self):
""" VLR message for this VLR """
- msg = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr()
+ msg = RwVlrYang.YangData_RwProject_Project_VlrCatalog_Vlr()
msg.copy_from(self._vlr_msg)
if self._network_id is not None:
if self._assigned_subnet is not None:
msg.assigned_subnet = self._assigned_subnet
+ if self._virtual_cps:
+ for cp in msg.virtual_connection_points:
+ for vcp in self._virtual_cps:
+ if cp.name == vcp['name']:
+ cp.ip_address = vcp['ip_address']
+ cp.mac_address = vcp['mac_address']
+ cp.connection_point_id = vcp['connection_point_id']
+ break
msg.operational_status = self.operational_status
msg.operational_status_details = self._state_failed_reason
- msg.res_id = self._request_id
-
+ msg.res_id = self.event_id
return msg
@property
def resmgr_msg(self):
""" VLR message for this VLR """
- msg = RwResourceMgrYang.VirtualLinkEventData()
- msg.event_id = self._request_id
- msg.cloud_account = self.cloud_account_name
+ msg = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData()
+ msg.event_id = self.event_id
+ msg.cloud_account = self.datacenter
msg.request_info.name = self.name
msg.request_info.vim_network_name = self._vlr_msg.vim_network_name
msg.request_info.provider_network.from_dict(
if self._vlr_msg.has_field('ip_profile_params'):
msg.request_info.ip_profile_params.from_dict(self._vlr_msg.ip_profile_params.as_dict())
+ for cp in self._vlr_msg.virtual_connection_points:
+ vcp = msg.request_info.virtual_cps.add()
+ vcp.from_dict({k:v for k,v in cp.as_dict().items()
+ if k in ['name','port_security_enabled','type_yang']})
+ if (self._vlr_msg.has_field('ip_profile_params')) and (self._vlr_msg.ip_profile_params.has_field('security_group')):
+ vcp.security_group = self._vlr_msg.ip_profile_params.security_group
+
return msg
@asyncio.coroutine
def create_network(self, xact):
""" Create network for this VL """
- self._log.debug("Creating network req-id: %s", self._request_id)
- return (yield from self.request_network(xact, "create"))
+ self._log.debug("Creating network event-id: %s:%s", self.event_id, self._vlr_msg)
+ network_rsp = yield from self.request_network(xact, "create")
+ return network_rsp
@asyncio.coroutine
def delete_network(self, xact):
""" Delete network for this VL """
- self._log.debug("Deleting network - req-id: %s", self._request_id)
+ self._log.debug("Deleting network - event-id: %s", self.event_id)
return (yield from self.request_network(xact, "delete"))
@asyncio.coroutine
def read_network(self, xact):
""" Read network for this VL """
- self._log.debug("Reading network - req-id: %s", self._request_id)
+ self._log.debug("Reading network - event-id: %s", self.event_id)
return (yield from self.request_network(xact, "read"))
@asyncio.coroutine
block.add_query_create(self.resmgr_path, self.resmgr_msg)
elif action == "delete":
self._log.debug("Deleting network path:%s", self.resmgr_path)
- if self.resmgr_msg.request_info.name != "multisite":
- block.add_query_delete(self.resmgr_path)
+ block.add_query_delete(self.resmgr_path)
elif action == "read":
self._log.debug("Reading network path:%s", self.resmgr_path)
block.add_query_read(self.resmgr_path)
if resp.has_field('resource_info') and resp.resource_info.resource_state == "failed":
raise NetworkResourceError(resp.resource_info.resource_errors)
- if not (resp.has_field('resource_info') and
- resp.resource_info.has_field('virtual_link_id')):
+ if not resp.has_field('resource_info') :
raise NetworkResourceError("Did not get a valid network resource response (resp: %s)", resp)
self._log.debug("Got network request response: %s", resp)
try:
self._state = VirtualLinkRecordState.RESOURCE_ALLOC_PENDING
+ network_rsp = None
if restart == 0:
network_resp = yield from self.create_network(xact)
else:
network_resp = yield from self.read_network(xact)
if network_resp == None:
- network_resp = yield from self.create_network(xact)
-
- # Note network_resp.virtual_link_id is CAL assigned network_id.
+ network_resp = yield from self.create_network(xact)
- self._network_id = network_resp.resource_info.virtual_link_id
- self._network_pool = network_resp.resource_info.pool_name
- self._assigned_subnet = network_resp.resource_info.subnet
-
- self._state = VirtualLinkRecordState.READY
-
- yield from self.publish(xact)
+ if network_resp:
+ self._state = self.vl_state_from_network_resp(network_resp)
+ if self._state == VirtualLinkRecordState.READY:
+ # Move this VL into ready state
+ yield from self.ready(network_resp, xact)
+ else:
+ yield from self.publish(xact)
except Exception as e:
self._log.error("Instantiatiation of VLR record failed: %s", str(e))
self._state = VirtualLinkRecordState.FAILED
self._state_failed_reason = str(e)
yield from self.publish(xact)
+ def vl_state_from_network_resp(self, network_resp):
+ """ Determine VL state from network response """
+ if network_resp.resource_info.resource_state == 'pending':
+ return VirtualLinkRecordState.RESOURCE_ALLOC_PENDING
+ elif network_resp.resource_info.resource_state == 'active':
+ return VirtualLinkRecordState.READY
+ elif network_resp.resource_info.resource_state == 'failed':
+ return VirtualLinkRecordState.FAILED
+ return VirtualLinkRecordState.RESOURCE_ALLOC_PENDING
+
+ @asyncio.coroutine
+ def ready(self, event_resp, xact):
+ """ This virtual link is ready """
+ # Note network_resp.virtual_link_id is CAL assigned network_id.
+ self._log.debug("Virtual Link id %s name %s in ready state, event_rsp:%s",
+ self.vlr_id,
+ self.name,
+ event_resp)
+ self._network_id = event_resp.resource_info.virtual_link_id
+ self._network_pool = event_resp.resource_info.pool_name
+ self._assigned_subnet = event_resp.resource_info.subnet
+ self._virtual_cps = [ vcp.as_dict()
+ for vcp in event_resp.resource_info.virtual_connection_points ]
+
+ yield from self.publish(xact)
+
+ self._state = VirtualLinkRecordState.READY
+
+ yield from self.publish(xact)
+
+ @asyncio.coroutine
+ def failed(self, event_resp, xact):
+ """ This virtual link Failed """
+ self._log.debug("Virtual Link id %s name %s failed to instantiate, event_rsp:%s",
+ self.vlr_id,
+ self.name,
+ event_resp)
+
+ self._state = VirtualLinkRecordState.FAILED
+
+ yield from self.publish(xact)
+
@asyncio.coroutine
def publish(self, xact):
""" publish this VLR """
self._vnsm = vnsm
self._regh = None
+ self._project = vnsm._project
@property
def regh(self):
@asyncio.coroutine
def register(self):
""" Register for the VLR path """
- def on_commit(xact_info):
- """ The transaction has been committed """
- self._log.debug("Got vlr commit (xact_info: %s)", xact_info)
-
- return rwdts.MemberRspCode.ACTION_OK
@asyncio.coroutine
def on_event(dts, g_reg, xact, xact_event, scratch_data):
return
elif action == rwdts.QueryAction.DELETE:
# Delete an VLR record
- schema = RwVlrYang.YangData_Vlr_VlrCatalog_Vlr.schema()
+ schema = RwVlrYang.YangData_RwProject_Project_VlrCatalog_Vlr.schema()
path_entry = schema.keyspec_to_entry(ks_path)
self._log.debug("Terminating VLR id %s", path_entry.key00.id)
yield from self._vnsm.delete_vlr(path_entry.key00.id, xact_info.xact)
xact_info.respond_xpath(rwdts.XactRspCode.ACK)
return
- self._log.debug("Registering for VLR using xpath: %s",
- VlrDtsHandler.XPATH)
+ xpath = self._project.add_project(VlrDtsHandler.XPATH)
+ self._log.debug("Registering for VLR using xpath: {}".
+ format(xpath))
- reg_handle = rift.tasklets.DTS.RegistrationHandler(
- on_commit=on_commit,
- on_prepare=on_prepare,
- )
+ reg_handle = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare,)
handlers = rift.tasklets.Group.Handler(on_event=on_event,)
with self._dts.group_create(handler=handlers) as group:
self._regh = group.register(
- xpath=VlrDtsHandler.XPATH,
+ xpath=xpath,
handler=reg_handle,
flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ| rwdts.Flag.DATASTORE,
)
+ def deregister(self):
+ self._log.debug("De-register VLR handler for project {}".
+ format(self._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
@asyncio.coroutine
- def create(self, xact, path, msg):
+ def create(self, xact, xpath, msg):
"""
Create a VLR record in DTS with path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Creating VLR xact = %s, %s:%s",
xact, path, msg)
self.regh.create_element(path, msg)
xact, path, msg)
@asyncio.coroutine
- def update(self, xact, path, msg):
+ def update(self, xact, xpath, msg):
"""
Update a VLR record in DTS with path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Updating VLR xact = %s, %s:%s",
xact, path, msg)
self.regh.update_element(path, msg)
xact, path, msg)
@asyncio.coroutine
- def delete(self, xact, path):
+ def delete(self, xact, xpath):
"""
Delete a VLR record in DTS with path and message
"""
+ path = self._project.add_project(xpath)
self._log.debug("Deleting VLR xact = %s, %s", xact, path)
self.regh.delete_element(path)
self._log.debug("Deleted VLR xact = %s, %s", xact, path)
"Got on prepare for VLD update (ks_path: %s) (action: %s)",
ks_path.to_xpath(VldYang.get_schema()), msg)
- schema = VldYang.YangData_Vld_VldCatalog_Vld.schema()
+ schema = VldYang.YangData_RwProject_Project_VldCatalog_Vld.schema()
path_entry = schema.keyspec_to_entry(ks_path)
+ # TODO: Check why on project delete this gets called
+ if not path_entry:
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ return
+
vld_id = path_entry.key00.id
disabled_actions = [rwdts.QueryAction.DELETE, rwdts.QueryAction.UPDATE]
handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
- yield from self._dts.register(
- VldDtsHandler.XPATH,
+ self._regh = yield from self._dts.register(
+ self._vnsm._project.add_project(VldDtsHandler.XPATH),
flags=rwdts.Flag.SUBSCRIBER,
handler=handler
)
+
+ def deregister(self):
+ self._log.debug("De-register VLD handler for project {}".
+ format(self._vnsm._project.name))
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
+
+class VirtualLinkEventListener(object):
+ """ DTS Listener to listen on Virtual Link related events """
+ XPATH = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data"
+ def __init__(self, dts, log, loop, vnsm):
+ self._dts = dts
+ self._log = log
+ self._loop = loop
+ self._vnsm = vnsm
+ self._regh = None
+
+ @property
+ def regh(self):
+ """ The registration handle assocaited with this Handler"""
+ return self._regh
+
+ def event_id_from_keyspec(self, ks):
+ """ Get the event id from the keyspec """
+ event_pe = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData.schema().keyspec_to_entry(ks)
+ try:
+ # Can get just path without event id when
+ # deleting project
+ event_id = event_pe.key00.event_id
+ except AttributeError:
+ return None
+ return event_id
+
+ @asyncio.coroutine
+ def register(self):
+ """ Register the Virtual Link Event path """
+ @asyncio.coroutine
+ def on_prepare(xact_info, query_action, ks_path, msg):
+ """ prepare callback on Virtual Link Events """
+ try:
+ self._log.debug(
+ "Got on prepare for Virtual Link Event id (ks_path: %s) (msg: %s)",
+ ks_path.to_xpath(RwResourceMgrYang.get_schema()), msg)
+ event_id = self.event_id_from_keyspec(ks_path)
+ if event_id:
+ if query_action == rwdts.QueryAction.CREATE or query_action == rwdts.QueryAction.UPDATE:
+ yield from self._vnsm.update_virual_link_event(event_id, msg)
+ elif query_action == rwdts.QueryAction.DELETE:
+ self._vnsm.delete_virual_link_event(event_id)
+ except Exception as e:
+ self._log.exception("Caught execption in Virtual Link Event handler", e)
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ handler = rift.tasklets.DTS.RegistrationHandler(on_prepare=on_prepare)
+
+ self._regh = yield from self._dts.register(
+ self._vnsm._project.add_project(VirtualLinkEventListener.XPATH),
+ flags=rwdts.Flag.SUBSCRIBER,
+ handler=handler
+ )
+
+ def deregister(self):
+ if self._regh:
+ self._regh.deregister()
+ self._regh = None
if __name__ == "__main__":
- model = RwYang.Model.create_libncx()
+ model = RwYang.Model.create_libyang()
model.load_schema_ypbc(RwTl.get_schema())
# create logger
logger = logging.getLogger('Provider Network Topology')
if __name__ == "__main__":
- model = RwYang.Model.create_libncx()
+ model = RwYang.Model.create_libyang()
model.load_schema_ypbc(RwTl.get_schema())
# create logger
logger = logging.getLogger('SFC Network Topology')
if __name__ == "__main__":
- model = RwYang.Model.create_libncx()
+ model = RwYang.Model.create_libyang()
model.load_schema_ypbc(RwTl.get_schema())
# create logger
logger = logging.getLogger('VM Network Topology')
outf.write(line)
if __name__ == "__main__":
- model = RwYang.Model.create_libncx()
+ model = RwYang.Model.create_libyang()
model.load_schema_ypbc(RwTl.get_schema())
# create logger
logger = logging.getLogger(__file__)
def get_sdn_account():
"""
- Creates an object for class RwsdnalYang.SdnAccount()
+ Creates an object for class RwsdnalYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList()
"""
- account = RwsdnalYang.SDNAccount()
+ account = RwsdnalYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList()
account.account_type = "mock"
account.mock.username = "rift"
account.mock.plugin_name = "rwsdn_mock"
def get_sdn_account():
"""
- Creates an object for class RwsdnalYang.SdnAccount()
+ Creates an object for class RwsdnalYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList()
"""
- account = RwsdnalYang.SDNAccount()
+ account = RwsdnalYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList()
account.name = "grunt27"
account.account_type = "odl"
account.odl.plugin_name = "rwsdn_odl"
def get_sdn_account():
"""
- Creates an object for class RwsdnalYang.SdnAccount()
+ Creates an object for class RwsdnalYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList()
"""
- account = RwsdnalYang.SDNAccount()
+ account = RwsdnalYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList()
account.name = "grunt17"
account.account_type = "openstack"
account.openstack.plugin_name = "rwsdn_openstack"
def get_sdn_account():
"""
- Creates an object for class RwsdnalYang.SdnAccount()
+ Creates an object for class RwsdnalYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList()
"""
- account = RwsdnalYang.SDNAccount()
+ account = RwsdnalYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList()
account.account_type = "sdnsim"
account.sdnsim.username = "rift"
account.sdnsim.plugin_name = "rwsdn_sim"
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
rw_log_yang-1.0 rw_base_yang-1.0 rwcal_yang-1.0 rwsdnal_yang-1.0 rw_manifest_yang-1.0 protobuf_c-1.0 ietf_netconf_yang-1.0
ietf_network_yang-1.0 ietf_network_topology_yang-1.0
ietf_l2_topology_yang-1.0 rw_topology_yang-1.0
- rw_log-1.0
+ rw_log-1.0 rw_project_yang-1.0 rw_user_yang-1.0 rw_rbac_base_yang-1.0
VAPI_DIRS
${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang
${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
- DEPENDS rwcal_yang rwsdnal_yang mano_yang rwlog_gi rwschema_yang
+ DEPENDS rwcal_yang rwsdnal_yang mano_yang rwlog_gi rwschema_yang rwproject_yang
)
rift_install_vala_artifacts(
VAPI_FILES ${VALA_LONG_NAME}.vapi
GIR_FILES ${VALA_TYPELIB_PREFIX}.gir
TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
DEST_PREFIX .
)
include(rift_plugin)
-rift_install_python_plugin(rwsdn-plugin rwsdn-plugin.py)
+rift_install_gobject_python_plugin(rwsdn-plugin rwsdn-plugin.py COMPONENT ${INSTALL_COMPONENT})
* Credential Validation related APIs
*/
public abstract RwTypes.RwStatus validate_sdn_creds(
- Rwsdnal.SDNAccount account,
- out Rwsdnal.SdnConnectionStatus status);
+ Rwsdnal.YangData_RwProject_Project_SdnAccounts_SdnAccountList account,
+ out Rwsdnal.YangData_RwProject_Project_SdnAccounts_SdnAccountList_ConnectionStatus status);
/*
* Configuring related APIs
* Network related APIs
*/
public abstract RwTypes.RwStatus get_network_list(
- Rwsdnal.SDNAccount account,
+ Rwsdnal.YangData_RwProject_Project_SdnAccounts_SdnAccountList account,
out RwTopology.YangData_IetfNetwork network_topology);
/*
* VNFFG Chain related APIs
*/
public abstract RwTypes.RwStatus create_vnffg_chain(
- Rwsdnal.SDNAccount account,
- Rwsdnal.VNFFGChain vnffg_chain,
+ Rwsdnal.YangData_RwProject_Project_SdnAccounts_SdnAccountList account,
+ Rwsdnal.YangData_RwProject_Project_Vnffgs_VnffgChain vnffg_chain,
out string vnffg_id);
/*
* VNFFG Chain Terminate related APIs
*/
public abstract RwTypes.RwStatus terminate_vnffg_chain(
- Rwsdnal.SDNAccount account,
+ Rwsdnal.YangData_RwProject_Project_SdnAccounts_SdnAccountList account,
string vnffg_id);
* Network related APIs
*/
public abstract RwTypes.RwStatus get_vnffg_rendered_paths(
- Rwsdnal.SDNAccount account,
- out Rwsdnal.VNFFGRenderedPaths rendered_paths);
+ Rwsdnal.YangData_RwProject_Project_SdnAccounts_SdnAccountList account,
+ out Rwsdnal.YangData_RwProject_Project_VnffgRenderedPaths rendered_paths);
/*
* Classifier related APIs
*/
public abstract RwTypes.RwStatus create_vnffg_classifier(
- Rwsdnal.SDNAccount account,
- Rwsdnal.VNFFGClassifier vnffg_classifier,
- out string vnffg_classifier_id);
+ Rwsdnal.YangData_RwProject_Project_SdnAccounts_SdnAccountList account,
+ Rwsdnal.YangData_RwProject_Project_VnffgClassifiers vnffg_classifier,
+ [CCode (array_length = false, array_null_terminated = true)]
+ out string [] vnffg_classifier_id);
/*
* Classifier related APIs
*/
public abstract RwTypes.RwStatus terminate_vnffg_classifier(
- Rwsdnal.SDNAccount account,
- string vnffg_classifier_id);
+ Rwsdnal.YangData_RwProject_Project_SdnAccounts_SdnAccountList account,
+ [CCode (array_length = false, array_null_terminated = true)]
+ string [] vnffg_classifier_id);
include(rift_plugin)
-rift_install_python_plugin(rwsdn_mock rwsdn_mock.py)
+rift_install_gobject_python_plugin(rwsdn_mock rwsdn_mock.py COMPONENT ${INSTALL_COMPONENT})
)
)
- account = RwsdnalYang.SDNAccount()
+ account = RwsdnalYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList()
account.name = 'mock'
account.account_type = 'mock'
account.mock.username = 'rift'
include(rift_plugin)
-rift_install_python_plugin(rwsdn_odl rwsdn_odl.py)
+rift_install_gobject_python_plugin(rwsdn_odl rwsdn_odl.py COMPONENT ${INSTALL_COMPONENT})
@param account - a SDN account
"""
+ classifier_list = list()
classifier_name = self.sdnodl.create_sfc_classifier(account,vnffg_classifier)
- return classifier_name
+ classifier_list.append(classifier_name)
+ return classifier_list
@rwstatus(ret_on_failure=[None])
def do_terminate_vnffg_classifier(self, account, vnffg_classifier_name):
"""
Validate the SDN account credentials by accessing the rest API using the provided credentials
"""
- status = RwsdnalYang.SdnConnectionStatus()
+ status = RwsdnalYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList_ConnectionStatus()
url = '{}/{}'.format(account.odl.url,"restconf")
try:
r=requests.get(url,auth=(account.odl.username,account.odl.password))
self.delete_all_sf(account)
def _fill_rsp_list(self,sfc_rsp_list,sff_list):
- vnffg_rsps = RwsdnalYang.VNFFGRenderedPaths()
+ vnffg_rsps = RwsdnalYang.YangData_RwProject_Project_VnffgRenderedPaths()
for sfc_rsp in sfc_rsp_list['rendered-service-paths']['rendered-service-path']:
rsp = vnffg_rsps.vnffg_rendered_path.add()
rsp.name = sfc_rsp['name']
include(rift_plugin)
-rift_install_python_plugin(rwsdn_openstack rwsdn_openstack.py)
+rift_install_gobject_python_plugin(rwsdn_openstack rwsdn_openstack.py COMPONENT ${INSTALL_COMPONENT})
cert_validate = kwargs['cert_validate'] if 'cert_validate' in kwargs else False
region = kwargs['region_name'] if 'region_name' in kwargs else False
- discover = ks_drv.KeystoneVersionDiscover(kwargs['auth_url'], logger = self.log)
+ discover = ks_drv.KeystoneVersionDiscover(kwargs['auth_url'],
+ cert_validate,
+ logger = self.log)
(major, minor) = discover.get_version()
self.sess_drv = sess_drv.SessionDriver(auth_method = 'password',
logger = self.log)
def validate_account_creds(self):
- status = RwsdnalYang.SdnConnectionStatus()
+ status = RwsdnalYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList_ConnectionStatus()
try:
self.sess_drv.invalidate_auth_token()
self.sess_drv.auth_token
Returns:
Validation Code and Details String
"""
- status = RwsdnalYang.SdnConnectionStatus()
+ status = RwsdnalYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList_ConnectionStatus()
try:
drv = self._use_driver(account)
drv.validate_account_creds()
else:
prev_vm_id = path.vnfr_ids[0].vdu_list[0].vm_id
port_list.append((path.vnfr_ids[0].vdu_list[0].port_id, path.vnfr_ids[0].vdu_list[0].port_id))
- vnffg_id = drv.create_port_chain(vnffg.name, port_list)
+ vnffg_id = drv.portchain_drv.create_port_chain(vnffg.name, port_list)
return vnffg_id
@rwstatus
@param account - a SDN account
"""
self.log.debug('Received get VNFFG rendered path for account %s ', account)
- vnffg_rsps = RwsdnalYang.VNFFGRenderedPaths()
+ vnffg_rsps = RwsdnalYang.YangData_RwProject_Project_VnffgRenderedPaths()
drv = self._use_driver(account)
port_chain_list = drv.get_port_chain_list()
for port_chain in port_chain_list:
include(rift_plugin)
-rift_install_python_plugin(rwsdn_sim rwsdn_sim.py)
+rift_install_gobject_python_plugin(rwsdn_sim rwsdn_sim.py COMPONENT ${INSTALL_COMPONENT})
Returns:
Validation Code and Details String
"""
- status = RwsdnalYang.SdnConnectionStatus()
+ status = RwsdnalYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList_ConnectionStatus()
print("SDN Successfully connected")
status.status = "success"
status.details = "Connection was successful"
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
rift_add_yang_target(
TARGET rwsdnal_yang
YANG_FILES ${source_yang_files}
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
LIBRARIES
- rwschema_yang_gen
- rwyang
- rwlog
- rwlog-mgmt_yang_gen
mano-types_yang_gen
+ rwprojectmano_yang_gen
)
prefix rwbase;
}
- import rw-pb-ext {
- prefix "rwpb";
- }
-
import rw-yang-types {
prefix "rwt";
}
prefix "yang";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
revision 2014-12-30 {
description
grouping connection-status {
container connection-status {
config false;
- rwpb:msg-new SdnConnectionStatus;
leaf status {
type sdn-connection-status-enum;
}
}
}
- uses connection-status;
+ // uses connection-status;
typedef sdn-account-type {
description "SDN account type";
}
}
- container sdn-accounts {
- list sdn-account-list {
- rwpb:msg-new SDNAccount;
- key "name";
+ augment "/rw-project:project" {
+ container sdn-accounts {
+ list sdn-account-list {
+ key "name";
- leaf name {
- type string;
- }
+ leaf name {
+ type string;
+ }
- uses sdn-provider-auth;
- uses connection-status;
+ uses sdn-provider-auth;
+ uses connection-status;
+ }
}
}
- container vnffgs {
- list vnffg-chain {
- key "name";
- rwpb:msg-new VNFFGChain;
-
- leaf name {
- type string;
- }
-
- list vnf-chain-path {
- key "order";
- leaf order {
- type uint32;
- description " Order of the VNF in VNFFG chain";
- }
- leaf service-function-type {
- type string;
- }
- leaf nsh-aware {
- type boolean;
- }
- leaf transport-type {
+ augment "/rw-project:project" {
+ container vnffgs {
+ list vnffg-chain {
+ key "name";
+
+ leaf name {
type string;
}
- list vnfr-ids {
- key "vnfr-id";
- leaf vnfr-id {
- type yang:uuid;
+
+ list vnf-chain-path {
+ key "order";
+ leaf order {
+ type uint32;
+ description " Order of the VNF in VNFFG chain";
}
- leaf vnfr-name {
+ leaf service-function-type {
type string;
}
- leaf mgmt-address {
- type inet:ip-address;
+ leaf nsh-aware {
+ type boolean;
}
- leaf mgmt-port {
- type inet:port-number;
+ leaf transport-type {
+ type string;
}
- list vdu-list {
- key "vm-id port-id";
- leaf port-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
+ list vnfr-ids {
+ key "vnfr-id";
+ leaf vnfr-id {
+ type yang:uuid;
}
- leaf vm-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
+ leaf vnfr-name {
type string;
}
- leaf name {
- type string;
- }
- leaf address {
+ leaf mgmt-address {
type inet:ip-address;
}
- leaf port {
+ leaf mgmt-port {
type inet:port-number;
}
+ list vdu-list {
+ key "vm-id port-id";
+ leaf port-id {
+ type string;
+ }
+ leaf vm-id {
+ type string;
+ }
+ leaf name {
+ type string;
+ }
+ leaf address {
+ type inet:ip-address;
+ }
+ leaf port {
+ type inet:port-number;
+ }
+ }
+ leaf sff-name {
+ description "SFF name useful for non OVS based SFF";
+ type string;
+ }
}
- leaf sff-name {
- description "SFF name useful for non OVS based SFF";
- type string;
- }
}
- }
- list sff {
- rwpb:msg-new VNFFGSff;
- key "name";
- leaf name {
- type string;
- }
- leaf function-type {
- type string;
- }
- leaf mgmt-address {
- type inet:ip-address;
- }
- leaf mgmt-port {
- type inet:port-number;
- }
- list dp-endpoints {
+ list sff {
key "name";
leaf name {
- type string;
- }
- leaf address {
+ type string;
+ }
+ leaf function-type {
+ type string;
+ }
+ leaf mgmt-address {
type inet:ip-address;
}
- leaf port {
+ leaf mgmt-port {
type inet:port-number;
}
- }
- list vnfr-list {
- key "vnfr-name";
- leaf vnfr-name {
- type string;
+ list dp-endpoints {
+ key "name";
+ leaf name {
+ type string;
+ }
+ leaf address {
+ type inet:ip-address;
+ }
+ leaf port {
+ type inet:port-number;
+ }
+ }
+ list vnfr-list {
+ key "vnfr-name";
+ leaf vnfr-name {
+ type string;
+ }
}
}
- }
- leaf classifier-name {
- type string;
+ leaf classifier-name {
+ type string;
+ }
}
}
}
- container vnffg-rendered-paths {
- rwpb:msg-new VNFFGRenderedPaths;
- list vnffg-rendered-path {
- key "name";
- rwpb:msg-new VNFFGRenderedPath;
- config false;
- leaf name {
- type string;
- }
- leaf path-id {
- description
- "Unique Identifier for the service path";
- type uint32;
- }
- list rendered-path-hop {
- key "hop-number";
- leaf hop-number {
- type uint8;
- }
- leaf service-index {
- description
- "Location within the service path";
- type uint8;
- }
- leaf vnfr-name {
+ augment "/rw-project:project" {
+ container vnffg-rendered-paths {
+ list vnffg-rendered-path {
+ key "name";
+ config false;
+ leaf name {
type string;
}
- container service-function-forwarder {
- leaf name {
+ leaf path-id {
+ description
+ "Unique Identifier for the service path";
+ type uint32;
+ }
+ list rendered-path-hop {
+ key "hop-number";
+ leaf hop-number {
+ type uint8;
+ }
+ leaf service-index {
description
- "Service Function Forwarder name";
+ "Location within the service path";
+ type uint8;
+ }
+ leaf vnfr-name {
type string;
}
- leaf ip-address {
- description
+ container service-function-forwarder {
+ leaf name {
+ description
+ "Service Function Forwarder name";
+ type string;
+ }
+ leaf ip-address {
+ description
"Service Function Forwarder Data Plane IP address";
- type inet:ip-address;
- }
- leaf port {
- description
+ type inet:ip-address;
+ }
+ leaf port {
+ description
"Service Function Forwarder Data Plane port";
- type inet:port-number;
- }
+ type inet:port-number;
+ }
+ }
}
}
}
}
-
- container vnffg-classifiers {
- list vnffg-classifier {
- key "name";
- rwpb:msg-new VNFFGClassifier;
-
- leaf name {
- type string;
- }
- leaf rsp-name {
- type string;
- }
- leaf rsp-id {
- type yang:uuid;
- }
- leaf port-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
- leaf vm-id {
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- }
- leaf sff-name {
- type string;
- }
- container vnffg-metadata {
- leaf ctx1 {
+ augment "/rw-project:project" {
+ container vnffg-classifiers {
+ list vnffg-classifier {
+ key "name";
+
+ leaf name {
type string;
}
- leaf ctx2 {
+ leaf rsp-name {
type string;
}
- leaf ctx3 {
+ leaf rsp-id {
+ type yang:uuid;
+ }
+ leaf port-id {
type string;
}
- leaf ctx4 {
+ leaf vm-id {
type string;
}
- }
- list match-attributes {
- description
- "List of match attributes.";
- key "name";
- leaf name {
+ leaf sff-name {
+ type string;
+ }
+ container vnffg-metadata {
+ leaf ctx1 {
+ type string;
+ }
+ leaf ctx2 {
+ type string;
+ }
+ leaf ctx3 {
+ type string;
+ }
+ leaf ctx4 {
+ type string;
+ }
+ }
+ list match-attributes {
description
+ "List of match attributes.";
+ key "name";
+ leaf name {
+ description
"Name for the Access list";
- type string;
- }
+ type string;
+ }
- leaf ip-proto {
- description
+ leaf ip-proto {
+ description
"IP Protocol.";
- type uint8;
- }
+ type uint8;
+ }
- leaf source-ip-address {
- description
+ leaf source-ip-address {
+ description
"Source IP address.";
- type inet:ip-prefix;
- }
+ type inet:ip-prefix;
+ }
- leaf destination-ip-address {
- description
+ leaf destination-ip-address {
+ description
"Destination IP address.";
- type inet:ip-prefix;
- }
+ type inet:ip-prefix;
+ }
- leaf source-port {
- description
+ leaf source-port {
+ description
"Source port number.";
- type inet:port-number;
- }
+ type inet:port-number;
+ }
- leaf destination-port {
- description
+ leaf destination-port {
+ description
"Destination port number.";
- type inet:port-number;
- }
- } //match-attributes
+ type inet:port-number;
+ }
+ } //match-attributes
+ }
}
}
rw_types-1.0 rw_log_yang-1.0 rw_schema_proto-1.0 rw_yang_pb-1.0
rw_yang-1.0 protobuf_c-1.0 rw_keyspec-1.0 rw_log-1.0
- #VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
- #GIR_PATHS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
GENERATE_HEADER_FILE ${VALA_NAME}.h
GENERATE_SO_FILE lib${VALA_LONG_NAME}.so
GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
- #DEPENDS rwcal_yang rwlog_gi rwschema_yang
)
rift_install_vala_artifacts(
VAPI_FILES ${VALA_LONG_NAME}.vapi
GIR_FILES ${VALA_TYPELIB_PREFIX}.gir
TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
DEST_PREFIX .
)
include(rift_plugin)
-rift_install_python_plugin(rwos_ma_nfvo_rest rwos_ma_nfvo_rest.py)
+rift_install_gobject_python_plugin(rwos_ma_nfvo_rest rwos_ma_nfvo_rest.py COMPONENT ${INSTALL_COMPONENT})
rw_types-1.0 rw_log_yang-1.0 rw_schema_proto-1.0 rw_yang_pb-1.0
rw_yang-1.0 protobuf_c-1.0 rw_keyspec-1.0 rw_log-1.0
- #VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
- #GIR_PATHS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
GENERATE_HEADER_FILE ${VALA_NAME}.h
GENERATE_SO_FILE lib${VALA_LONG_NAME}.so
GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
- #DEPENDS rwcal_yang rwlog_gi rwschema_yang
)
rift_install_vala_artifacts(
VAPI_FILES ${VALA_LONG_NAME}.vapi
GIR_FILES ${VALA_TYPELIB_PREFIX}.gir
TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
DEST_PREFIX .
)
include(rift_plugin)
-rift_install_python_plugin(rwve_vnfm_em_rest rwve_vnfm_em_rest.py)
+rift_install_gobject_python_plugin(rwve_vnfm_em_rest rwve_vnfm_em_rest.py COMPONENT ${INSTALL_COMPONENT})
rw_types-1.0 rw_log_yang-1.0 rw_schema_proto-1.0 rw_yang_pb-1.0
rw_yang-1.0 protobuf_c-1.0 rw_keyspec-1.0 rw_log-1.0
- #VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
- #GIR_PATHS ${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
GENERATE_HEADER_FILE ${VALA_NAME}.h
GENERATE_SO_FILE lib${VALA_LONG_NAME}.so
GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
- #DEPENDS rwcal_yang rwlog_gi rwschema_yang
)
rift_install_vala_artifacts(
VAPI_FILES ${VALA_LONG_NAME}.vapi
GIR_FILES ${VALA_TYPELIB_PREFIX}.gir
TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
DEST_PREFIX .
)
include(rift_plugin)
-rift_install_python_plugin(rwve_vnfm_vnf_rest rwve_vnfm_vnf_rest.py)
+rift_install_gobject_python_plugin(rwve_vnfm_vnf_rest rwve_vnfm_vnf_rest.py COMPONENT ${INSTALL_COMPONENT})
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
${rw_monitor_log_file}
${rw_mon_params_log_file}
${rw_resource_mgr_log_file}
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
LIBRARIES
mano_yang_gen
rwcloud_yang_gen
+ rwro_account_yang_gen
rw_conman_yang_gen
rwconfig_agent_yang_gen
mano-types_yang_gen
+ rwprojectmano_yang_gen
DEPENDS
mano_yang
rwcloud_yang
+ rwro_account_yang
rw_conman_yang
rwconfig_agent_yang
mano-types_yang
+ rwprojectmano_yang
+ # Added to make sure that the target is built,
+ # Not required b mano yang
+ rw_project_person_yang
+ ASSOCIATED_FILES
+ rw-pkg-mgmt.role.xml
+ rw-staging-mgmt.role.xml
+ rw-image-mgmt.role.xml
)
+rift_add_yang_target(
+ TARGET rw_project_person_yang
+ YANG_FILES
+ rw-project-person-db.yang
+ COMPONENT ${INSTALL_COMPONENT}
+ LIBRARIES
+ rwprojectmano_yang_gen
+ DEPENDS
+ rwprojectmano_yang
+ ASSOCIATED_FILES
+ rw-launchpad.role.xml
+)
--- /dev/null
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:rw-image-mgmt-role</role>
+ <key-set>
+ <name>project-name</name>
+ <path>/rw-project:project/rw-project:name</path>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:catalog-oper</role>
+ <keys-role>rw-project-mano:rw-image-mgmt-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-image-mgmt:upload-jobs</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:catalog-admin</role>
+ <keys-role>rw-project-mano:rw-image-mgmt-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/rw-image-mgmt:upload-jobs</path>
+ <path>/rw-image-mgmt:create-upload-job</path>
+ <path>/rw-image-mgmt:create-upload-job</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project:project-admin</role>
+ <keys-role>rw-project-mano:rw-image-mgmt-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-image-mgmt:create-upload-job</path>
+ <path>/rw-image-mgmt:create-upload-job</path>
+ </authorize>
+ </role-definition>
+</config>
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix tailf;
}
- tailf:annotate "/rw-image-mgmt:upload-jobs" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf:annotate "/rw-project:project/rw-image-mgmt:upload-jobs" {
tailf:callpoint rw_callpoint;
}
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix "yang";
}
- import rw-pb-ext {
- prefix "rwpb";
- }
-
import rw-cli-ext {
prefix "rwcli";
}
import rw-cloud {
- prefix "rwcloud";
+ prefix "rw-cloud";
}
import rwcal {
prefix "rwcal";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
+ import mano-types {
+ prefix "mano-types";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2016-06-01 {
description
"Initial revision.";
leaf cloud-account {
description "The cloud account to upload the image to";
type leafref {
- path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+ path "../../../../rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
}
}
uses upload-task-status;
}
- container upload-jobs {
- rwpb:msg-new UploadJobs;
- description "Image upload jobs";
- config false;
+ augment "/rw-project:project" {
+ container upload-jobs {
+ description "Image upload jobs";
+ config false;
- list job {
- rwpb:msg-new UploadJob;
- key "id";
+ list job {
+ key "id";
- leaf id {
- description "Unique image upload job-id";
- type uint32;
- }
+ leaf id {
+ description "Unique image upload job-id";
+ type uint32;
+ }
- leaf status {
- description "Current job status";
- type job-status;
- }
+ leaf status {
+ description "Current job status";
+ type job-status;
+ }
- leaf start-time {
- description "The job start time (unix epoch)";
- type uint32;
- }
+ leaf start-time {
+ description "The job start time (unix epoch)";
+ type uint32;
+ }
- leaf stop-time {
- description "The job stop time (unix epoch)";
- type uint32;
- }
+ leaf stop-time {
+ description "The job stop time (unix epoch)";
+ type uint32;
+ }
- list upload-tasks {
- rwpb:msg-new UploadTask;
- description "The upload tasks that are part of this job";
- uses upload-task;
+ list upload-tasks {
+ description "The upload tasks that are part of this job";
+ uses upload-task;
+ }
}
}
}
rpc create-upload-job {
input {
- rwpb:msg-new CreateUploadJob;
+
+ uses mano-types:rpc-project-name;
choice image-selection {
case onboarded-image {
leaf-list cloud-account {
description "List of cloud accounts to upload the image to";
type leafref {
- path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+ path "/rw-project:project[rw-project:name=current()/.." +
+ "/project-name]/rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
}
}
}
output {
- rwpb:msg-new CreateUploadJobOutput;
leaf job-id {
description "The upload job-id to cancel";
type uint32;
rpc cancel-upload-job {
input {
- rwpb:msg-new CancelUploadJob;
leaf job-id {
type uint32;
}
+
+ uses mano-types:rpc-project-name;
}
}
}
--- /dev/null
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:rw-launchpad-role</role>
+ <key-set>
+ <name>project-name</name>
+ <path>/rw-project:project/rw-project:name</path>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:account-oper</role>
+ <keys-role>rw-project-mano:rw-launchpad-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-launchpad:datacenters</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:account-admin</role>
+ <keys-role>rw-project-mano:rw-launchpad-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-launchpad:datacenters</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:lcm-admin</role>
+ <keys-role>rw-project-mano:rw-launchpad-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-launchpad:datacenters</path>
+ </authorize>
+ </role-definition>
+</config>
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix tailf;
}
- tailf:annotate "/rw-launchpad:datacenters" {
- tailf:callpoint rw_callpoint;
+ import rw-project {
+ prefix "rw-project";
}
}
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix "yang";
}
- import rw-pb-ext {
- prefix "rwpb";
- }
-
import ietf-inet-types {
prefix "inet";
}
-
import rw-cli-ext {
prefix "rwcli";
}
prefix "rwcal";
}
- import rw-vnfd {
- prefix "rw-vnfd";
+ import rw-project-vnfd {
+ prefix "rw-project-vnfd";
}
import vld {
prefix "vld";
}
- import rw-nsd {
- prefix "rw-nsd";
+ import rw-project-nsd {
+ prefix "rw-project-nsd";
}
import rw-cloud {
prefix "manotypes";
}
- revision 2015-09-14 {
- description
- "Initial revision.";
+ import rw-project {
+ prefix "rw-project";
}
- container datacenters {
- description "OpenMano data centers";
-
- rwpb:msg-new DataCenters;
- config false;
-
- list ro-accounts {
- description
- "A list of OpenMano cloud accounts that have data centers associated
- with them";
-
- rwpb:msg-new ROAccount;
- key "name";
-
- leaf name {
- description "The name of the cloud account";
- type leafref {
- path "/rw-launchpad:resource-orchestrator/rw-launchpad:name";
- }
- }
-
- list datacenters {
- rwpb:msg-new DataCenter;
- leaf uuid {
- description "The UUID of the data center";
- type yang:uuid;
- }
-
- leaf name {
- description "The name of the data center";
- type string;
- }
- }
- }
+ import rw-project-mano {
+ prefix "rw-project-mano";
}
- typedef resource-orchestrator-account-type {
- description "RO account type";
- type enumeration {
- enum rift-ro;
- enum openmano;
- }
+ import rw-ro-account {
+ prefix "rw-ro-account";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
}
- container resource-orchestrator {
- rwpb:msg-new ResourceOrchestrator;
-
- leaf name {
- type string;
- }
-
- leaf account-type {
- type resource-orchestrator-account-type;
- }
-
- choice resource-orchestrator {
- description
- "The resource orchestrator to use by the Launchpad";
- default rift-ro;
-
- case rift-ro {
- description
- "Use the RIFT.io resource orchestrator";
-
- container rift-ro {
- leaf rift-ro {
- type empty;
- }
- }
- }
-
- case openmano {
- description
- "Use OpenMano as RO";
-
- container openmano {
- leaf host {
- type string;
- default "localhost";
- }
-
- leaf port {
- type uint16;
- default 9090;
- }
-
- leaf tenant-id {
- type string {
- length "36";
- }
- mandatory true;
- }
- }
- }
- }
+ revision 2015-09-14 {
+ description
+ "Initial revision.";
}
- container launchpad-config {
- leaf public-ip {
- description
+ augment "/rw-project:project" {
+ container launchpad-config {
+ leaf public-ip {
+ description
"An IP address that can, at least, be reached by the host that the
launchpad is running on. This is not a mandatory but is required for
alarms to function correctly.";
- type string;
+ type string;
+ }
}
}
}
namespace "http://riftio.com/ns/riftware-1.0/rw-monitor";
prefix "rw-monitor";
- import rw-pb-ext {
- prefix "rwpb";
- }
-
import rw-cli-ext {
prefix "rwcli";
}
prefix "yang";
}
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
revision 2015-10-30 {
description
"Initial revision.";
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
namespace "http://riftio.com/ns/riftware-1.0/rw-nsm";
prefix "rw-nsm";
- import rw-pb-ext {
- prefix "rwpb";
- }
-
import rw-cli-ext {
prefix "rwcli";
}
prefix "inet";
}
- import rw-nsd {
- prefix "rw-nsd";
+ import rw-project-nsd {
+ prefix "rw-project-nsd";
}
- import nsd {
- prefix "nsd";
+
+ import project-nsd {
+ prefix "project-nsd";
}
+
import rw-nsr {
prefix "rw-nsr";
}
+
import vld {
prefix "vld";
}
+
import rw-vlr {
prefix "rw-vlr";
}
+
import rw-vns {
prefix "rw-vns";
}
- import rw-vnfd {
- prefix "rw-vnfd";
+
+ import rw-project-vnfd {
+ prefix "rw-project-vnfd";
}
- import vnfd {
- prefix "vnfd";
+
+ import project-vnfd {
+ prefix "project-vnfd";
}
+
import rw-vnfr {
prefix "rw-vnfr";
}
prefix "rw-config-agent";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-10-07 {
description
"Initial revision.";
leaf cm-username {
description "RO endpoint username";
type string;
- default "admin";
+ default "@rift";
}
leaf cm-password {
description "RO endpoint password";
type string;
- default "admin";
- }
- }
-
- container ro-config {
- description "Resource Orchestrator endpoint ip address";
- rwpb:msg-new "roConfig";
- rwcli:new-mode "ro-config";
-
- container cm-endpoint {
- description "Service Orchestrator endpoint ip address";
- rwpb:msg-new "SoEndpoint";
- uses cm-endpoint;
+ default "rift";
}
}
}
--- /dev/null
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:rw-pkg-mgmt-role</role>
+ <key-set>
+ <name>project-name</name>
+ <path>/rw-project:project/rw-project:name</path>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:catalog-oper</role>
+ <keys-role>rw-project-mano:rw-pkg-mgmt-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-pkg-mgmt:download-jobs</path>
+ <path>/rw-project:project/rw-pkg-mgmt:copy-jobs</path>
+ <path>/rw-pkg-mgmt:get-package-endpoint</path>
+ <path>/rw-pkg-mgmt:get-package-schema</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:catalog-admin</role>
+ <keys-role>rw-project-mano:rw-pkg-mgmt-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/rw-pkg-mgmt:download-jobs</path>
+ <path>/rw-project:project/rw-pkg-mgmt:copy-jobs</path>
+ <path>/rw-project:project/rw-pkg-mgmt:create-jobs</path>
+ <path>/rw-project:project/rw-pkg-mgmt:update-jobs</path>
+ <path>/rw-pkg-mgmt:get-package-endpoint</path>
+ <path>/rw-pkg-mgmt:get-package-schema</path>
+ <path>/rw-pkg-mgmt:package-copy</path>
+ <path>/rw-pkg-mgmt:package-file-add</path>
+ <path>/rw-pkg-mgmt:package-file-delete</path>
+ <path>/rw-pkg-mgmt:package-create</path>
+ <path>/rw-pkg-mgmt:package-update</path>
+ <path>/rw-pkg-mgmt:package-export</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project:project-admin</role>
+ <keys-role>rw-project-mano:rw-pkg-mgmt-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-pkg-mgmt:get-package-endpoint</path>
+ <path>/rw-pkg-mgmt:get-package-schema</path>
+ <path>/rw-pkg-mgmt:package-copy</path>
+ <path>/rw-pkg-mgmt:package-file-add</path>
+ <path>/rw-pkg-mgmt:package-file-delete</path>
+ <path>/rw-pkg-mgmt:package-create</path>
+ <path>/rw-pkg-mgmt:package-update</path>
+ <path>/rw-pkg-mgmt:package-export</path>
+ </authorize>
+ </role-definition>
+</config>
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix tailf;
}
- tailf:annotate "/rw-pkg-mgmt:download-jobs" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf:annotate "/rw-project:project/rw-pkg-mgmt:download-jobs" {
+ tailf:callpoint rw_callpoint;
+ }
+
+ tailf:annotate "/rw-project:project/rw-pkg-mgmt:copy-jobs" {
+ tailf:callpoint rw_callpoint;
+ }
+
+ tailf:annotate "/rw-project:project/rw-pkg-mgmt:create-jobs" {
tailf:callpoint rw_callpoint;
}
- tailf:annotate "/rw-pkg-mgmt:copy-jobs" {
+ tailf:annotate "/rw-project:project/rw-pkg-mgmt:update-jobs" {
tailf:callpoint rw_callpoint;
}
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix "yang";
}
- import rw-pb-ext {
- prefix "rwpb";
- }
-
import rw-cli-ext {
prefix "rwcli";
}
import rw-vnfd {
prefix "rwvnfd";
}
+
import rw-nsd {
prefix "rwnsd";
}
+ import rw-project-vnfd {
+ prefix "rw-project-vnfd";
+ }
+
+ import rw-project-nsd {
+ prefix "rw-project-nsd";
+ }
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2016-06-01 {
description
"Initial revision.";
enum IMAGES;
enum CLOUD_INIT;
enum README;
+ enum DOC;
+ enum TEST;
}
}
}
}
- container download-jobs {
- rwpb:msg-new DownloadJobs;
- description "Download jobs";
- config false;
+ augment "/rw-project:project" {
+ container download-jobs {
+ description "Download jobs";
+ config false;
- list job {
- rwpb:msg-new DownloadJob;
- key "download-id";
+ list job {
+ key "download-id";
- leaf download-id {
- description "Unique UUID";
- type string;
- }
+ leaf download-id {
+ description "Unique UUID";
+ type string;
+ }
- leaf url {
- description "URL of the download";
- type string;
+ leaf url {
+ description "URL of the download";
+ type string;
+ }
+
+ uses package-file-identifer;
+ uses download-task-status;
}
+ }
- uses package-file-identifer;
- uses download-task-status;
+ container copy-jobs {
+ description "Copy jobs";
+ config false;
+
+ list job {
+ key "transaction-id";
+
+ leaf transaction-id {
+ description "Unique UUID";
+ type string;
+ }
+
+ uses copy-task-status;
+ }
}
- }
- container copy-jobs {
- rwpb:msg-new CopyJobs;
- description "Copy jobs";
- config false;
+ container create-jobs {
+ description "Create jobs";
+ config false;
- list job {
- rwpb:msg-new CopyJob;
- key "transaction-id";
+ list job {
+ key "transaction-id";
- leaf transaction-id {
- description "Unique UUID";
- type string;
+ leaf transaction-id {
+ description "Unique UUID";
+ type string;
+ }
+
+ uses copy-task-status;
}
+ }
+
+ container update-jobs {
+ description "Update jobs";
+ config false;
- uses copy-task-status;
+ list job {
+ key "transaction-id";
+
+ leaf transaction-id {
+ description "Unique UUID";
+ type string;
+ }
+
+ uses copy-task-status;
+ }
}
}
+
rpc get-package-endpoint {
description "Retrieves the endpoint for the descriptor";
input {
uses package-identifer;
+ uses manotypes:rpc-project-name;
}
output {
description "Name of destination package";
type string;
}
+
+ uses manotypes:rpc-project-name;
}
output {
description "Type of the package";
type manotypes:package-type;
}
+
+ uses manotypes:rpc-project-name;
}
output {
input {
uses package-identifer;
uses external-url-data;
+ uses manotypes:rpc-project-name;
}
output {
description "Valid ID to track the status of the task";
type string;
}
+ uses manotypes:rpc-project-name;
}
}
input {
uses package-identifer;
uses external-url-data;
+ uses manotypes:rpc-project-name;
}
output {
description "Valid ID to track the status of the task";
type string;
}
+ uses manotypes:rpc-project-name;
}
}
input {
uses package-identifer;
+ uses manotypes:rpc-project-name;
leaf export-schema {
description "Schema to export";
type export-format;
default YAML;
}
-
}
output {
description "Valid ID to track the status of the task";
type string;
}
+
+ uses manotypes:rpc-project-name;
}
}
input {
uses package-file-identifer;
uses external-url-data;
+ uses manotypes:rpc-project-name;
choice catalog-type {
mandatory true;
}
}
}
-
}
output {
description "Valid ID to track the status of the task";
type string;
}
+ uses manotypes:rpc-project-name;
}
}
input {
uses package-file-identifer;
+ uses manotypes:rpc-project-name;
+
choice catalog-type {
case VNFD {
leaf vnfd-file-type {
}
}
}
-
}
output {
type string;
}
+ uses manotypes:rpc-project-name;
}
}
--- /dev/null
+/*
+ *
+ * Copyright 2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+
+module rw-project-person-db
+{
+ namespace "http://riftio.com/ns/riftware-1.0/rw-project-person-db";
+ prefix "rw-project-person-db";
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2016-04-03 {
+ description
+ "Initial revision.
+ Test YANG for unit testing.";
+ }
+
+ augment "/rw-project:project" {
+ container person {
+
+ leaf name {
+ description
+ "This is the person's name.";
+ type string;
+ }
+ }
+
+ container flat-person {
+ leaf name {
+ type string;
+ }
+ }
+ }
+}
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix tailf;
}
- tailf:annotate "/rw-resource-mgr:resource-pool-records" {
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ tailf:annotate "/rw-project:project/rw-resource-mgr:resource-pool-records" {
tailf:callpoint rw_callpoint;
}
- tailf:annotate "/rw-resource-mgr:resource-mgmt" {
+ tailf:annotate "/rw-project:project/rw-resource-mgr:resource-mgmt" {
tailf:callpoint rw_callpoint;
}
}
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
namespace "http://riftio.com/ns/riftware-1.0/rw-resource-mgr";
prefix "rw-resource-mgr";
- import rw-pb-ext {
- prefix "rwpb";
- }
-
import rw-cli-ext {
prefix "rwcli";
}
}
import rw-cloud {
- prefix "rwcloud";
+ prefix "rw-cloud";
}
import rwcal {
prefix "manotypes";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-10-16 {
description
"Initial revision.";
grouping resource-pool-info {
leaf name {
description "Name of the resource pool";
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
type string;
//mandatory true;
}
}
- container resource-mgr-config {
- description "Data model for configuration of resource-mgr";
- rwpb:msg-new ResourceManagerConfig;
- config true;
-
- container management-domain {
- leaf name {
- description "The management domain name this launchpad is associated with.";
- rwpb:field-inline "true";
- rwpb:field-string-max 64;
- type string;
- //mandatory true;
- }
- }
+ augment "/rw-project:project" {
+ container resource-mgr-config {
+ description "Data model for configuration of resource-mgr";
+ config true;
- container resource-pools {
- description "Resource Pool configuration";
- rwpb:msg-new ResourcePools;
- list cloud-account {
- key "name";
+ container management-domain {
leaf name {
- description
- "Resource pool for the configured cloud account";
- type leafref {
- path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+ description "The management domain name this launchpad is associated with.";
+ type string;
+ //mandatory true;
+ }
+ }
+
+ container resource-pools {
+ description "Resource Pool configuration";
+ list cloud-account {
+ key "name";
+ leaf name {
+ description
+ "Resource pool for the configured cloud account";
+ type leafref {
+ path "../../../../rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+ }
}
}
}
}
}
- container resource-mgmt {
- description "Resource management ";
- config false;
+ augment "/rw-project:project" {
+ container resource-mgmt {
+ description "Resource management ";
+ config false;
- container vdu-event {
- description "Events for VDU Management";
- rwpb:msg-new VDUEvent;
+ container vdu-event {
+ description "Events for VDU Management";
+
+ list vdu-event-data {
+ key "event-id";
- list vdu-event-data {
- rwpb:msg-new VDUEventData;
- key "event-id";
-
- leaf event-id {
- description "Identifier associated with the VDU transaction";
- type yang:uuid;
- }
+ leaf event-id {
+ description "Identifier associated with the VDU transaction";
+ type yang:uuid;
+ }
- leaf cloud-account {
- description "The cloud account to use for this resource request";
- type leafref {
- path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+ leaf cloud-account {
+ description "The cloud account to use for this resource request";
+ type leafref {
+ path "../../../../rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+ }
}
- }
- container request-info {
- description "Information about required resource";
+ container request-info {
+ description "Information about required resource";
- uses rwcal:vdu-create-params;
- }
+ uses rwcal:vdu-create-params;
+ }
- container resource-info {
- description "Information about allocated resource";
- leaf pool-name {
- type string;
+ container resource-info {
+ description "Information about allocated resource";
+ leaf pool-name {
+ type string;
+ }
+ uses resource-state;
+ uses rwcal:vdu-info-params;
}
- uses resource-state;
- uses rwcal:vdu-info-params;
}
}
- }
-
- container vlink-event {
- description "Events for Virtual Link management";
- rwpb:msg-new VirtualLinkEvent;
- list vlink-event-data {
- rwpb:msg-new VirtualLinkEventData;
+ container vlink-event {
+ description "Events for Virtual Link management";
+
+ list vlink-event-data {
+
+ key "event-id";
- key "event-id";
-
- leaf event-id {
- description "Identifier associated with the Virtual Link transaction";
- type yang:uuid;
- }
+ leaf event-id {
+ description "Identifier associated with the Virtual Link transaction";
+ type yang:uuid;
+ }
- leaf cloud-account {
- description "The cloud account to use for this resource request";
- type leafref {
- path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+ leaf cloud-account {
+ description "The cloud account to use for this resource request";
+ type leafref {
+ path "../../../../rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+ }
}
- }
- container request-info {
- description "Information about required resource";
+ container request-info {
+ description "Information about required resource";
- uses rwcal:virtual-link-create-params;
- }
+ uses rwcal:virtual-link-create-params;
+ }
- container resource-info {
- leaf pool-name {
- type string;
+ container resource-info {
+ leaf pool-name {
+ type string;
+ }
+ uses resource-state;
+ uses rwcal:virtual-link-info-params;
}
- uses resource-state;
- uses rwcal:virtual-link-info-params;
}
}
}
}
- container resource-pool-records {
- description "Resource Pool Records";
- rwpb:msg-new ResourcePoolRecords;
- config false;
+ augment "/rw-project:project" {
+ container resource-pool-records {
+ description "Resource Pool Records";
+ config false;
- list cloud-account {
- key "name";
- leaf name {
- description
- "The configured cloud account's pool records.";
- type leafref {
- path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+ list cloud-account {
+ key "name";
+ leaf name {
+ description
+ "The configured cloud account's pool records.";
+ type leafref {
+ path "../../../rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+ }
}
- }
- list records {
- rwpb:msg-new ResourceRecordInfo;
- key "name";
- uses resource-pool-info;
+ list records {
+ key "name";
+ uses resource-pool-info;
- leaf pool-status {
- type enumeration {
- enum unknown;
- enum locked;
- enum unlocked;
+ leaf pool-status {
+ type enumeration {
+ enum unknown;
+ enum locked;
+ enum unlocked;
+ }
}
- }
- leaf total-resources {
- type uint32;
- }
+ leaf total-resources {
+ type uint32;
+ }
- leaf free-resources {
- type uint32;
- }
+ leaf free-resources {
+ type uint32;
+ }
- leaf allocated-resources {
- type uint32;
+ leaf allocated-resources {
+ type uint32;
+ }
}
}
}
}
- container resource-mgr-data{
- description "Resource Manager operational data";
- config false;
+ augment "/rw-project:project" {
+ container resource-mgr-data {
+ description "Resource Manager operational data";
+ config false;
- container pool-record {
- description "Resource Pool record";
+ container pool-record {
+ description "Resource Pool record";
- list cloud {
- key "name";
- max-elements 16;
- rwpb:msg-new "ResmgrCloudPoolRecords";
- leaf name {
- description
- "The configured cloud account's pool records.";
- type leafref {
- path "/rwcloud:cloud/rwcloud:account/rwcloud:name";
+ list cloud {
+ key "name";
+ max-elements 16;
+ leaf name {
+ description
+ "The configured cloud account's pool records.";
+ type leafref {
+ path "../../../../rw-cloud:cloud/rw-cloud:account/rw-cloud:name";
+ }
}
- }
- list records {
- key "name";
- uses resource-pool-info;
+ list records {
+ key "name";
+ uses resource-pool-info;
- list free-vdu-list {
- key vdu-id;
- uses rwcal:vdu-info-params;
- }
+ list free-vdu-list {
+ key vdu-id;
+ uses rwcal:vdu-info-params;
+ }
- list in-use-vdu-list {
- key vdu-id;
- uses rwcal:vdu-info-params;
- }
+ list in-use-vdu-list {
+ key vdu-id;
+ uses rwcal:vdu-info-params;
+ }
- list free-vlink-list {
- key virtual-link-id;
- uses rwcal:virtual-link-info-params;
- }
+ list free-vlink-list {
+ key virtual-link-id;
+ uses rwcal:virtual-link-info-params;
+ }
- list in-use-vlink-list {
+ list in-use-vlink-list {
key virtual-link-id;
- uses rwcal:virtual-link-info-params;
+ uses rwcal:virtual-link-info-params;
+ }
}
}
}
}
}
+
+ augment "/rw-project:project/resource-mgmt/vdu-event/vdu-event-data/request-info/vm-flavor" {
+ uses manotypes:vm-flavor-name;
+ }
+
}
--- /dev/null
+<?xml version="1.0" ?>
+<config xmlns="http://riftio.com/ns/riftware-1.0/rw-rbac-role-def">
+ <key-definition>
+ <role>rw-project-mano:rw-staging-mgmt-role</role>
+ <key-set>
+ <name>project-name</name>
+ <path>/rw-project:project/rw-project:name</path>
+ </key-set>
+ </key-definition>
+
+ <role-definition>
+ <role>rw-project-mano:catalog-oper</role>
+ <keys-role>rw-project-mano:rw-staging-mgmt-role</keys-role>
+ <authorize>
+ <permissions>read execute</permissions>
+ <path>/rw-project:project/rw-staging-mgmt:staging-areas</path>
+ <path>/rw-staging-mgmt:create-staging-area</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project-mano:catalog-admin</role>
+ <keys-role>rw-project-mano:rw-staging-mgmt-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-project:project/rw-staging-mgmt:staging-areas</path>
+ <path>/rw-staging-mgmt:create-staging-area</path>
+ </authorize>
+ </role-definition>
+
+ <role-definition>
+ <role>rw-project:project-admin</role>
+ <keys-role>rw-project-mano:rw-staging-mgmt-role</keys-role>
+ <authorize>
+ <permissions>create read update delete execute</permissions>
+ <path>/rw-staging-mgmt:create-staging-area</path>
+ </authorize>
+ </role-definition>
+</config>
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix tailf;
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
tailf:annotate "/rw-staging-mgmt:create-staging-area" {
tailf:actionpoint rw_actionpoint;
}
- tailf:annotate "/rw-staging-mgmt:staging-areas" {
+ tailf:annotate "/rw-project:project/rw-staging-mgmt:staging-areas" {
tailf:callpoint rw_callpoint;
}
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix "yang";
}
- import rw-pb-ext {
- prefix "rwpb";
- }
-
import rw-cli-ext {
prefix "rwcli";
}
prefix "manotypes";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2016-06-01 {
description
"Initial revision.";
type uint64;
default 3600;
}
+
+ leaf project-name {
+ description "Project to which this belongs";
+ type leafref {
+ path "/rw-project:project/rw-project:name";
+ }
+ }
}
grouping staging-area-meta {
}
- container staging-areas {
- rwpb:msg-new StagingAreas;
- description "Staging Areas";
- config false;
+ augment "/rw-project:project" {
+ container staging-areas {
+ description "Staging Areas";
+ config false;
- list staging-area {
- rwpb:msg-new StagingArea;
- key "area-id";
+ list staging-area {
+ key "area-id";
- leaf area-id {
- description "Staging Area ID";
- type string;
- }
+ leaf area-id {
+ description "Staging Area ID";
+ type string;
+ }
- uses staging-area-config;
- uses staging-area-meta;
+ uses staging-area-config;
+ uses staging-area-meta;
+ }
}
}
-
rpc create-staging-area {
description "Creates a staging area for the upload.";
prefix "rw-vns";
}
- import rw-vnfd {
- prefix "rw-vnfd";
+ import rw-project-vnfd {
+ prefix "rw-project-vnfd";
}
import rw-vnfr {
prefix "rw-launchpad";
}
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
revision 2015-10-07 {
description
"Initial revision.";
namespace "http://riftio.com/ns/riftware-1.0/rw-vns";
prefix "rw-vns";
-
- import rw-pb-ext {
- prefix "rwpb";
- }
-
import rw-cli-ext {
prefix "rwcli";
}
prefix "rw-sdn";
}
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
revision 2015-10-05 {
description
"Initial revision.";
install(
PROGRAMS
+ pingpong_accounts_systest
pingpong_longevity_systest
pingpong_vnf_systest
pingpong_records_systest
pingpong_vnf_reload_systest
pingpong_lp_ha_systest
pingpong_recovery_systest
+ pingpong_floating_ip
pingpong_scaling_systest
+ pingpong_ha_systest
+ pingpong_mro_systest
+ pingpong_input_params_systest
+ primitives_systest
+ onboard_delete_vnfs_systest
+ accounts_creation_onboard_instatiate_systest
+ accounts_creation_onboard_instatiate_systest_repeat_option
+ accounts_creation_onboard_systest
scaling_systest
DESTINATION usr/rift/systemtest/pingpong_vnf
- COMPONENT ${PKG_LONG_NAME})
+ )
install(
PROGRAMS
multi_vm_vnf_slb_systest.sh
multi_vm_vnf_trafgen_systest.sh
DESTINATION usr/rift/systemtest/multi_vm_vnf
+ COMPONENT ${INSTALL_COMPONENT}
+ )
+
+install(
+ PROGRAMS
+ rbac_basics_systest
+ rbac_identity
+ rbac_roles_systest
+ rbac_usage_scenarios_systest
+ rbac_mano_xpaths_access
+ tbac_token
+ complex_scaling
+ DESTINATION usr/rift/systemtest/rbac
+ COMPONENT ${INSTALL_COMPONENT}
+ )
+
+install(
+ PROGRAMS
+ gui_test_launchpad_ui
+ DESTINATION usr/rift/systemtest/gui_tests
+ COMPONENT ${INSTALL_COMPONENT}
+ )
+
+install(
+ PROGRAMS
+ ha_basics_systest
+ ha_multiple_failovers_systest
+ DESTINATION usr/rift/systemtest/ha
COMPONENT ${PKG_LONG_NAME})
+install(
+ PROGRAMS
+ accounts_creation_onboard_instatiate_systest
+ l2port_chaining_systest
+ metadata_vdud_systest
+ ns_instantiate_memory_check_systest
+ DESTINATION usr/rift/systemtest/mano
+ COMPONENT ${INSTALL_COMPONENT}
+ )
+
install(
FILES
pytest/multivm_vnf/conftest.py
pytest/multivm_vnf/test_multi_vm_vnf_trafgen.py
pytest/multivm_vnf/test_trafgen_data.py
DESTINATION usr/rift/systemtest/pytest/multi_vm_vnf
- COMPONENT ${PKG_LONG_NAME})
+ COMPONENT ${INSTALL_COMPONENT}
+ )
install(
PROGRAMS
launchpad_longevity_systest
launchpad_systest
DESTINATION usr/rift/systemtest/launchpad
- COMPONENT ${PKG_LONG_NAME})
+ COMPONENT ${INSTALL_COMPONENT}
+ )
install(
FILES
racfg/multi_tenant_systest_openstack.racfg
DESTINATION usr/rift/systemtest/launchpad
- COMPONENT ${PKG_LONG_NAME})
+ COMPONENT ${INSTALL_COMPONENT}
+ )
install(
FILES
pytest/test_start_standby.py
pytest/test_failover.py
DESTINATION usr/rift/systemtest/pytest/system
- COMPONENT ${PKG_LONG_NAME})
+ COMPONENT ${INSTALL_COMPONENT}
+ )
install(
FILES
pytest/ns/conftest.py
pytest/ns/test_onboard.py
+ pytest/ns/test_multiple_ns_instantiation.py
DESTINATION usr/rift/systemtest/pytest/system/ns
- COMPONENT ${PKG_LONG_NAME})
+ COMPONENT ${INSTALL_COMPONENT}
+ )
install(
FILES
+ pytest/ns/pingpong/test_accounts_framework.py
+ pytest/ns/pingpong/test_floating_ip.py
+ pytest/ns/pingpong/test_ha_pingpong.py
pytest/ns/pingpong/test_pingpong.py
pytest/ns/pingpong/test_pingpong_longevity.py
pytest/ns/pingpong/test_records.py
pytest/ns/pingpong/test_scaling.py
+ pytest/ns/pingpong/test_mro_pingpong.py
+ pytest/ns/pingpong/test_input_params.py
DESTINATION usr/rift/systemtest/pytest/system/ns/pingpong
+ COMPONENT ${INSTALL_COMPONENT}
+ )
+
+install(
+ FILES
+ pytest/ns/rbac/conftest.py
+ pytest/ns/rbac/test_rbac.py
+ pytest/ns/rbac/test_rbac_roles.py
+ pytest/ns/rbac/test_rbac_identity.py
+ pytest/ns/rbac/test_tbac_token.py
+ pytest/ns/rbac/test_rbac_usages.py
+ pytest/ns/rbac/test_rbac_mano_xpath_access.py
+ DESTINATION usr/rift/systemtest/pytest/system/ns/rbac
+ COMPONENT ${INSTALL_COMPONENT}
+ )
+
+install(
+ FILES
+ pytest/ns/ha/conftest.py
+ pytest/ns/ha/test_ha_basic.py
+ pytest/ns/ha/test_ha_operations.py
+ pytest/ns/ha/test_ha_multiple_failovers.py
+ DESTINATION usr/rift/systemtest/pytest/system/ns/ha
COMPONENT ${PKG_LONG_NAME})
+install(
+ FILES
+ pytest/ns/gui_tests/conftest.py
+ pytest/ns/gui_tests/test_launchpad_ui.py
+ DESTINATION usr/rift/systemtest/pytest/system/ns/gui_tests
+ COMPONENT ${PKG_LONG_NAME})
+
+install(
+ FILES
+ pytest/ns/restapitest/test_project_restapi.py
+ DESTINATION usr/rift/systemtest/pytest/system/ns/restapitest
+ COMPONENT ${INSTALL_COMPONENT}
+ )
+
+install(
+ FILES
+ pytest/ns/restapitest/utils/__init__.py
+ pytest/ns/restapitest/utils/imports.py
+ pytest/ns/restapitest/utils/tbac_token_utils.py
+ pytest/ns/restapitest/utils/traversal_engine.py
+ pytest/ns/restapitest/utils/utils.py
+ DESTINATION usr/rift/systemtest/pytest/system/ns/restapitest/utils
+ COMPONENT ${INSTALL_COMPONENT}
+ )
+
+install(
+ FILES
+ pytest/ns/restapitest/test_inputs/test_inputs.json
+ DESTINATION usr/rift/systemtest/pytest/system/ns/restapitest/test_inputs
+ COMPONENT ${INSTALL_COMPONENT}
+ )
+
install(
FILES
pytest/ns/haproxy/test_scaling.py
DESTINATION usr/rift/systemtest/pytest/system/ns/haproxy
- COMPONENT ${PKG_LONG_NAME})
+ COMPONENT ${INSTALL_COMPONENT}
+ )
install(
FILES
+ racfg/pingpong_accounts_systest.racfg
racfg/pingpong_vnf_systest_cloudsim.racfg
racfg/pingpong_vnf_systest_openstack.racfg
racfg/pingpong_scaling_systest_openstack.racfg
+ racfg/pingpong_ha_systest_openstack.racfg
racfg/pingpong_records_systest_cloudsim.racfg
racfg/pingpong_records_systest_openstack.racfg
racfg/pingpong_records_systest_openstack_xml.racfg
racfg/pingpong_vnf_reload_systest_openstack.racfg
racfg/pingpong_vnf_reload_systest_openstack_xml.racfg
+ racfg/pingpong_staticip_systest_openstack.racfg
+ racfg/pingpong_staticip_systest_openstack_ipv6.racfg
+ racfg/pingpong_portsecurity_systest_openstack.racfg
+ racfg/pingpong_port_sequencing_systest_openstack.racfg
+ racfg/pingpong_port_sequencing_systest_openstack_xml.racfg
+ racfg/pingpong_vnf_dependencies_systest_openstack.racfg
+ racfg/pingpong_vnf_dependencies_systest_openstack_xml.racfg
+ racfg/pingpong_input_params_systest.racfg
+ racfg/pingpong_mro_systest.racfg
+ racfg/primitives_systest.racfg
+ racfg/pingpong_floating_ip.racfg
racfg/scaling_systest.racfg
racfg/recovery_systest.racfg
racfg/pingpong_lp_ha_systest_openstack.racfg
+ racfg/pingpong_update_descriptors_instantiate_systest_openstack.racfg
+ racfg/onboard_delete_vnfs_systest_openstack.racfg
+ racfg/pingpong_metadata_vdud_systest_openstack.racfg
+ racfg/pingpong_multidisk_systest_openstack.racfg
+ racfg/pingpong_multidisk_systest_openstack_xml.racfg
+ racfg/embedded_images_vnf_multiple_accounts_systest_openstack.racfg
DESTINATION usr/rift/systemtest/pingpong_vnf
+ COMPONENT ${INSTALL_COMPONENT}
+ )
+
+install(
+ FILES
+ racfg/l2port_chaining_systest_openstack.racfg
+ racfg/metadata_vdud_systest_openstack.racfg
+ racfg/ns_instantiate_memory_check.racfg
+ DESTINATION usr/rift/systemtest/mano
+ COMPONENT ${INSTALL_COMPONENT}
+ )
+
+install(
+ FILES
+ racfg/gui_test_launchpad_ui.racfg
+ DESTINATION usr/rift/systemtest/gui_tests
+ COMPONENT ${INSTALL_COMPONENT}
+ )
+
+install(
+ FILES
+ racfg/rbac_basics_systest.racfg
+ racfg/rbac_identity.racfg
+ racfg/rbac_user_roles_systest.racfg
+ racfg/rbac_project_roles_systest.racfg
+ racfg/rbac_account_roles_systest.racfg
+ racfg/rbac_nsr_roles_systest.racfg
+ racfg/rbac_onboarding_roles_systest.racfg
+ racfg/rbac_syslog_server_roles_systest.racfg
+ racfg/rbac_redundancy_config_roles_systest.racfg
+ racfg/rbac_usage_scenarios_systest.racfg
+ racfg/rbac_mano_xpaths_access.racfg
+ racfg/rbac_account_roles_systest_restconf.racfg
+ racfg/rbac_basics_systest_restconf.racfg
+ racfg/rbac_mano_xpaths_access_restconf.racfg
+ racfg/rbac_usage_scenarios_systest_restconf.racfg
+ racfg/tbac_basics_systest.racfg
+ racfg/tbac_identity.racfg
+ racfg/tbac_token.racfg
+ racfg/tbac_user_roles_systest.racfg
+ racfg/tbac_project_roles_systest.racfg
+ racfg/tbac_account_roles_systest.racfg
+ racfg/tbac_nsr_roles_systest.racfg
+ racfg/tbac_onboarding_roles_systest.racfg
+ racfg/tbac_syslog_server_roles_systest.racfg
+ racfg/tbac_usage_scenarios_systest.racfg
+ racfg/tbac_mano_xpaths_access.racfg
+ racfg/tbac_basics_systest_xml.racfg
+ racfg/tbac_identity_xml.racfg
+ racfg/tbac_token_xml.racfg
+ racfg/tbac_user_roles_systest_xml.racfg
+ racfg/tbac_project_roles_systest_xml.racfg
+ racfg/tbac_account_roles_systest_xml.racfg
+ racfg/tbac_nsr_roles_systest_xml.racfg
+ racfg/tbac_onboarding_roles_systest_xml.racfg
+ racfg/tbac_syslog_server_roles_systest_xml.racfg
+ racfg/tbac_usage_scenarios_systest_xml.racfg
+ racfg/tbac_mano_xpaths_access_xml.racfg
+ racfg/complex_scaling.racfg
+ DESTINATION usr/rift/systemtest/rbac
+ COMPONENT ${INSTALL_COMPONENT}
+ )
+
+install(
+ FILES
+ racfg/ha_basics_systest.racfg
+ racfg/ha_nsr_systest.racfg
+ racfg/ha_multiple_failovers_systest.racfg
+ DESTINATION usr/rift/systemtest/ha
COMPONENT ${PKG_LONG_NAME})
install(
racfg/multivm_vnf_slb_systest.racfg
racfg/multivm_vnf_trafgen_systest.racfg
DESTINATION usr/rift/systemtest/multi_vm_vnf
- COMPONENT ${PKG_LONG_NAME})
+ COMPONENT ${INSTALL_COMPONENT}
+ )
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+ ${PYTEST_DIR}/system/test_launchpad.py \
+ ${PYTEST_DIR}/system/ns/test_onboard.py \
+ ${PYTEST_DIR}/system/ns/pingpong/test_records.py"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+ ${PYTEST_DIR}/system/test_launchpad.py \
+ ${PYTEST_DIR}/system/ns/test_onboard.py \
+ ${PYTEST_DIR}/system/ns/pingpong/test_records.py --repeat 2"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+ ${PYTEST_DIR}/system/test_launchpad.py \
+ ${PYTEST_DIR}/system/ns/test_onboard.py"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider -k 'test_complex_scaling' \
+ ${PYTEST_DIR}/system/ns/rbac/test_rbac_usages.py"
+
+REBOOT_SCRIPT_TEST="py.test -x -v -p no:cacheprovider -k 'test_complex_scaling_verification' \
+ ${PYTEST_DIR}/system/ns/rbac/test_rbac_usages.py"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+ ${PYTEST_DIR}/system/ns/gui_tests/test_launchpad_ui.py"
+
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider ${PYTEST_DIR}/system/ns/ha/test_ha_basic.py"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
\ No newline at end of file
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider ${PYTEST_DIR}/system/ns/ha/test_ha_operations.py"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
\ No newline at end of file
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider ${PYTEST_DIR}/system/ns/ha/test_ha_multiple_failovers.py"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+ ${PYTEST_DIR}/system/test_launchpad.py \
+ ${PYTEST_DIR}/system/ns/test_onboard.py \
+ ${PYTEST_DIR}/system/ns/pingpong/test_records.py::TestRecordsData::test_l2_port_chaining"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+ ${PYTEST_DIR}/system/test_launchpad.py \
+ ${PYTEST_DIR}/system/ns/test_onboard.py \
+ ${PYTEST_DIR}/system/ns/pingpong/test_records.py::TestRecordsData::test_wait_for_ns_configured \
+ ${PYTEST_DIR}/system/ns/pingpong/test_records.py::TestRecordsData::test_metadata_vdud"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+ ${PYTEST_DIR}/system/test_launchpad.py \
+ ${PYTEST_DIR}/system/ns/test_onboard.py::TestNsrStart::test_upload_images \
+ ${PYTEST_DIR}/system/ns/test_multiple_ns_instantiation.py"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+ ${PYTEST_DIR}/system/test_launchpad.py \
+ ${PYTEST_DIR}/system/ns/test_onboard.py::TestNsrStart::test_upload_delete_descriptors"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2017/06/21
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+restconf=true
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+ ${PYTEST_DIR}/system/test_launchpad.py \
+ ${PYTEST_DIR}/system/ns/pingpong/test_accounts_framework.py"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+ ${PYTEST_DIR}/system/ns/pingpong/test_floating_ip.py"
+
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
\ No newline at end of file
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2017/04/27
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+SCRIPT_TEST="py.test -x -s -p no:cacheprovider \
+ ${PYTEST_DIR}/system/ns/pingpong/test_ha_pingpong.py"
+
+test_prefix="pingpong_ha_systest"
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
+exit $?
+
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2017/06/22
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+ ${PYTEST_DIR}/system/test_launchpad.py \
+ ${PYTEST_DIR}/system/ns/pingpong/test_input_params.py"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Author(s): Paul Laidler
+# Creation Date: 2017/06/21
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+restconf=true
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+ ${PYTEST_DIR}/system/test_launchpad.py \
+ ${PYTEST_DIR}/system/ns/pingpong/test_mro_pingpong.py"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
# Parse commonline argument and set test variables
parse_args "${@}"
+# We want to run in expanded mode
+collapsed_mode=false
+
# Construct the test command based on the test variables
construct_test_command
cd "${PYTEST_DIR}"
eval ${test_cmd}
+test_rc=$?
# display scaling log
scaling_log="${RIFT_ARTIFACTS}/scaling_${AUTO_TASK_ID}.log"
cat ${scaling_log}
+
+exit $test_rc
${PYTEST_DIR}/system/ns/test_onboard.py \
${PYTEST_DIR}/system/ns/pingpong/test_records.py"
-REBOOT_SCRIPT_TEST="py.test -x -v -p no:cacheprovider -k 'test_wait_for_launchpad_started or test_wait_for_pingpong_configured or test_wait_for_pingpong_configured or Teardown' \
+REBOOT_SCRIPT_TEST="py.test -x -v -p no:cacheprovider -k 'test_wait_for_nsr_started or test_wait_for_nsr_configured or Teardown' \
${PYTEST_DIR}/system/test_launchpad.py \
${PYTEST_DIR}/system/ns/test_onboard.py \
${PYTEST_DIR}/system/ns/pingpong/test_records.py"
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+ ${PYTEST_DIR}/system/test_launchpad.py \
+ ${PYTEST_DIR}/system/ns/test_onboard.py \
+ ${PYTEST_DIR}/system/ns/pingpong/test_records.py::TestRecordsData::test_primitives"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
# limitations under the License.
#
-import pytest
+import gi
+import itertools
+import logging
import os
+import pytest
+import random
+import re
+import rwlogger
+import rw_peas
import subprocess
import sys
+import rift.auto.accounts
import rift.auto.log
import rift.auto.session
-import rift.vcs.vcs
import rift.rwcal.openstack
-import logging
+import rift.vcs.vcs
-import gi
-gi.require_version('RwCloudYang', '1.0')
+from gi import require_version
+require_version('RwCloudYang', '1.0')
+require_version('RwTypes', '1.0')
+require_version('RwRbacPlatformYang', '1.0')
+require_version('RwUserYang', '1.0')
+require_version('RwProjectYang', '1.0')
+require_version('RwConmanYang', '1.0')
+require_version('RwRbacInternalYang', '1.0')
+require_version('RwRoAccountYang', '1.0')
+
+from gi.repository import (
+ RwCloudYang,
+ RwTypes,
+ RwUserYang,
+ RwProjectYang,
+ RwRbacPlatformYang,
+ RwConmanYang,
+ RwRbacInternalYang,
+ RwRoAccountYang
+)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+@pytest.fixture(scope='session')
+def use_accounts():
+ account_names = os.environ.get('RW_AUTO_ACCOUNTS')
+ if account_names:
+ return account_names.split(":")
+ return []
-from gi.repository import RwCloudYang
+@pytest.fixture(scope='session')
+def account_storage():
+ return rift.auto.accounts.Storage()
+
+@pytest.fixture(scope='session')
+def stored_accounts(account_storage):
+ return account_storage.list_cloud_accounts()
@pytest.fixture(scope='session')
def cloud_name_prefix():
return 'cloud'
@pytest.fixture(scope='session')
-def cloud_account_name(cloud_name_prefix):
+def cloud_account_name(cloud_account):
'''fixture which returns the name used to identify the cloud account'''
- return '{prefix}-0'.format(prefix=cloud_name_prefix)
+ return cloud_account.name
@pytest.fixture(scope='session')
def sdn_account_name():
'''fixture which returns the name used to identify the sdn account'''
return 'sdn-0'
+@pytest.fixture(scope='session')
+def openstack_sdn_account_name():
+ '''fixture which returns the name used to identify the sdn account'''
+ return 'openstack-sdn-0'
+
@pytest.fixture(scope='session')
def sdn_account_type():
'''fixture which returns the account type used by the sdn account'''
Returns:
xpath to be used when configure a cloud account
'''
- return '/cloud/account'
+ return '/rw-project:project[rw-project:name="default"]/cloud/account'
@pytest.fixture(scope='session')
-def cloud_accounts(cloud_module, cloud_name_prefix, cloud_host, cloud_user, cloud_tenants, cloud_type):
+def cloud_accounts(request, cloud_module, cloud_name_prefix, cloud_host, cloud_user, cloud_tenants, cloud_type, stored_accounts, use_accounts, vim_host_override, vim_ssl_enabled, vim_user_domain_override, vim_project_domain_override, logger):
'''fixture which returns a list of CloudAccounts. One per tenant provided
Arguments:
- cloud_module - fixture: module defining cloud account
- cloud_name_prefix - fixture: name prefix used for cloud account
- cloud_host - fixture: cloud host address
- cloud_user - fixture: cloud account user key
- cloud_tenants - fixture: list of tenants to create cloud accounts on
- cloud_type - fixture: cloud account type
+ cloud_module - fixture: module defining cloud account
+ cloud_name_prefix - fixture: name prefix used for cloud account
+ cloud_host - fixture: cloud host address
+ cloud_user - fixture: cloud account user key
+ cloud_tenants - fixture: list of tenants to create cloud accounts on
+ cloud_type - fixture: cloud account type
+ stored_accounts - fixture: account storage
+ use_accounts - fixture: use accounts from account storage
+ vim_host_override - fixture: use specified vim instead of account's vim
+ vim_ssl_enabled - fixture: enable or disable ssl regardless of accounts setting
+ vim_user_domain_override - fixture: use specified user domain instead of account's user domain
+ vim_project_domain_override - fixture: use specified project domain instead of account's project domain
Returns:
A list of CloudAccounts
'''
+
+
accounts = []
- for idx, cloud_tenant in enumerate(cloud_tenants):
- cloud_account_name = "{prefix}-{idx}".format(prefix=cloud_name_prefix, idx=idx)
-
- if cloud_type == 'lxc':
- accounts.append(
- cloud_module.CloudAccount.from_dict({
- "name": cloud_account_name,
- "account_type": "cloudsim_proxy"})
- )
- elif cloud_type == 'openstack':
- password = 'mypasswd'
- auth_url = 'http://{cloud_host}:5000/v3/'.format(cloud_host=cloud_host)
- mgmt_network = os.getenv('MGMT_NETWORK', 'private')
- accounts.append(
- cloud_module.CloudAccount.from_dict({
- 'name': cloud_account_name,
- 'account_type': 'openstack',
- 'openstack': {
- 'admin': True,
- 'key': cloud_user,
- 'secret': password,
- 'auth_url': auth_url,
- 'tenant': cloud_tenant,
- 'mgmt_network': mgmt_network}})
- )
- elif cloud_type == 'mock':
- accounts.append(
- cloud_module.CloudAccount.from_dict({
- "name": cloud_account_name,
- "account_type": "mock"})
- )
+
+ if use_accounts:
+ for account_name in stored_accounts:
+ if account_name in use_accounts:
+ if vim_host_override and stored_accounts[account_name].account_type == 'openstack':
+ old_auth = stored_accounts[account_name].openstack.auth_url
+ stored_accounts[account_name].openstack.auth_url = re.sub('(?:(?<=https://)|(?<=http://)).*?(?=:)', vim_host_override, old_auth)
+ if vim_ssl_enabled == False:
+ stored_accounts[account_name].openstack.auth_url = re.sub(
+ '^https',
+ 'http',
+ stored_accounts[account_name].openstack.auth_url
+ )
+ elif vim_ssl_enabled == True:
+ stored_accounts[account_name].openstack.auth_url = re.sub(
+ '^http(?=:)',
+ 'https',
+ stored_accounts[account_name].openstack.auth_url
+ )
+ if vim_user_domain_override:
+ stored_accounts[account_name].openstack.user_domain = vim_user_domain_override
+ if vim_project_domain_override:
+ stored_accounts[account_name].openstack.project_domain = vim_project_domain_override
+ accounts.append(stored_accounts[account_name])
+ else:
+ def account_name_generator(prefix):
+ '''Generator of unique account names for a given prefix
+ Arguments:
+ prefix - prefix of account name
+ '''
+ idx=0
+ while True:
+ yield "{prefix}-{idx}".format(prefix=prefix, idx=idx)
+ idx+=1
+ name_gen = account_name_generator(cloud_name_prefix)
+
+ for cloud_tenant in cloud_tenants:
+ if cloud_type == 'lxc':
+ accounts.append(
+ cloud_module.CloudAcc.from_dict({
+ "name": next(name_gen),
+ "account_type": "cloudsim_proxy"})
+ )
+ elif cloud_type == 'openstack':
+ hosts = [cloud_host]
+ if request.config.option.upload_images_multiple_accounts:
+ hosts.append('10.66.4.32')
+ for host in hosts:
+ password = 'mypasswd'
+ auth_url = 'http://{host}:5000/v3/'.format(host=host)
+ if vim_ssl_enabled == True:
+ auth_url = 'https://{host}:5000/v3/'.format(host=host)
+ mgmt_network = os.getenv('MGMT_NETWORK', 'private')
+ accounts.append(
+ cloud_module.YangData_RwProject_Project_Cloud_Account.from_dict({
+ 'name': next(name_gen),
+ 'account_type': 'openstack',
+ 'openstack': {
+ 'admin': True,
+ 'key': cloud_user,
+ 'secret': password,
+ 'auth_url': auth_url,
+ 'tenant': cloud_tenant,
+ 'mgmt_network': mgmt_network,
+ 'floating_ip_pool': 'public',
+ }}))
+ elif cloud_type == 'mock':
+ accounts.append(
+ cloud_module.CloudAcc.from_dict({
+ "name": next(name_gen),
+ "account_type": "mock"})
+ )
return accounts
@pytest.fixture(scope='session', autouse=True)
def cloud_account(cloud_accounts):
- '''fixture which returns an instance of CloudAccount
+ '''fixture which returns an instance of RwCloudYang.CloudAcc
Arguments:
cloud_accounts - fixture: list of generated cloud accounts
Returns:
- An instance of CloudAccount
+ An instance of RwCloudYang.CloudAcc
'''
return cloud_accounts[0]
@pytest.fixture(scope='class')
-def openstack_client(cloud_host, cloud_user, cloud_tenant):
- """Fixture which returns a session to openstack host.
+def vim_clients(cloud_accounts):
+ """Fixture which returns sessions to VIMs"""
+ vim_sessions = {}
+ for cloud_account in cloud_accounts:
+ if cloud_account.account_type == 'openstack':
+ vim_sessions[cloud_account.name] = rift.rwcal.openstack.OpenstackDriver(**{
+ 'username': cloud_account.openstack.key,
+ 'password': cloud_account.openstack.secret,
+ 'auth_url': cloud_account.openstack.auth_url,
+ 'project': cloud_account.openstack.tenant,
+ 'mgmt_network': cloud_account.openstack.mgmt_network,
+ 'cert_validate': cloud_account.openstack.cert_validate,
+ 'user_domain': cloud_account.openstack.user_domain,
+ 'project_domain': cloud_account.openstack.project_domain,
+ 'region': cloud_account.openstack.region
+ })
+ # Add initialization for other VIM types
+ return vim_sessions
- Returns:
- Session to an openstack host.
- """
- password = 'mypasswd'
- auth_url = 'http://{cloud_host}:5000/v3/'.format(cloud_host=cloud_host)
- mgmt_network = os.getenv('MGMT_NETWORK', 'private')
- return rift.rwcal.openstack.OpenstackDriver(**{'username': cloud_user,
- 'password': password,
- 'auth_url': auth_url,
- 'project' : cloud_tenant,
- 'mgmt_network': mgmt_network,
- 'cert_validate': False,
- 'user_domain': 'Default',
- 'project_domain':'Default',
- 'region': 'RegionOne'})
+@pytest.fixture(scope='session')
+def openmano_prefix():
+ '''Fixture that returns the prefix to be used for openmano resource names'''
+ return 'openmano'
+
+@pytest.fixture(scope='session')
+def openmano_hosts(sut_host_names):
+ '''Fixture that returns the set of host logical names to be used for openmano'''
+ return [name for name in sut_host_names if 'openmano' in name]
+
+@pytest.fixture(scope='session')
+def openmano_accounts(openmano_hosts, sut_host_addrs, cloud_accounts, openmano_prefix, logger):
+ """Fixture that returns a list of Openmano accounts. One per host, and tenant provided"""
+ accounts=[]
+
+ if not openmano_hosts:
+ return accounts
+
+ host_cycle = itertools.cycle(openmano_hosts)
+ for cloud_account in cloud_accounts:
+ if cloud_account.account_type not in ['openstack']:
+ logger.warning('Skipping creating ro datacenter for cloud account [%s] - unsupported account type [%s]', cloud_account.name, cloud_account.account_type)
+ continue
+
+ try:
+ host = next(host_cycle)
+ except StopIteration:
+ break
+
+ if cloud_account.account_type == 'openstack':
+ accounts.append({
+ 'account_name': "vim_%s" % cloud_account.name,
+ 'openmano_tenant': host,
+ 'openmano_addr': sut_host_addrs[host],
+ 'openmano_port': 9090,
+ 'datacenter': 'dc_%s' % (cloud_account.name),
+ 'vim_account': cloud_account,
+ 'vim_name': cloud_account.name,
+ 'vim_type': cloud_account.account_type,
+ 'vim_auth_url': cloud_account.openstack.auth_url,
+ 'vim_user':cloud_account.openstack.key,
+ 'vim_password':cloud_account.openstack.secret,
+ 'vim_tenant':cloud_account.openstack.tenant,
+ })
+
+ return accounts
+
+@pytest.fixture(scope='session')
+def ro_account_info(openmano_accounts):
+ ro_account_info = {}
+ for account in openmano_accounts:
+ ssh_cmd = (
+ 'ssh {openmano_addr} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- '
+ ).format(
+ openmano_addr=account['openmano_addr']
+ )
+
+ if account['account_name'] not in ro_account_info:
+ tenant_create_cmd = (
+ '{ssh_cmd} openmano tenant-create {name}'
+ ).format(
+ ssh_cmd=ssh_cmd,
+ name=account['account_name']
+ )
+ tenant_info = subprocess.check_output(tenant_create_cmd, shell=True).decode('ascii')
+ (tenant_id, tenant_name) = tenant_info.split()
+ ro_account_info[account['account_name']] = {
+ 'tenant_id':tenant_id,
+ 'account': account,
+ 'account_type':'openmano',
+ 'host':account['openmano_addr'],
+ 'port':9090,
+ 'datacenters':[],
+ }
+ else:
+ tenant_id = ro_account_info[account['account_name']]['tenant_id']
+
+ datacenter_create_cmd = (
+ '{ssh_cmd} openmano datacenter-create --type {vim_type} {datacenter} {vim_auth_url}'
+ ).format(
+ ssh_cmd=ssh_cmd,
+ vim_type=account['vim_type'],
+ datacenter=account['datacenter'],
+ vim_auth_url=account['vim_auth_url']
+ )
+ datacenter_attach_cmd = (
+ '{ssh_cmd} OPENMANO_TENANT={tenant_id} openmano datacenter-attach {datacenter} --user={vim_user} '
+ '--password={vim_password} --vim-tenant-name={vim_tenant}'
+ ).format(
+ ssh_cmd=ssh_cmd,
+ tenant_id=tenant_id,
+ datacenter=account['datacenter'],
+ vim_user=account['vim_user'],
+ vim_password=account['vim_password'],
+ vim_tenant=account['vim_tenant']
+ )
+ subprocess.check_call(datacenter_create_cmd, shell=True)
+ subprocess.check_call(datacenter_attach_cmd, shell=True)
+
+ ro_account_info[account['account_name']]['datacenters'].append(account['datacenter'])
+ return ro_account_info
+
+
+@pytest.fixture(scope='session')
+def ro_accounts(ro_account_info):
+ '''Fixture that returns a map of RwRoAccountYang.ROAccount objects for each
+ account in ro_account_info
+ '''
+ ro_accounts = {}
+ for name, account_info in ro_account_info.items():
+ ro_accounts[name] = RwRoAccountYang.YangData_RwProject_Project_RoAccount_Account.from_dict({
+ 'name':name,
+ 'ro_account_type':account_info['account_type'],
+ 'openmano':{
+ 'host':account_info['host'],
+ 'port':account_info['port'],
+ 'tenant_id':account_info['tenant_id'],
+ }
+ })
+ return ro_accounts
+
+@pytest.fixture(scope='session')
+def ro_map(ro_account_info, ro_accounts):
+ '''Fixture that returns a map of vim name to datacenter / ro name tuples for each account in ro_account_info
+ '''
+ ro_map = {}
+ for account_name, account_info in ro_account_info.items():
+ vim_name = account_info['account']['vim_account'].name
+ datacenter_name = account_info['account']['datacenter']
+ ro_map[vim_name] = (account_name, datacenter_name)
+ return ro_map
+
+@pytest.fixture(scope='session')
+def cal(cloud_account):
+ """Fixture which returns cal interface"""
+ if cloud_account.account_type == 'openstack':
+ plugin = rw_peas.PeasPlugin('rwcal_openstack', 'RwCal-1.0')
+ elif cloud_account.account_type == 'openvim':
+ plugin = rw_peas.PeasPlugin('rwcal_openmano_vimconnector', 'RwCal-1.0')
+ elif cloud_account.account_type == 'aws':
+ plugin = rw_peas.PeasPlugin('rwcal_aws', 'RwCal-1.0')
+ elif cloud_account.account_type == 'vsphere':
+ plugin = rw_peas.PeasPlugin('rwcal-python', 'RwCal-1.0')
+
+ engine, info, extension = plugin()
+ cal = plugin.get_interface("Cloud")
+ rwloggerctx = rwlogger.RwLog.Ctx.new("Cal-Log")
+ rc = cal.init(rwloggerctx)
+ assert rc == RwTypes.RwStatus.SUCCESS
+
+ return cal
+
+@pytest.fixture(scope='session')
+def rbac_user_passwd():
+ """A common password being used for all rbac users."""
+ return 'mypasswd'
+
+@pytest.fixture(scope='session')
+def user_domain(tbac):
+ """user-domain being used in this rbac test."""
+ if tbac:
+ return 'tbacdomain'
+ return 'system'
+
+@pytest.fixture(scope='session')
+def platform_roles():
+ """Returns a tuple of platform roles"""
+ return ('rw-rbac-platform:platform-admin', 'rw-rbac-platform:platform-oper', 'rw-rbac-platform:super-admin')
+
+@pytest.fixture(scope='session')
+def user_roles():
+ """Returns a tuple of user roles"""
+ return ('rw-project:project-admin', 'rw-project:project-oper', 'rw-project-mano:catalog-oper', 'rw-project-mano:catalog-admin',
+ 'rw-project-mano:lcm-admin', 'rw-project-mano:lcm-oper', 'rw-project-mano:account-admin', 'rw-project-mano:account-oper',)
+
+@pytest.fixture(scope='session')
+def all_roles(platform_roles, user_roles):
+ """Returns a tuple of platform roles plus user roles"""
+ return platform_roles + user_roles
+
+@pytest.fixture(scope='session')
+def rw_user_proxy(mgmt_session):
+ return mgmt_session.proxy(RwUserYang)
+
+@pytest.fixture(scope='session')
+def rw_project_proxy(mgmt_session):
+ return mgmt_session.proxy(RwProjectYang)
+
+@pytest.fixture(scope='session')
+def rw_rbac_int_proxy(mgmt_session):
+ return mgmt_session.proxy(RwRbacInternalYang)
+
+@pytest.fixture(scope='session')
+def rw_ro_account_proxy(mgmt_session):
+ return mgmt_session.proxy(RwRoAccountYang)
+
+@pytest.fixture(scope='session')
+def rw_conman_proxy(mgmt_session):
+ return mgmt_session.proxy(RwConmanYang)
+
+@pytest.fixture(scope='session')
+def rbac_platform_proxy(mgmt_session):
+ return mgmt_session.proxy(RwRbacPlatformYang)
+
+@pytest.fixture(scope='session')
+def project_keyed_xpath():
+ return '/project[name={project_name}]'
+
+@pytest.fixture(scope='session')
+def user_keyed_xpath():
+ return "/user-config/user[user-name={user}][user-domain={domain}]"
+
+@pytest.fixture(scope='session')
+def platform_config_keyed_xpath():
+ return "/rbac-platform-config/user[user-name={user}][user-domain={domain}]"
+
+@pytest.fixture(scope='session')
+def fmt_vnfd_catalog_xpath():
+ """Fixture that returns vnfd-catalog keyed xpath"""
+ xpath = '/project[name={project}]/vnfd-catalog'
+ return xpath
+
+@pytest.fixture(scope='session')
+def fmt_vnfd_id_xpath():
+ """Fixture that returns vnfd id xpath"""
+ xpath = '/rw-project:project[rw-project:name={project}]/project-vnfd:vnfd-catalog/project-vnfd:vnfd[project-vnfd:id={vnfd_id}]'
+ return xpath
+
+@pytest.fixture(scope='session')
+def fmt_nsd_catalog_xpath():
+ """Fixture that returns nsd-catalog keyed xpath"""
+ xpath = '/project[name={project}]/nsd-catalog'
+ return xpath
+
+@pytest.fixture(scope='session')
+def fmt_nsd_id_xpath():
+ """Fixture that returns nsd id xpath"""
+ xpath = '/rw-project:project[rw-project:name={project}]/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id={nsd_id}]'
+ return xpath
+
+@pytest.fixture(scope='session')
+def fmt_prefixed_cloud_xpath():
+ """Fixture that returns cloud keyed xpath"""
+ xpath = '/rw-project:project[rw-project:name={project}]/rw-cloud:cloud/rw-cloud:account[rw-cloud:name={account_name}]'
+ return xpath
+
+@pytest.fixture(scope='session')
+def fmt_cloud_xpath():
+ """Fixture that returns cloud keyed xpath without yang prefix"""
+ xpath = '/project[name={project}]/cloud/account[name={account_name}]'
+ return xpath
+
+@pytest.fixture(scope='session', autouse=True)
+def launchpad_glance_api_log():
+ log_file = os.path.join(
+ os.environ.get('HOME_RIFT', os.environ.get('RIFT_INSTALL')),
+ 'var','rift','log','glance','glance-api.log'
+ )
+ return log_file
+
+@pytest.fixture(scope='session', autouse=True)
+def _glance_api_scraper_session(request, log_manager, confd_host, launchpad_glance_api_log):
+ '''Fixture which returns an instance of rift.auto.log.FileSource to scrape
+ the glance api logs of the launchpad host
+ '''
+ scraper = rift.auto.log.FileSource(host=confd_host, path=launchpad_glance_api_log)
+ log_manager.source(source=scraper)
+ return scraper
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
import tempfile
from gi.repository import (
- NsdYang,
+ ProjectNsdYang as NsdYang,
NsrYang,
RwNsrYang,
RwVnfrYang,
VnfrYang,
VldYang,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
RwLaunchpadYang,
RwBaseYang
)
@brief Scriptable load-balancer test with multi-vm VNFs
"""
+import gi
import json
import logging
import os
import uuid
from gi.repository import (
- NsdYang,
+ RwProjectNsdYang,
NsrYang,
RwNsrYang,
VnfrYang,
VldYang,
- RwVnfdYang,
+ RwProjectVnfdYang,
RwLaunchpadYang,
RwBaseYang
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
import rift.auto.mano
Return:
NSR object
"""
- nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = rift.auto.mano.resource_name(nsr.id)
nsr.nsd_ref = nsd_id
nsr.admin_status = "ENABLED"
nsr.input_parameter.extend(input_param_list)
- nsr.cloud_account = cloud_account_name
+ nsr.datacenter = cloud_account_name
return nsr
pass
-def wait_onboard_transaction_finished(logger, transaction_id, timeout=10, host="127.0.0.1"):
+def wait_onboard_transaction_finished(logger, transaction_id, timeout=10, host="127.0.0.1", project="default"):
logger.info("Waiting for onboard trans_id %s to complete", transaction_id)
def check_status_onboard_status():
- uri = 'http://%s:4567/api/upload/%s/state' % (host, transaction_id)
+ uri = 'http://%s:8008/api/operational/project/%s/create-jobs/job/%s' % (host, project, transaction_id)
curl_cmd = 'curl --insecure {uri}'.format(
uri=uri
)
trans_id = upload_descriptor(logger, trafgen_vnfd_package_file, launchpad_host)
wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 1, "There should only be a single vnfd"
vnfd = vnfds[0]
trans_id = upload_descriptor(logger, trafsink_vnfd_package_file, launchpad_host)
wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 2, "There should be two vnfds"
assert "multivm_trafsink_vnfd" in [vnfds[0].name, vnfds[1].name]
trans_id = upload_descriptor(logger, slb_vnfd_package_file, launchpad_host)
wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 3, "There should be two vnfds"
assert "multivm_slb_vnfd" in [vnfds[0].name, vnfds[1].name]
trans_id = upload_descriptor(logger, multi_vm_vnf_nsd_package_file, launchpad_host)
wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsds = catalog.nsd
assert len(nsds) == 1, "There should only be a single nsd"
nsd = nsds[0]
config_param.value,
running_nsr_config.input_parameter))
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsd = catalog.nsd[0]
input_parameters = []
- descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
+ descr_xpath = "/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id=%s]/project-nsd:description" % quoted_key(nsd.id)
descr_value = "New NSD Description"
in_param_id = str(uuid.uuid4())
- input_param_1= NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ input_param_1= NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath=descr_xpath,
value=descr_value)
nsr = create_nsr(nsd.id, input_parameters, cloud_account_name)
logger.info("Instantiating the Network Service")
- rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+ rwnsr_proxy.create_config('/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr', nsr)
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
# Verify the input parameter configuration
- running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+ running_config = rwnsr_proxy.get_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id=%s]" % quoted_key(nsr.id))
for input_param in input_parameters:
verify_input_parameters(running_config, input_param)
assert len(nsrs) == 1
assert nsrs[0].ns_instance_config_ref == nsr.id
- xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.id)
+ xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(quoted_key(nsr.id))
rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=360)
"""
logger.debug("Terminating Multi VM VNF's NSR")
- nsr_path = "/ns-instance-config"
+ nsr_path = "/rw-project:project[rw-project:name='default']/ns-instance-config"
nsr = rwnsr_proxy.get_config(nsr_path)
ping_pong = nsr.nsr[0]
- rwnsr_proxy.delete_config("/ns-instance-config/nsr[id='{}']".format(ping_pong.id))
+ rwnsr_proxy.delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id={}]".format(quoted_key(ping_pong.id)))
time.sleep(30)
Asserts:
The records are deleted.
"""
- nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+ nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
for nsd in nsds.nsd:
- xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
+ xpath = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd[id={}]".format(quoted_key(nsd.id))
nsd_proxy.delete_config(xpath)
- vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+ vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
for vnfd_record in vnfds.vnfd:
- xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+ xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_record.id))
vnfd_proxy.delete_config(xpath)
time.sleep(5)
- nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+ nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
assert nsds is None or len(nsds.nsd) == 0
- vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+ vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
assert vnfds is None or len(vnfds.vnfd) == 0
@brief Scriptable load-balancer test with multi-vm VNFs
"""
+import gi
import json
import logging
import os
import uuid
from gi.repository import (
- NsdYang,
+ RwProjectNsdYang,
NsrYang,
RwNsrYang,
VnfrYang,
VldYang,
- RwVnfdYang,
+ RwProjectVnfdYang,
RwLaunchpadYang,
RwBaseYang
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
import rift.auto.mano
Return:
NSR object
"""
- nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = rift.auto.mano.resource_name(nsr.id)
nsr.nsd_ref = nsd_id
nsr.admin_status = "ENABLED"
nsr.input_parameter.extend(input_param_list)
- nsr.cloud_account = cloud_account_name
+ nsr.datacenter = cloud_account_name
return nsr
pass
-def wait_onboard_transaction_finished(logger, transaction_id, timeout=10, host="127.0.0.1"):
+def wait_onboard_transaction_finished(logger, transaction_id, timeout=10, host="127.0.0.1", project="default"):
logger.info("Waiting for onboard trans_id %s to complete", transaction_id)
def check_status_onboard_status():
- uri = 'http://%s:4567/api/upload/%s/state' % (host, transaction_id)
+ uri = 'http://%s:8008/api/operational/project/%s/create-jobs/job/%s' % (host, project, transaction_id)
curl_cmd = 'curl --insecure {uri}'.format(
uri=uri
)
trans_id = upload_descriptor(logger, trafgen_vnfd_package_file, launchpad_host)
wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 1, "There should only be a single vnfd"
vnfd = vnfds[0]
trans_id = upload_descriptor(logger, trafsink_vnfd_package_file, launchpad_host)
wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 2, "There should be two vnfds"
assert "multivm_trafsink_vnfd" in [vnfds[0].name, vnfds[1].name]
trans_id = upload_descriptor(logger, multi_vm_vnf_nsd_package_file, launchpad_host)
wait_onboard_transaction_finished(logger, trans_id, host=launchpad_host)
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsds = catalog.nsd
assert len(nsds) == 1, "There should only be a single nsd"
nsd = nsds[0]
config_param.value,
running_nsr_config.input_parameter))
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsd = catalog.nsd[0]
input_parameters = []
- descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
+ descr_xpath = "/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id=%s]/project-nsd:description" % quoted_key(nsd.id)
descr_value = "New NSD Description"
in_param_id = str(uuid.uuid4())
- input_param_1= NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ input_param_1= NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath=descr_xpath,
value=descr_value)
nsr = create_nsr(nsd.id, input_parameters, cloud_account_name)
logger.info("Instantiating the Network Service")
- rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+ rwnsr_proxy.create_config('/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr', nsr)
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
# Verify the input parameter configuration
- running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+ running_config = rwnsr_proxy.get_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id=%s]" % quoted_key(nsr.id))
for input_param in input_parameters:
verify_input_parameters(running_config, input_param)
assert len(nsrs) == 1
assert nsrs[0].ns_instance_config_ref == nsr.id
- xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.id)
+ xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(quoted_key(nsr.id))
rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=360)
"""
logger.debug("Terminating Multi VM VNF's NSR")
- nsr_path = "/ns-instance-config"
+ nsr_path = "/rw-project:project[rw-project:name='default']/ns-instance-config"
nsr = rwnsr_proxy.get_config(nsr_path)
ping_pong = nsr.nsr[0]
- rwnsr_proxy.delete_config("/ns-instance-config/nsr[id='{}']".format(ping_pong.id))
+ rwnsr_proxy.delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id={}]".format(quoted_key(ping_pong.id)))
time.sleep(30)
Asserts:
The records are deleted.
"""
- nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+ nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
for nsd in nsds.nsd:
- xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
+ xpath = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd[id={}]".format(quoted_key(nsd.id))
nsd_proxy.delete_config(xpath)
- vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+ vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
for vnfd_record in vnfds.vnfd:
- xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+ xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_record.id))
vnfd_proxy.delete_config(xpath)
time.sleep(5)
- nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+ nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
assert nsds is None or len(nsds.nsd) == 0
- vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+ vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
assert vnfds is None or len(vnfds.vnfd) == 0
@brief Scriptable load-balancer test with multi-vm VNFs
"""
+import gi
import ipaddress
import pytest
import re
RwVnfBaseConfigYang,
RwTrafgenYang
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
@pytest.fixture(scope='session')
def trafgen_vnfr(request, rwvnfr_proxy, session_type):
- vnfr = "/vnfr-catalog/vnfr"
+ vnfr = "/rw-project:project[rw-project:name='default']/vnfr-catalog/vnfr"
vnfrs = rwvnfr_proxy.get(vnfr, list_obj=True)
for vnfr in vnfrs.vnfr:
if 'trafgen' in vnfr.short_name:
Arguments:
vnf_name - vnf name of configuration
'''
- xpath = "/vnf-config/vnf[name='%s'][instance='0']" % vnf_name
+ xpath = "/rw-project:project[rw-project:name='default']/vnf-config/vnf[name=%s][instance='0']" % quoted_key(vnf_name)
for _ in range(24):
tg_config = tgcfg_proxy.get_config(xpath)
if tg_config is not None:
'''
return (int(current_sample) - int(previous_sample)) > threshold
- xpath = "/vnf-opdata/vnf[name='{}'][instance='0']/port-state[portname='{}']/counters/{}"
- vnfdata_proxy.wait_for_interval(xpath.format(vnf_name, port_name, 'input-packets'),
+ xpath = "/rw-project:project[rw-project:name='default']/vnf-opdata/vnf[name={}][instance='0']/port-state[portname={}]/counters/{}"
+ vnfdata_proxy.wait_for_interval(xpath.format(quoted_key(vnf_name), quoted_key(port_name), quoted_key('input-packets')),
value_incremented, timeout=timeout, interval=interval)
'''
return (int(current_sample) - int(previous_sample)) < threshold
- xpath = "/vnf-opdata/vnf[name='{}'][instance='0']/port-state[portname='{}']/counters/{}"
- vnfdata_proxy.wait_for_interval(xpath.format(vnf_name, port_name, 'input-packets'), value_unchanged, timeout=timeout, interval=interval)
+ xpath = "/rw-project:project[rw-project:name='default']/vnf-opdata/vnf[name={}][instance='0']/port-state[portname={}]/counters/{}"
+ vnfdata_proxy.wait_for_interval(xpath.format(quoted_key(vnf_name), quoted_key(port_name), quoted_key('input-packets')), value_unchanged, timeout=timeout, interval=interval)
@pytest.mark.depends('multivmvnf')
@pytest.mark.incremental
import tempfile
import shutil
import subprocess
+import random
import gi
+import rift.auto.descriptor
import rift.auto.session
-import rift.mano.examples.ping_pong_nsd as ping_pong
+import rift.mano.examples.ping_pong_nsd as ping_pong_example
import rift.vcs.vcs
class PackageError(Exception):
pass
@pytest.fixture(scope='session', autouse=True)
-def cloud_account_name(request):
- '''fixture which returns the name used to identify the cloud account'''
- return 'cloud-0'
+def multidisk_testdata(request, descriptor_images, path_ping_image, path_pong_image):
+ """fixture which returns test data related to multidisk test"""
+
+ if not request.config.option.multidisk:
+ return None
+
+ iso_img, qcow2_img = [os.path.basename(image) for image in descriptor_images]
+
+ ping_ = {'vda': ['disk', 'virtio', 5, os.path.basename(path_ping_image), 0],
+ 'sda': ['cdrom', 'scsi', 5, iso_img, 1],
+ 'hda': ['disk', 'ide', 5, None, None],
+ }
+
+ pong_ = {'vda': ['disk', 'virtio', 5, os.path.basename(path_pong_image), 0],
+ 'hda': ['cdrom', 'ide', 5, iso_img, 1],
+ 'hdb': ['disk', 'ide', 5, qcow2_img, 2],
+ }
+ return ping_, pong_
+
+@pytest.fixture(scope='session')
+def port_sequencing_intf_positions():
+ """fixture which returns a list of ordered positions for pong interfaces related to port sequencing test"""
+ return random.sample(range(1, 2**32-1), 3)
@pytest.fixture(scope='session')
def ping_pong_install_dir():
]
return image_dirs
+@pytest.fixture(scope='session')
+def random_image_name(image_dirs):
+ """Fixture which returns a random image name"""
+ return 'image_systemtest_{}.qcow2'.format(random.randint(100, 9999))
+
@pytest.fixture(scope='session')
def image_paths(image_dirs):
''' Fixture containing a mapping of image names to their path images
'''
return image_paths["Fedora-x86_64-20-20131211.1-sda-pong.qcow2"]
+@pytest.fixture(scope='session')
+def rsyslog_userdata(rsyslog_host, rsyslog_port):
+ ''' Fixture providing rsyslog user data
+ Arguments:
+ rsyslog_host - host of the rsyslog process
+ rsyslog_port - port of the rsyslog process
+ '''
+ if rsyslog_host and rsyslog_port:
+ return '''
+rsyslog:
+ - "$ActionForwardDefaultTemplate RSYSLOG_ForwardFormat"
+ - "*.* @{host}:{port}"
+ '''.format(
+ host=rsyslog_host,
+ port=rsyslog_port,
+ )
+
+ return None
+
+@pytest.fixture(scope='session')
+def descriptors_pingpong_vnf_input_params():
+ return ping_pong_example.generate_ping_pong_descriptors(
+ pingcount=1,
+ nsd_name='pp_input_nsd',
+ vnfd_input_params=True,
+ )
+
+@pytest.fixture(scope='session')
+def packages_pingpong_vnf_input_params(descriptors_pingpong_vnf_input_params):
+ return rift.auto.descriptor.generate_descriptor_packages(descriptors_pingpong_vnf_input_params)
+
+@pytest.fixture(scope='session')
+def ping_script_userdata():
+ userdata = '''#cloud-config
+password: fedora
+chpasswd: { expire: False }
+ssh_pwauth: True
+runcmd:
+ - [ systemctl, daemon-reload ]
+ - [ systemctl, enable, {{ CI-script-init-data }}.service ]
+ - [ systemctl, start, --no-block, {{ CI-script-init-data }}.service ]
+ - [ ifup, eth1 ]
+'''
+ return userdata
+
+@pytest.fixture(scope='session')
+def pong_script_userdata():
+ userdata = '''#!/bin/bash
+sed 's/^.*PasswordAuthentication.*$/PasswordAuthentication yes/' < /etc/ssh/sshd_config > /etc/ssh/sshd_config
+systemctl daemon-reload
+systemctl enable {{ CI-script-init-data }}.service
+systemctl start --no-block {{ CI-script-init-data }}.service
+ifup eth1
+'''
+ return userdata
+
+@pytest.fixture(scope='session')
+def descriptors_pingpong_script_input_params(ping_script_userdata, pong_script_userdata):
+ return ping_pong_example.generate_ping_pong_descriptors(
+ pingcount=1,
+ nsd_name='pp_script_nsd',
+ script_input_params=True,
+ ping_userdata=ping_script_userdata,
+ pong_userdata=pong_script_userdata,
+ )
+
+@pytest.fixture(scope='session')
+def packages_pingpong_script_input_params(descriptors_pingpong_script_input_params):
+ return rift.auto.descriptor.generate_descriptor_packages(descriptors_pingpong_script_input_params)
+
class PingPongFactory:
- def __init__(self, path_ping_image, path_pong_image, rsyslog_host, rsyslog_port):
+ def __init__(self, path_ping_image, path_pong_image, static_ip, vnf_dependencies, rsyslog_userdata, port_security, metadata_vdud, multidisk, ipv6, port_sequencing, service_primitive):
+
self.path_ping_image = path_ping_image
self.path_pong_image = path_pong_image
- self.rsyslog_host = rsyslog_host
- self.rsyslog_port = rsyslog_port
+ self.rsyslog_userdata = rsyslog_userdata
+ self.static_ip = static_ip
+ self.service_primitive = service_primitive
+ self.use_vca_conf = vnf_dependencies
+ self.port_security = port_security
+ self.port_sequencing = port_sequencing
+ self.metadata_vdud = metadata_vdud
+ self.multidisk = multidisk
+ self.ipv6 = ipv6
+ if not port_security:
+ self.port_security = None # Not to disable port security if its not specific to --port-security feature.
def generate_descriptors(self):
'''Return a new set of ping and pong descriptors
ping_md5sum = md5sum(self.path_ping_image)
pong_md5sum = md5sum(self.path_pong_image)
- ex_userdata = None
- if self.rsyslog_host and self.rsyslog_port:
- ex_userdata = '''
-rsyslog:
- - "$ActionForwardDefaultTemplate RSYSLOG_ForwardFormat"
- - "*.* @{host}:{port}"
- '''.format(
- host=self.rsyslog_host,
- port=self.rsyslog_port,
- )
-
- descriptors = ping_pong.generate_ping_pong_descriptors(
+ descriptors = ping_pong_example.generate_ping_pong_descriptors(
pingcount=1,
ping_md5sum=ping_md5sum,
pong_md5sum=pong_md5sum,
- ex_ping_userdata=ex_userdata,
- ex_pong_userdata=ex_userdata,
+ ex_ping_userdata=self.rsyslog_userdata,
+ ex_pong_userdata=self.rsyslog_userdata,
+ use_static_ip=self.static_ip,
+ port_security=self.port_security,
+ explicit_port_seq=self.port_sequencing,
+ metadata_vdud=self.metadata_vdud,
+ use_vca_conf=self.use_vca_conf,
+ multidisk=self.multidisk,
+ use_ipv6=self.ipv6,
+ primitive_test=self.service_primitive,
)
return descriptors
@pytest.fixture(scope='session')
-def ping_pong_factory(path_ping_image, path_pong_image, rsyslog_host, rsyslog_port):
+def ping_pong_factory(path_ping_image, path_pong_image, static_ip, vnf_dependencies, rsyslog_userdata, port_security, metadata_vdud, multidisk_testdata, ipv6, port_sequencing, service_primitive):
'''Fixture returns a factory capable of generating ping and pong descriptors
'''
- return PingPongFactory(path_ping_image, path_pong_image, rsyslog_host, rsyslog_port)
+ return PingPongFactory(path_ping_image, path_pong_image, static_ip, vnf_dependencies, rsyslog_userdata, port_security, metadata_vdud, multidisk_testdata, ipv6, port_sequencing, service_primitive)
@pytest.fixture(scope='session')
def ping_pong_records(ping_pong_factory):
@pytest.fixture(scope='session')
-def descriptors(request, ping_pong_records):
+def descriptors(request, ping_pong_records, random_image_name):
def pingpong_descriptors(with_images=True):
"""Generated the VNFDs & NSD files for pingpong NS.
'images/Fedora-x86_64-20-20131211.1-sda-pong.qcow2')
for descriptor in [ping_vnfd, pong_vnfd, ping_pong_nsd]:
- descriptor.write_to_file(output_format='xml', outdir=tmpdir)
-
+ descriptor.write_to_file(output_format='yaml', outdir=tmpdir)
ping_img_path = os.path.join(tmpdir, "{}/images/".format(ping_vnfd.name))
pong_img_path = os.path.join(tmpdir, "{}/images/".format(pong_vnfd.name))
shutil.copy(ping_img, ping_img_path)
shutil.copy(pong_img, pong_img_path)
+ if request.config.option.upload_images_multiple_accounts:
+ with open(os.path.join(ping_img_path, random_image_name), 'wb') as image_bin_file:
+ image_bin_file.seek(1024*1024*512) # image file of size 512 MB
+ image_bin_file.write(b'0')
+
for dir_name in [ping_vnfd.name, pong_vnfd.name, ping_pong_nsd.name]:
subprocess.call([
- "sh",
"{rift_install}/usr/rift/toolchain/cmake/bin/generate_descriptor_pkg.sh".format(rift_install=os.environ['RIFT_INSTALL']),
tmpdir,
dir_name])
return files
+ def l2portchain_descriptors():
+ """L2 port chaining packages"""
+ files = [
+ os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/vnffg_demo_nsd/vnffg_l2portchain_dpi_vnfd.tar.gz"),
+ os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/vnffg_demo_nsd/vnffg_l2portchain_firewall_vnfd.tar.gz"),
+ os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/vnffg_demo_nsd/vnffg_l2portchain_nat_vnfd.tar.gz"),
+ os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/vnffg_demo_nsd/vnffg_l2portchain_pgw_vnfd.tar.gz"),
+ os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/vnffg_demo_nsd/vnffg_l2portchain_router_vnfd.tar.gz"),
+ os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/vnffg_demo_nsd/vnffg_l2portchain_sff_vnfd.tar.gz"),
+ os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/vnffg_demo_nsd/vnffg_l2portchain_demo_nsd.tar.gz")
+ ]
+
+ return files
+
+ def metadata_vdud_cfgfile_descriptors():
+ """Metadata-vdud feature related packages"""
+ files = [
+ os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/cfgfile/cirros_cfgfile_vnfd.tar.gz"),
+ os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/cfgfile/fedora_cfgfile_vnfd.tar.gz"),
+ os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/cfgfile/ubuntu_cfgfile_vnfd.tar.gz"),
+ os.path.join(os.getenv('RIFT_BUILD'), "modules/ext/vnfs/src/ext_vnfs-build/cfgfile/cfgfile_nsd.tar.gz")
+ ]
+
+ return files
+
+ if request.config.option.vnf_onboard_delete:
+ return haproxy_descriptors() + l2portchain_descriptors() + list(pingpong_descriptors())
+ if request.config.option.multiple_ns_instantiate:
+ return haproxy_descriptors() + metadata_vdud_cfgfile_descriptors() + list(pingpong_descriptors())
+ if request.config.option.l2_port_chaining:
+ return l2portchain_descriptors()
+ if request.config.option.metadata_vdud_cfgfile:
+ return metadata_vdud_cfgfile_descriptors()
if request.config.option.network_service == "pingpong":
return pingpong_descriptors()
+ elif request.config.option.ha_multiple_failovers:
+ return {'pingpong': pingpong_descriptors(), 'haproxy': haproxy_descriptors(), 'vdud_cfgfile': metadata_vdud_cfgfile_descriptors()}
elif request.config.option.network_service == "pingpong_noimg":
return pingpong_descriptors(with_images=False)
elif request.config.option.network_service == "haproxy":
return images
+ def l2portchain_images():
+ """HAProxy images."""
+ images = [os.path.join(os.getenv('RIFT_ROOT'), "images/ubuntu_trusty_1404.qcow2")]
+ return images
+
+ def multidisk_images():
+ images = [
+ os.path.join(os.getenv('RIFT_ROOT'), 'images/ubuntu-16.04-mini-64.iso'),
+ os.path.join(os.getenv('RIFT_ROOT'), "images/ubuntu_trusty_1404.qcow2"),
+ ]
+ return images
+
+ def metadata_vdud_cfgfile_images():
+ """Metadata-vdud feature related images."""
+ images = [
+ os.path.join(os.getenv('RIFT_ROOT'), "images/cirros-0.3.4-x86_64-disk.img"),
+ os.path.join(os.getenv('RIFT_ROOT'), "images/Fedora-x86_64-20-20131211.1-sda.qcow2"),
+ os.path.join(os.getenv('RIFT_ROOT'), "images/UbuntuXenial")
+ ]
+
+ return images
+
+ if request.config.option.l2_port_chaining:
+ return l2portchain_images()
+ if request.config.option.multidisk:
+ return multidisk_images()
+ if request.config.option.metadata_vdud_cfgfile:
+ return metadata_vdud_cfgfile_images()
if request.config.option.network_service == "haproxy":
return haproxy_images()
+ if request.config.option.multiple_ns_instantiate:
+ return haproxy_images() + metadata_vdud_cfgfile_images()
return []
--- /dev/null
+#!/usr/bin/env python
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import gi
+import pytest
+import os
+from pyvirtualdisplay import Display
+from selenium import webdriver
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.common.by import By
+
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwConfigAgentYang', '1.0')
+gi.require_version('RwSdnYang', '1.0')
+
+from gi.repository import (
+ RwSdnYang,
+ RwCloudYang,
+ RwConfigAgentYang,
+)
+
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+
+@pytest.fixture(scope='session')
+def cloud_proxy(mgmt_session):
+ """cloud_proxy."""
+ return mgmt_session.proxy(RwCloudYang)
+
+
+@pytest.fixture(scope='session')
+def sdn_proxy(mgmt_session):
+ """sdn_proxy."""
+ return mgmt_session.proxy(RwSdnYang)
+
+
+@pytest.fixture(scope='session')
+def config_agent_proxy(mgmt_session):
+ """config_agent_proxy."""
+ return mgmt_session.proxy(RwConfigAgentYang)
+
+
+@pytest.fixture(scope='session')
+def driver(request, confd_host, logger):
+ """Set up virtual diplay and browser driver."""
+ # Set up the virtual display
+ display = Display(visible=0, size=(1024, 768))
+ display.start()
+
+ logger.info("Initializing the chrome web driver")
+ root_dir = os.environ.get('RIFT_ROOT')
+ webdriver_path = '{}/chromedriver'.format(root_dir)
+ # webdriver_path = os.environ["webdriver.chrome.driver"]
+ # Something like this should be implemented.
+
+ driver_ = webdriver.Chrome(executable_path=webdriver_path)
+ driver_.implicitly_wait(5)
+ url = "http://{}:8000/".format(confd_host)
+ logger.info("Getting the URL {}".format(url))
+ driver_.get(url)
+ WebDriverWait(driver_, 10).until(
+ EC.presence_of_element_located((By.CLASS_NAME, "logo"))
+ )
+
+ logger.info("Signing into the Rift home page")
+ driver_.find_element_by_name("username").send_keys("admin")
+ driver_.find_element_by_name("password").send_keys("admin")
+ driver_.find_element_by_id("submit").click()
+ WebDriverWait(driver_, 10).until(
+ EC.presence_of_element_located((By.CLASS_NAME, "skyquakeNav"))
+ )
+
+ def teardown():
+ driver_.quit()
+ display.stop()
+
+ yield driver_
--- /dev/null
+#!/usr/bin/env python
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+import gi
+
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.common.by import By
+
+
+gi.require_version('RwUserYang', '1.0')
+gi.require_version('RwProjectYang', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+
+from gi.repository import (
+ RwUserYang,
+ RwProjectYang,
+ RwConmanYang
+)
+
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+
+class TestGUI(object):
+ """TestGUI."""
+
+ def click_element_and_wait(self, driver, key_word, wait=True):
+ """Click and wait for that element to appear."""
+ path = "//a[text()={}]".format(quoted_key(key_word))
+ driver.find_element_by_xpath(path).click()
+ if wait is True:
+ WebDriverWait(driver, 10).until(
+ EC.presence_of_element_located((
+ By.XPATH, path)))
+
+ def click_button(self, driver, key_word):
+ """Click a button."""
+ path = "//div[text()={}]".format(quoted_key(key_word))
+ driver.find_element_by_xpath(path).click()
+
+ def input_value(self, driver, data_reactid, value):
+ """Input values to field."""
+ path = "//input[@data-reactid={}]".format(quoted_key(data_reactid))
+ driver.find_element_by_xpath(path).send_keys(value)
+
+ def test_basic_checks(
+ self, driver, logger, rw_project_proxy, rw_user_proxy):
+ """test_basic_checks."""
+ logger.debug('Check access to all basic pages.')
+ basic_pages = (
+ ['Accounts', 'Catalog', 'Launchpad', 'ADMINISTRATION',
+ 'PROJECT: default', 'admin'])
+ for key_word in basic_pages:
+ self.click_element_and_wait(driver, key_word)
+
+ logger.debug('Create a test project.')
+ self.click_element_and_wait(driver, 'ADMINISTRATION')
+ self.click_element_and_wait(driver, 'Project Management', wait=False)
+ self.click_button(driver, 'Add Project')
+ self.input_value(driver, '.0.4.0.1.0.4.0.0.1.0.1', 'test_project')
+ self.click_button(driver, 'Create')
+
+ logger.debug('Verify test project is created in ui.')
+ path = "//div[text()={}]".format(quoted_key('test_project'))
+ WebDriverWait(driver, 10).until(
+ EC.presence_of_element_located((
+ By.XPATH, path)))
+
+ logger.debug('Verify test project is created in config.')
+ project_cm_config_xpath = '/project[name={}]/project-state'
+ project_ = rw_project_proxy.get_config(
+ project_cm_config_xpath.format(
+ quoted_key('test_project')), list_obj=True)
+ assert project_
+
+ logger.debug('Create a test user.')
+ self.click_element_and_wait(driver, 'ADMINISTRATION')
+ self.click_element_and_wait(driver, 'User Management', wait=False)
+ self.click_button(driver, 'Add User')
+ self.input_value(driver, '.0.4.0.1.1.0.4.0.0.1.0.1', 'test_user')
+ self.input_value(driver, '.0.4.0.1.1.0.4.0.3.1.0.1', 'mypasswd')
+ self.input_value(driver, '.0.4.0.1.1.0.4.0.3.1.1.1', 'mypasswd')
+ self.click_button(driver, 'Create')
+
+ logger.debug('Verify test user is created in ui.')
+ path = "//div[text()={}]".format(quoted_key('test_user'))
+ WebDriverWait(driver, 10).until(
+ EC.presence_of_element_located((
+ By.XPATH, path)))
+
+ logger.debug('Verify test user is created in config.')
+ user_config_xpath = (
+ '/user-config/user[user-name={user_name}][user-domain={domain}]')
+ user_ = rw_user_proxy.get_config(
+ user_config_xpath.format(
+ user_name=quoted_key('test_user'),
+ domain=quoted_key('system')))
+ assert user_
--- /dev/null
+#!/usr/bin/env python3
+"""
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+
+import pytest
+import subprocess
+import os
+import time
+
+import rift.vcs.vcs
+import rift.auto.mano as mano
+
+from gi.repository import (
+ RwConmanYang,
+ RwUserYang,
+ RwProjectYang,
+ RwRbacInternalYang,
+ RwRbacPlatformYang,
+ RwCloudYang,
+)
+
+@pytest.fixture(scope='session')
+def ha_mgmt_sessions(sut_host_addrs, session_type):
+ """Fixture that returns mgmt sessions for active, standby LPs"""
+ sessions = {}
+ for name,addr in sut_host_addrs.items():
+ if session_type == 'netconf':
+ mgmt_session = rift.auto.session.NetconfSession(host=addr)
+ elif session_type == 'restconf':
+ mgmt_session = rift.auto.session.RestconfSession(host=addr)
+
+ if 'standby' in name:
+ sessions['standby'] = mgmt_session
+ elif 'active' in name:
+ sessions['active'] = mgmt_session
+ mgmt_session.connect()
+ rift.vcs.vcs.wait_until_system_started(mgmt_session)
+
+ return sessions
+
+@pytest.fixture(scope='session')
+def active_mgmt_session(ha_mgmt_sessions):
+ """Fixture that returns mgmt sessions for active LP"""
+ return ha_mgmt_sessions['active']
+
+@pytest.fixture(scope='session')
+def standby_mgmt_session(ha_mgmt_sessions):
+ """Fixture that returns mgmt sessions for standby LP"""
+ return ha_mgmt_sessions['standby']
+
+@pytest.fixture(scope='session')
+def active_confd_host(active_mgmt_session):
+ """Fixture that returns mgmt sessions for active LP"""
+ return active_mgmt_session.host
+
+@pytest.fixture(scope='session')
+def standby_confd_host(standby_mgmt_session):
+ """Fixture that returns mgmt sessions for standby LP"""
+ return standby_mgmt_session.host
+
+@pytest.fixture(scope='session')
+def revertive_pref_host(active_mgmt_session):
+ """Fixture that returns mgmt sessions for active LP"""
+ return active_mgmt_session.host
+
+@pytest.fixture(scope='session')
+def active_site_name(active_mgmt_session):
+ """Fixture that returns mgmt sessions for active LP"""
+ return 'site-a'
+
+@pytest.fixture(scope='session')
+def standby_site_name(standby_mgmt_session):
+ """Fixture that returns mgmt sessions for standby LP"""
+ return 'site-b'
+
+@pytest.fixture(scope='session', autouse=True)
+def redundancy_config_setup(logger, active_confd_host, standby_confd_host, active_mgmt_session):
+ """Fixture that prepares the rw-redundancy-config.xml file and copies it to RVR of active, standby systems;
+ starts the mock dns script in the revertive-preference host.
+ It assumes system-tests are running containers where launchpad runs in production mode"""
+
+ # Starts the mock dns script in revertive-preference host which is the active system.
+ ssh_mock_dns_cmd = 'ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no {revertive_pref_host} -- "python3 /usr/rift/usr/rift/systemtest/util/test_mock_dns.py --active-site site-a {active_host} --standby-site site-b {standby_host}"'.format(
+ revertive_pref_host=active_confd_host, active_host=active_confd_host, standby_host=standby_confd_host)
+ logger.debug('Running mock dns script in host {host}; cmd: {ssh_cmd}'.format(host=active_confd_host,
+ ssh_cmd=ssh_mock_dns_cmd))
+ subprocess.Popen(ssh_mock_dns_cmd, shell=True)
+ # Have to check if the script ran fine
+
+ # Prepares the rw-redundancy-config.xml file
+ redundancy_cfg_file_path = os.path.join(os.getenv('RIFT_INSTALL'),
+ 'usr/rift/systemtest/config/rw-redundancy-config.xml')
+ with open(redundancy_cfg_file_path) as f:
+ file_content = f.read()
+
+ with open(redundancy_cfg_file_path+'.auto', 'w') as f:
+ new_content = file_content.replace('1.1.1.1', active_confd_host).replace('2.2.2.2', standby_confd_host)
+ logger.debug('redundancy config file content: {}'.format(new_content))
+ f.write(new_content)
+
+ # Copies the redundancy config file to active, standby systems
+ for host_addr in (active_confd_host, standby_confd_host):
+ scp_cmd = 'scp -o StrictHostkeyChecking=no {file_path} {host}:/usr/rift/var/rift/rw-redundancy-config.xml'.format(
+ file_path=redundancy_cfg_file_path+'.auto', host=host_addr)
+ logger.debug(
+ 'Copying redundancy config xml to host {host}; scp cmd: {scp_cmd}'.format(host=host_addr, scp_cmd=scp_cmd))
+ assert os.system(scp_cmd) == 0
+
+ # Restart the launchpad service in active, standby systems
+ for host_addr in (active_confd_host, standby_confd_host):
+ ssh_launchpad_restart_cmd = 'ssh -n -o BatchMode=yes -o StrictHostKeyChecking=no {host} -- "sudo pkill rwmain"'.format(
+ host=host_addr)
+ logger.debug('Restarting launchpad service in host {host}. cmd: {ssh_cmd}'.format(host=host_addr,
+ ssh_cmd=ssh_launchpad_restart_cmd))
+ assert os.system(ssh_launchpad_restart_cmd.format(host=host_addr)) == 0
+ time.sleep(30)
+
+ active_mgmt_session.connect()
+ rift.vcs.vcs.wait_until_system_started(active_mgmt_session)
+ mano.verify_ha_redundancy_state(active_mgmt_session)
+
+@pytest.fixture(scope='session')
+def ha_lp_nodes(sut_host_addrs, session_type):
+ """Fixture that returns rift.auto.mano.LpNode objects for active, standby LPs"""
+ lp_nodes = {}
+ for name,addr in sut_host_addrs.items():
+ lp_node = mano.LpNode(host=addr, session_type=session_type, connect=False)
+ if 'standby' in name:
+ lp_nodes['standby'] = lp_node
+ elif 'active' in name:
+ lp_nodes['active'] = lp_node
+
+ return lp_nodes
+
+@pytest.fixture(scope='session')
+def active_lp_node_obj(ha_lp_nodes):
+ """Fixture that returns rift.auto.mano.LpNode object for active LP"""
+ return ha_lp_nodes['active']
+
+@pytest.fixture(scope='session')
+def standby_lp_node_obj(ha_lp_nodes):
+ """Fixture that returns rift.auto.mano.LpNode object for standby LP"""
+ return ha_lp_nodes['standby']
+
+@pytest.fixture(scope='session')
+def rw_active_user_proxy(active_mgmt_session):
+ return active_mgmt_session.proxy(RwUserYang)
+
+@pytest.fixture(scope='session')
+def rw_active_project_proxy(active_mgmt_session):
+ return active_mgmt_session.proxy(RwProjectYang)
+
+@pytest.fixture(scope='session')
+def rw_active_rbac_int_proxy(active_mgmt_session):
+ return active_mgmt_session.proxy(RwRbacInternalYang)
+
+@pytest.fixture(scope='session')
+def rw_active_conman_proxy(active_mgmt_session):
+ return active_mgmt_session.proxy(RwConmanYang)
+
+@pytest.fixture(scope='session')
+def rbac_active_platform_proxy(active_mgmt_session):
+ return active_mgmt_session.proxy(RwRbacPlatformYang)
+
+@pytest.fixture(scope='session')
+def rw_active_cloud_pxy(active_mgmt_session):
+ return active_mgmt_session.proxy(RwCloudYang)
--- /dev/null
+#!/usr/bin/env python3
+"""
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+
+import gi
+import pytest
+import random
+import time
+
+import rift.auto.mano as mano
+import rift.auto.descriptor
+from gi.repository.RwKeyspec import quoted_key
+
+from gi.repository import (
+ RwProjectNsdYang,
+ RwNsrYang,
+ RwVnfrYang,
+ RwVlrYang,
+ RwCloudYang,
+ RwConmanYang,
+)
+
+@pytest.fixture(scope='module')
+def test_projects():
+ projects = ['default']
+ for idx in range(1, 4):
+ projects.append('project_ha_'+str(idx))
+ return projects
+
+
+@pytest.mark.setup('active_configuration')
+@pytest.mark.incremental
+class TestActiveLpConfiguration(object):
+ """sets up the configuration as per RIFT-17854"""
+ def test_create_project_users(self, rbac_user_passwd, user_domain, rw_active_user_proxy, logger,
+ rw_active_project_proxy, rw_active_rbac_int_proxy, rw_active_conman_proxy, test_projects, user_roles):
+ # Create test users
+ user_name_pfx = 'user_ha_'
+ users = []
+ for idx in range(1, 9):
+ users.append(user_name_pfx+str(idx))
+ mano.create_user(rw_active_user_proxy, user_name_pfx+str(idx), rbac_user_passwd, user_domain)
+
+ # Create projects and assign roles to users in the newly created project
+ for project_name in test_projects:
+ if project_name == 'default':
+ continue
+ logger.debug('Creating project {}'.format(project_name))
+ mano.create_project(rw_active_conman_proxy, project_name)
+
+ for project_name in test_projects:
+ for _ in range(2):
+ role = random.choice(user_roles)
+ user = users.pop()
+ logger.debug('Assinging role {} to user {} in project {}'.format(role, user, project_name))
+ mano.assign_project_role_to_user(rw_active_project_proxy, role, user, project_name, user_domain,
+ rw_active_rbac_int_proxy)
+
+ def test_create_cloud_accounts(self, cloud_account, fmt_prefixed_cloud_xpath, fmt_cloud_xpath, rw_active_cloud_pxy,
+ test_projects, logger):
+ for project_name in test_projects:
+ logger.debug('Creating cloud account {} for project {}'.format(cloud_account.name, project_name))
+ xpath = fmt_prefixed_cloud_xpath.format(project=quoted_key(project_name),
+ account_name=quoted_key(cloud_account.name))
+ rw_active_cloud_pxy.replace_config(xpath, cloud_account)
+ xpath_no_pfx = fmt_cloud_xpath.format(project=quoted_key(project_name),
+ account_name=quoted_key(cloud_account.name))
+ response = rw_active_cloud_pxy.get(xpath_no_pfx)
+ assert response.name == cloud_account.name
+ assert response.account_type == cloud_account.account_type
+
+ rw_active_cloud_pxy.wait_for(fmt_cloud_xpath.format(project=quoted_key(project_name), account_name=quoted_key(
+ cloud_account.name)) + '/connection-status/status', 'success', timeout=30, fail_on=['failure'])
+
+ def test_onboard_descriptors(self, descriptors, test_projects, active_mgmt_session, fmt_nsd_catalog_xpath, logger):
+ # Uploads the descriptors
+ for project_name in test_projects:
+ for descriptor in descriptors:
+ logger.debug('Onboarding descriptor {} for project {}'.format(descriptor, project_name))
+ rift.auto.descriptor.onboard(active_mgmt_session, descriptor, project=project_name)
+
+ # Verify whether the descriptors uploaded successfully
+ nsd_pxy = active_mgmt_session.proxy(RwProjectNsdYang)
+ for project_name in test_projects:
+ nsd_xpath = fmt_nsd_catalog_xpath.format(project=quoted_key(project_name))
+ nsd_catalog = nsd_pxy.get_config(nsd_xpath)
+ assert nsd_catalog
+
+ @pytest.mark.skipif(not pytest.config.getoption("--nsr-test"), reason="need --nsr-test option to run")
+ def test_instantiate_nsr(self, fmt_nsd_catalog_xpath, cloud_account, active_mgmt_session, logger, test_projects):
+ nsd_pxy = active_mgmt_session.proxy(RwProjectNsdYang)
+ rwnsr_pxy = active_mgmt_session.proxy(RwNsrYang)
+
+ for project_name in test_projects:
+ nsd_xpath = fmt_nsd_catalog_xpath.format(project=quoted_key(project_name))
+ nsd_catalog = nsd_pxy.get_config(nsd_xpath)
+ assert nsd_catalog
+ nsd = nsd_catalog.nsd[0]
+ nsr = rift.auto.descriptor.create_nsr(cloud_account.name, nsd.name, nsd)
+
+ logger.debug('Instantiating NS for project {}'.format(project_name))
+ rift.auto.descriptor.instantiate_nsr(nsr, rwnsr_pxy, logger, project=project_name)
+
+
+@pytest.mark.depends('active_configuration')
+@pytest.mark.setup('first-failover')
+@pytest.mark.incremental
+class TestHaFirstFailover(object):
+ def test_collect_active_lp_data(self, active_lp_node_obj, active_confd_host, standby_confd_host, logger):
+ mano.verify_hagr_endpoints(active_confd_host, standby_confd_host)
+ active_lp_node_obj.collect_data()
+
+ def test_attempt_indirect_failover(self, revertive_pref_host, active_confd_host, standby_confd_host,
+ active_site_name, standby_site_name, logger):
+ # Wait for redundancy poll interval though collecting data on active LP takes more than 5 secs
+ time.sleep(5)
+ logger.debug('Attempting first failover. Host {} will be new active'.format(standby_confd_host))
+ mano.indirect_failover(revertive_pref_host, new_active_ip=standby_confd_host, new_active_site=standby_site_name,
+ new_standby_ip=active_confd_host, new_standby_site=active_site_name)
+
+ def test_wait_for_standby_to_comeup(self, standby_mgmt_session, active_confd_host, standby_confd_host):
+ """Wait for the standby to come up; Wait for endpoint 'ha/geographic/active' to return 200"""
+ mano.wait_for_standby_to_become_active(standby_mgmt_session)
+ # mano.verify_hagr_endpoints(active_host=standby_confd_host, standby_host=active_confd_host)
+
+ def test_collect_standby_lp_data(self, standby_lp_node_obj, standby_mgmt_session, cloud_account,
+ fmt_cloud_xpath, test_projects, fmt_nsd_catalog_xpath):
+ time.sleep(180)
+ rw_new_active_cloud_pxy = standby_mgmt_session.proxy(RwCloudYang)
+ nsd_pxy = standby_mgmt_session.proxy(RwProjectNsdYang)
+ rwnsr_proxy = standby_mgmt_session.proxy(RwNsrYang)
+
+ for project_name in test_projects:
+ rw_new_active_cloud_pxy.wait_for(
+ fmt_cloud_xpath.format(project=quoted_key(project_name), account_name=quoted_key(
+ cloud_account.name)) + '/connection-status/status', 'success', timeout=60, fail_on=['failure'])
+
+ # nsd_catalog = nsd_pxy.get_config(fmt_nsd_catalog_xpath.format(project=quoted_key(project_name)))
+ # assert nsd_catalog
+
+ if pytest.config.getoption("--nsr-test"):
+ nsr_opdata = rwnsr_proxy.get(
+ '/rw-project:project[rw-project:name={project}]/ns-instance-opdata'.format(
+ project=quoted_key(project_name)))
+ assert nsr_opdata
+ nsrs = nsr_opdata.nsr
+
+ for nsr in nsrs:
+ xpath = "/rw-project:project[rw-project:name={project}]/ns-instance-opdata/nsr[ns-instance-config-ref={config_ref}]/config-status".format(
+ project=quoted_key(project_name), config_ref=quoted_key(nsr.ns_instance_config_ref))
+ rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=400)
+
+ standby_lp_node_obj.collect_data()
+
+ def test_match_active_standby(self, active_lp_node_obj, standby_lp_node_obj):
+ active_lp_node_obj.compare(standby_lp_node_obj)
+
+
+@pytest.mark.depends('first-failover')
+@pytest.mark.setup('active-teardown')
+@pytest.mark.incremental
+class TestHaTeardown(object):
+ """It terminates the NS & deletes descriptors, cloud accounts, projects"""
+ @pytest.mark.skipif(not pytest.config.getoption("--nsr-test"), reason="need --nsr-test option to run")
+ def test_terminate_nsr(self, test_projects, standby_mgmt_session, logger):
+ rwnsr_pxy = standby_mgmt_session.proxy(RwNsrYang)
+ rwvnfr_pxy = standby_mgmt_session.proxy(RwVnfrYang)
+ rwvlr_pxy = standby_mgmt_session.proxy(RwVlrYang)
+
+ for project_name in test_projects:
+ logger.debug("Trying to terminate NSR in project {}".format(project_name))
+ rift.auto.descriptor.terminate_nsr(rwvnfr_pxy, rwnsr_pxy, rwvlr_pxy, logger, project_name)
+
+ def test_delete_descriptors(self, standby_mgmt_session, test_projects, logger):
+ for project_name in test_projects:
+ logger.info("Trying to delete the descriptors in project {}".format(project_name))
+ rift.auto.descriptor.delete_descriptors(standby_mgmt_session, project_name)
+
+ def test_delete_cloud_accounts(self, standby_mgmt_session, logger, test_projects, cloud_account):
+ for project_name in test_projects:
+ logger.info("Trying to delete the cloud-account in project {}".format(project_name))
+ rift.auto.mano.delete_cloud_account(standby_mgmt_session, cloud_account.name, project_name)
+
+ def test_delete_projects(self, standby_mgmt_session, test_projects, logger):
+ rw_conman_proxy = standby_mgmt_session.proxy(RwConmanYang)
+ for project_name in test_projects:
+ if project_name == 'default':
+ continue
+ logger.debug('Deleting project {}'.format(project_name))
+ rift.auto.mano.delete_project(rw_conman_proxy, project_name)
+
+
+@pytest.mark.depends('active-teardown')
+@pytest.mark.incremental
+class TestHaFailoverToOriginalActive(object):
+ """Does a failover to original active and verifies the config"""
+ def test_collect_current_active_lp_data(self, standby_lp_node_obj, logger):
+ time.sleep(30)
+ logger.debug('Collecting data for host {}'.format(standby_lp_node_obj.host))
+ standby_lp_node_obj.collect_data()
+
+ def test_attempt_indirect_failover(self, revertive_pref_host, active_confd_host, standby_confd_host,
+ active_site_name, standby_site_name, logger):
+ # Wait for redundancy poll interval.
+ time.sleep(5)
+ logger.debug('Attempting second failover. Host {} will be new active'.format(active_confd_host))
+ mano.indirect_failover(revertive_pref_host, new_active_ip=active_confd_host, new_active_site=active_site_name,
+ new_standby_ip=standby_confd_host, new_standby_site=standby_site_name)
+
+ def test_wait_for_standby_to_comeup(self, active_mgmt_session, active_confd_host, standby_confd_host):
+ """Wait for the standby to come up; Wait for endpoint 'ha/geographic/active' to return 200"""
+ mano.wait_for_standby_to_become_active(active_mgmt_session)
+ # mano.verify_hagr_endpoints(active_host=standby_confd_host, standby_host=active_confd_host)
+
+ def test_collect_original_active_lp_data(self, active_lp_node_obj, logger):
+ active_lp_node_obj.session = None
+ logger.debug('Collecting data for host {}'.format(active_lp_node_obj.host))
+ active_lp_node_obj.collect_data()
+
+ def test_match_active_standby(self, active_lp_node_obj, standby_lp_node_obj):
+ standby_lp_node_obj.compare(active_lp_node_obj)
+
+ def test_delete_default_project(self, rw_active_conman_proxy):
+ rift.auto.mano.delete_project(rw_active_conman_proxy, 'default')
+
+ def test_users_presence_in_active(self, rw_active_user_proxy, user_keyed_xpath, user_domain):
+ """Users were not deleted as part of Teardown; Check those users should be present and delete them"""
+ user_config = rw_active_user_proxy.get_config('/user-config')
+ current_users_list = [user.user_name for user in user_config.user]
+
+ user_name_pfx = 'user_ha_'
+ original_test_users_list = [user_name_pfx+str(idx) for idx in range(1,9)]
+
+ assert set(original_test_users_list).issubset(current_users_list)
+
+ # Delete the users
+ for idx in range(1,9):
+ rw_active_user_proxy.delete_config(
+ user_keyed_xpath.format(user=quoted_key(user_name_pfx + str(idx)), domain=quoted_key(user_domain)))
+
+ def test_projects_deleted(self, test_projects, project_keyed_xpath, rw_active_conman_proxy):
+ """There should only be the default project; all other test projects are already deleted as part of Teardown"""
+ for project_name in test_projects:
+ project_ = rw_active_conman_proxy.get_config(
+ project_keyed_xpath.format(project_name=quoted_key(project_name)) + '/name')
+ assert project_ is None
\ No newline at end of file
--- /dev/null
+#!/usr/bin/env python3
+"""
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+
+import gi
+import pytest
+import random
+import time
+
+import rift.auto.mano as mano
+import rift.auto.descriptor
+from gi.repository.RwKeyspec import quoted_key
+
+from gi.repository import (
+ RwProjectNsdYang,
+ RwNsrYang,
+ RwVnfrYang,
+ RwVlrYang,
+ RwCloudYang,
+ RwConmanYang,
+)
+
+@pytest.fixture(scope='module')
+def test_project():
+ return 'project_ha'
+
+@pytest.mark.setup('active-configuration')
+@pytest.mark.incremental
+class TestMutipleFailoverActiveSetup(object):
+ def test_create_project_users(self, rbac_user_passwd, user_domain, rw_active_user_proxy, logger,
+ rw_active_project_proxy, rw_active_rbac_int_proxy, rw_active_conman_proxy, test_project, user_roles):
+ # Create test users
+ user_name_pfx = 'user_ha_'
+ users = []
+ for idx in range(1, 9):
+ users.append(user_name_pfx+str(idx))
+ mano.create_user(rw_active_user_proxy, user_name_pfx+str(idx), rbac_user_passwd, user_domain)
+
+ # Create a test project and assign roles to users in the newly created project
+ logger.debug('Creating project {}'.format(test_project))
+ mano.create_project(rw_active_conman_proxy, test_project)
+
+ for _ in range(8):
+ role = random.choice(user_roles)
+ user = users.pop()
+ logger.debug('Assinging role {} to user {} in project {}'.format(role, user, test_project))
+ mano.assign_project_role_to_user(rw_active_project_proxy, role, user, test_project, user_domain,
+ rw_active_rbac_int_proxy)
+
+ def test_create_cloud_account(self, cloud_account, fmt_prefixed_cloud_xpath, fmt_cloud_xpath, rw_active_cloud_pxy,
+ test_project, logger):
+ logger.debug('Creating cloud account {} for project {}'.format(cloud_account.name, test_project))
+ xpath = fmt_prefixed_cloud_xpath.format(project=quoted_key(test_project),
+ account_name=quoted_key(cloud_account.name))
+ rw_active_cloud_pxy.replace_config(xpath, cloud_account)
+ xpath_no_pfx = fmt_cloud_xpath.format(project=quoted_key(test_project),
+ account_name=quoted_key(cloud_account.name))
+ response = rw_active_cloud_pxy.get(xpath_no_pfx)
+ assert response.name == cloud_account.name
+ assert response.account_type == cloud_account.account_type
+
+ rw_active_cloud_pxy.wait_for(fmt_cloud_xpath.format(project=quoted_key(test_project), account_name=quoted_key(
+ cloud_account.name)) + '/connection-status/status', 'success', timeout=30, fail_on=['failure'])
+
+ def test_onboard_descriptors(self, descriptors, test_project, active_mgmt_session, fmt_nsd_catalog_xpath, logger):
+ # Uploads the descriptors
+ pingpong_descriptors = descriptors['pingpong']
+ for descriptor in pingpong_descriptors:
+ logger.debug('Onboarding descriptor {} for project {}'.format(descriptor, test_project))
+ rift.auto.descriptor.onboard(active_mgmt_session, descriptor, project=test_project)
+
+ # Verify whether the descriptors uploaded successfully
+ nsd_pxy = active_mgmt_session.proxy(RwProjectNsdYang)
+ nsd_xpath = fmt_nsd_catalog_xpath.format(project=quoted_key(test_project))
+ nsd_catalog = nsd_pxy.get_config(nsd_xpath)
+ assert nsd_catalog
+
+ def test_instantiate_nsr(self, fmt_nsd_catalog_xpath, cloud_account, active_mgmt_session, logger, test_project):
+ nsd_pxy = active_mgmt_session.proxy(RwProjectNsdYang)
+ rwnsr_pxy = active_mgmt_session.proxy(RwNsrYang)
+
+ nsd_xpath = fmt_nsd_catalog_xpath.format(project=quoted_key(test_project))
+ nsd_catalog = nsd_pxy.get_config(nsd_xpath)
+ assert nsd_catalog
+ nsd = nsd_catalog.nsd[0]
+ nsr = rift.auto.descriptor.create_nsr(cloud_account.name, nsd.name, nsd)
+
+ logger.debug('Instantiating NS for project {}'.format(test_project))
+ rift.auto.descriptor.instantiate_nsr(nsr, rwnsr_pxy, logger, project=test_project)
+
+
+@pytest.mark.depends('active-configuration')
+@pytest.mark.setup('multiple-failovers')
+@pytest.mark.incremental
+class TestHaMultipleFailovers(object):
+ def test_ha_multiple_failovers(self, revertive_pref_host, active_confd_host, standby_confd_host, standby_lp_node_obj, active_lp_node_obj, logger,
+ fmt_cloud_xpath, cloud_account, test_project, active_site_name, standby_site_name, standby_mgmt_session, active_mgmt_session, descriptors):
+ count, failover_count = 1, 10
+ current_actv_mgmt_session, current_stdby_mgmt_session = active_mgmt_session, standby_mgmt_session
+ current_actv_lp_node_obj = active_lp_node_obj
+
+ descriptor_list = descriptors['haproxy'][::-1] + descriptors['vdud_cfgfile'][::-1]
+
+ original_active_as_standby_kwargs = {'revertive_pref_host': revertive_pref_host, 'new_active_ip': standby_confd_host, 'new_active_site': standby_site_name,
+ 'new_standby_ip': active_confd_host, 'new_standby_site': active_site_name}
+ original_active_as_active_kwargs = {'revertive_pref_host': revertive_pref_host, 'new_active_ip':active_confd_host, 'new_active_site': active_site_name,
+ 'new_standby_ip': standby_confd_host, 'new_standby_site': standby_site_name}
+
+ while count <= failover_count:
+ kwargs = original_active_as_active_kwargs
+ if count%2 == 1:
+ kwargs = original_active_as_standby_kwargs
+
+ # upload descriptor
+ if count not in [5,6,7,8]:
+ descriptor = descriptor_list.pop()
+ rift.auto.descriptor.onboard(current_actv_mgmt_session, descriptor, project=test_project)
+
+ # Collect config, op-data from current active before doing a failover
+ current_actv_lp_node_obj.session = None
+ current_actv_lp_node_obj.collect_data()
+
+ time.sleep(5)
+ logger.debug('Failover Iteration - {}. Current standby {} will be the new active'.format(count, current_stdby_mgmt_session.host))
+ mano.indirect_failover(**kwargs)
+
+ last_actv_lp_node_obj = current_actv_lp_node_obj
+ current_actv_mgmt_session, current_stdby_mgmt_session = active_mgmt_session, standby_mgmt_session
+ current_actv_lp_node_obj = active_lp_node_obj
+ if count%2 == 1:
+ current_actv_lp_node_obj = standby_lp_node_obj
+ current_actv_mgmt_session, current_stdby_mgmt_session = standby_mgmt_session, active_mgmt_session
+
+ logger.debug('Waiting for the new active {} to come up'.format(current_actv_mgmt_session.host))
+ mano.wait_for_standby_to_become_active(current_actv_mgmt_session)
+
+ # Wait for NSR to become active
+ rw_new_active_cloud_pxy = current_actv_mgmt_session.proxy(RwCloudYang)
+ rwnsr_proxy = current_actv_mgmt_session.proxy(RwNsrYang)
+
+ rw_new_active_cloud_pxy.wait_for(
+ fmt_cloud_xpath.format(project=quoted_key(test_project), account_name=quoted_key(
+ cloud_account.name)) + '/connection-status/status', 'success', timeout=60, fail_on=['failure'])
+
+ nsr_opdata = rwnsr_proxy.get(
+ '/rw-project:project[rw-project:name={project}]/ns-instance-opdata'.format(
+ project=quoted_key(test_project)))
+ assert nsr_opdata
+ nsrs = nsr_opdata.nsr
+
+ for nsr in nsrs:
+ xpath = "/rw-project:project[rw-project:name={project}]/ns-instance-opdata/nsr[ns-instance-config-ref={config_ref}]/config-status".format(
+ project=quoted_key(test_project), config_ref=quoted_key(nsr.ns_instance_config_ref))
+ rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=400)
+
+ # Collect config, op-data from new active
+ current_actv_lp_node_obj.session = None
+ current_actv_lp_node_obj.collect_data()
+
+ # Compare data between last active and current active
+ current_actv_lp_node_obj.compare(last_actv_lp_node_obj)
+ count += 1
+
+
+@pytest.mark.depends('multiple-failovers')
+@pytest.mark.incremental
+class TestHaOperationPostMultipleFailovers(object):
+ def test_instantiate_nsr(self, fmt_nsd_catalog_xpath, cloud_account, active_mgmt_session, logger, test_project):
+ """Check if a new NS instantiation goes through after multiple HA failovers.
+ It uses metadata cfgfile nsd for the instantiation.
+ There alreasy exists ping pong NS instantiation"""
+ nsd_pxy = active_mgmt_session.proxy(RwProjectNsdYang)
+ rwnsr_pxy = active_mgmt_session.proxy(RwNsrYang)
+
+ nsd_xpath = fmt_nsd_catalog_xpath.format(project=quoted_key(test_project))
+ nsd_catalog = nsd_pxy.get_config(nsd_xpath)
+ assert nsd_catalog
+ cfgfile_nsd = [nsd for nsd in nsd_catalog.nsd if 'cfgfile_nsd' in nsd.name][0]
+ nsr = rift.auto.descriptor.create_nsr(cloud_account.name, cfgfile_nsd.name, cfgfile_nsd)
+
+ logger.debug('Instantiating cfgfile NS for project {}'.format(test_project))
+ rift.auto.descriptor.instantiate_nsr(nsr, rwnsr_pxy, logger, project=test_project)
+
+ def test_nsr_terminate(self, active_mgmt_session, logger, test_project):
+ """"""
+ rwnsr_pxy = active_mgmt_session.proxy(RwNsrYang)
+ rwvnfr_pxy = active_mgmt_session.proxy(RwVnfrYang)
+ rwvlr_pxy = active_mgmt_session.proxy(RwVlrYang)
+
+ logger.debug("Trying to terminate ping pong, cfgfile NSRs in project {}".format(test_project))
+ rift.auto.descriptor.terminate_nsr(rwvnfr_pxy, rwnsr_pxy, rwvlr_pxy, logger, test_project)
+
+ def test_delete_descriptors(self, active_mgmt_session, test_project, logger):
+ logger.info("Trying to delete the descriptors in project {}".format(test_project))
+ rift.auto.descriptor.delete_descriptors(active_mgmt_session, test_project)
+
+ def test_delete_cloud_accounts(self, active_mgmt_session, logger, test_project, cloud_account):
+ logger.info("Trying to delete the cloud-account in project {}".format(test_project))
+ rift.auto.mano.delete_cloud_account(active_mgmt_session, cloud_account.name, test_project)
+
+ def test_delete_projects(self, active_mgmt_session, test_project, logger):
+ rw_conman_proxy = active_mgmt_session.proxy(RwConmanYang)
+ logger.debug('Deleting project {}'.format(test_project))
+ rift.auto.mano.delete_project(rw_conman_proxy, test_project)
\ No newline at end of file
--- /dev/null
+#!/usr/bin/env python3
+"""
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+
+import gi
+import pytest
+import random
+import time
+
+import rift.auto.mano as mano
+import rift.auto.descriptor
+from gi.repository.RwKeyspec import quoted_key
+
+from gi.repository import (
+ RwProjectNsdYang,
+ RwNsrYang,
+ RwVnfrYang,
+ RwVlrYang,
+ RwProjectVnfdYang,
+ RwCloudYang
+)
+
+
+@pytest.mark.setup('active_configuration')
+@pytest.mark.incremental
+class TestActiveLpConfiguration(object):
+ """Setting up the configuration."""
+
+ def collect_active_lp_data(
+ self, active_lp_node_obj, active_confd_host,
+ standby_confd_host, logger):
+ """Collect active lp data."""
+ mano.verify_hagr_endpoints(active_confd_host, standby_confd_host)
+ active_lp_node_obj.collect_data()
+
+ def wait_for_standby_to_comeup(
+ self, standby_mgmt_session, active_confd_host, standby_confd_host):
+ """Wait for the standby to come up.
+
+ Wait for endpoint 'ha/geographic/active' to return 200
+ """
+ mano.wait_for_standby_to_become_active(standby_mgmt_session)
+ # mano.verify_hagr_endpoints(
+ # active_host=standby_confd_host, standby_host=active_confd_host)
+
+ def collect_standby_lp_data(
+ self, standby_lp_node_obj, standby_mgmt_session, cloud_account,
+ fmt_cloud_xpath, projects, fmt_nsd_catalog_xpath):
+ """Collect standby lp data."""
+ time.sleep(180)
+ rw_new_active_cloud_pxy = standby_mgmt_session.proxy(RwCloudYang)
+ nsd_pxy = standby_mgmt_session.proxy(RwProjectNsdYang)
+ rwnsr_proxy = standby_mgmt_session.proxy(RwNsrYang)
+
+ for project_name in projects:
+ rw_new_active_cloud_pxy.wait_for(
+ fmt_cloud_xpath.format(
+ project=quoted_key(project_name),
+ account_name=quoted_key(cloud_account.name)) +
+ '/connection-status/status', 'success',
+ timeout=60, fail_on=['failure'])
+
+ # nsd_catalog = nsd_pxy.get_config(
+ # fmt_nsd_catalog_xpath.format(project=quoted_key(project_name)))
+ # assert nsd_catalog
+
+ if pytest.config.getoption("--nsr-test"):
+ nsr_opdata = rwnsr_proxy.get(
+ '/rw-project:project[rw-project:name={project}]' +
+ '/ns-instance-opdata'.format(
+ project=quoted_key(project_name))
+ )
+
+ assert nsr_opdata
+ nsrs = nsr_opdata.nsr
+
+ for nsr in nsrs:
+ xpath = (
+ '/rw-project:project[rw-project:name={project}]' +
+ '/ns-instance-opdata/nsr[ns-instance-config-ref=' +
+ '{config_ref}]/config-status'.format(
+ project=quoted_key(project_name),
+ config_ref=quoted_key(nsr.ns_instance_config_ref))
+ )
+
+ rwnsr_proxy.wait_for(
+ xpath, "configured", fail_on=['failed'], timeout=400)
+
+ standby_lp_node_obj.collect_data()
+
+ def attempt_indirect_failover(
+ self, revertive_pref_host, active_confd_host, standby_confd_host,
+ active_site_name, standby_site_name, logger):
+ """Try indirect failover."""
+ time.sleep(5)
+ logger.debug(
+ 'Attempting first failover. Host {} will be new active'.format(
+ standby_confd_host))
+
+ mano.indirect_failover(
+ revertive_pref_host, new_active_ip=standby_confd_host,
+ new_active_site=standby_site_name,
+ new_standby_ip=active_confd_host,
+ new_standby_site=active_site_name)
+
+ def match_active_standby(self, active_lp_node_obj, standby_lp_node_obj):
+ """Compare active standby."""
+ active_lp_node_obj.compare(standby_lp_node_obj)
+
+ def test_create_project_users_cloud_acc(
+ self, rbac_user_passwd, user_domain, rw_active_user_proxy, logger,
+ rw_active_project_proxy, rw_active_rbac_int_proxy, cloud_account,
+ rw_active_conman_proxy, rw_active_cloud_pxy, user_roles,
+ fmt_prefixed_cloud_xpath, fmt_cloud_xpath, descriptors,
+ active_mgmt_session, fmt_nsd_catalog_xpath, active_lp_node_obj,
+ standby_lp_node_obj, active_confd_host, standby_confd_host,
+ revertive_pref_host, active_site_name, standby_site_name,
+ standby_mgmt_session):
+ """Create 3 of users, projects, cloud accounts, decriptors & nsrs."""
+ def failover_and_match():
+ """Try an indirect failover.
+
+ Match active and standby data
+ """
+ self.collect_active_lp_data(
+ active_lp_node_obj, active_confd_host,
+ standby_confd_host, logger)
+ self.attempt_indirect_failover(
+ revertive_pref_host, active_confd_host, standby_confd_host,
+ active_site_name, standby_site_name, logger)
+ self.wait_for_standby_to_comeup(
+ standby_mgmt_session, active_confd_host, standby_confd_host)
+ self.collect_standby_lp_data(
+ standby_lp_node_obj, standby_mgmt_session, cloud_account,
+ fmt_cloud_xpath, projects, fmt_nsd_catalog_xpath)
+ self.match_active_standby(active_lp_node_obj, standby_lp_node_obj)
+
+ def delete_data_set(idx):
+
+ rift.auto.descriptor.terminate_nsr(
+ rwvnfr_pxy, rwnsr_pxy, rwvlr_pxy, logger,
+ project=projects[idx])
+
+ rift.auto.descriptor.delete_descriptors(
+ active_mgmt_session, project_name)
+
+ rw_active_cloud_pxy.delete_config(
+ fmt_prefixed_cloud_xpath.format(
+ project=quoted_key(projects[idx]),
+ account_name=quoted_key(cloud_account.name)
+ )
+ )
+ response = rw_active_cloud_pxy.get(
+ fmt_cloud_xpath.format(
+ project=quoted_key(projects[idx]),
+ account_name=quoted_key(cloud_account.name)
+ )
+ )
+ assert response is None
+
+ mano.delete_project(rw_active_conman_proxy, projects[idx])
+ projects.pop()
+ mano.delete_user(rw_active_user_proxy, users[idx], user_domain)
+ users.pop()
+
+ # Create test users
+ user_name_pfx = 'user_ha_'
+ users = []
+ for idx in range(1, 4):
+ users.append(user_name_pfx + str(idx))
+
+ mano.create_user(
+ rw_active_user_proxy, user_name_pfx + str(idx),
+ rbac_user_passwd, user_domain)
+
+ # Create projects and assign roles to users
+ prj_name_pfx = 'prj_ha_'
+ projects = []
+ for idx in range(1, 4):
+ project_name = prj_name_pfx + str(idx)
+ projects.append(project_name)
+ mano.create_project(
+ rw_active_conman_proxy, project_name)
+
+ for idx in range(0, 3):
+ project_name = projects[idx]
+ role = random.choice(user_roles)
+ user = users[idx]
+ logger.debug(
+ 'Assinging role {} to user {} in project {}'.format(
+ role, user, project_name))
+
+ mano.assign_project_role_to_user(
+ rw_active_project_proxy, role, user, project_name,
+ user_domain, rw_active_rbac_int_proxy)
+
+ logger.debug(
+ 'Creating cloud account {} for project {}'.format(
+ cloud_account.name, project_name))
+
+ xpath = fmt_prefixed_cloud_xpath.format(
+ project=quoted_key(project_name),
+ account_name=quoted_key(cloud_account.name))
+
+ rw_active_cloud_pxy.replace_config(xpath, cloud_account)
+
+ xpath_no_pfx = fmt_cloud_xpath.format(
+ project=quoted_key(project_name),
+ account_name=quoted_key(cloud_account.name))
+
+ response = rw_active_cloud_pxy.get(xpath_no_pfx)
+ assert response.name == cloud_account.name
+ assert response.account_type == cloud_account.account_type
+
+ rw_active_cloud_pxy.wait_for(
+ fmt_cloud_xpath.format(
+ project=quoted_key(project_name),
+ account_name=quoted_key(cloud_account.name)) +
+ '/connection-status/status', 'success', timeout=30,
+ fail_on=['failure'])
+
+ # Uploads the descriptors
+ for descriptor in descriptors:
+ rift.auto.descriptor.onboard(
+ active_mgmt_session, descriptor, project=project_name)
+
+ # Verify whether the descriptors uploaded successfully
+ logger.debug(
+ 'Onboarding descriptors for project {}'.format(project_name))
+
+ nsd_pxy = active_mgmt_session.proxy(RwProjectNsdYang)
+ rwnsr_pxy = active_mgmt_session.proxy(RwNsrYang)
+ rwvnfr_pxy = active_mgmt_session.proxy(RwVnfrYang)
+ rwvlr_pxy = active_mgmt_session.proxy(RwVlrYang)
+
+ nsd_xpath = fmt_nsd_catalog_xpath.format(
+ project=quoted_key(project_name))
+ nsd_catalog = nsd_pxy.get_config(nsd_xpath)
+ assert nsd_catalog
+
+ nsd_xpath = fmt_nsd_catalog_xpath.format(
+ project=quoted_key(project_name))
+ nsd_catalog = nsd_pxy.get_config(nsd_xpath)
+ assert nsd_catalog
+ nsd = nsd_catalog.nsd[0]
+ nsr = rift.auto.descriptor.create_nsr(
+ cloud_account.name, nsd.name, nsd)
+
+ logger.debug(
+ 'Instantiating NS for project {}'.format(project_name))
+ rift.auto.descriptor.instantiate_nsr(
+ nsr, rwnsr_pxy, logger, project=project_name)
+
+ delete_data_set(2)
+ failover_and_match()
+ delete_data_set(1)
+ failover_and_match()
+
+
# limitations under the License.
#
+import gi
import pytest
-from gi.repository import NsrYang, RwNsrYang, RwVnfrYang, NsdYang, RwNsdYang
+from gi.repository import (
+ NsrYang,
+ RwNsrYang,
+ RwVnfrYang,
+ RwProjectNsdYang,
+ )
import rift.auto.session
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
@pytest.fixture(scope='module')
def proxy(request, mgmt_session):
return mgmt_session.proxy
-ScalingGroupInstance = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance
-ScalingGroup = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup
+ScalingGroupInstance = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup_Instance
+ScalingGroup = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup
INSTANCE_ID = 1
proxy (Callable): Proxy for launchpad session.
state (str): Expected state
"""
- nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
+ nsr_opdata = proxy(RwNsrYang).get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsr = nsr_opdata.nsr[0]
- xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.ns_instance_config_ref)
+ xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(quoted_key(nsr.ns_instance_config_ref))
proxy(RwNsrYang).wait_for(xpath, state, timeout=240)
def verify_scaling_group(self, proxy, group_name, expected_records_count, scale_out=True):
2. Status of the scaling group
3. New vnfr record has been created.
"""
- nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
+ nsr_opdata = proxy(RwNsrYang).get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsr_id = nsr_opdata.nsr[0].ns_instance_config_ref
- xpath = ('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'
- '/scaling-group-record[scaling-group-name-ref="{}"]').format(
- nsr_id, group_name)
+ xpath = ('/rw-project:project[rw-project:name="default"]/ns-instance-opdata/nsr[ns-instance-config-ref={}]'
+ '/scaling-group-record[scaling-group-name-ref={}]').format(
+ quoted_key(nsr_id), quoted_key(group_name))
scaling_record = proxy(NsrYang).get(xpath)
for vnfr in instance.vnfrs:
vnfr_record = proxy(RwVnfrYang).get(
- "/vnfr-catalog/vnfr[id='{}']".format(vnfr))
+ "/rw-project:project[rw-project:name='default']/vnfr-catalog/vnfr[id={}]".format(quoted_key(vnfr)))
assert vnfr_record is not None
def verify_scale_up(self, proxy, group_name, expected):
"""Wait till the NSR state moves to configured before starting scaling
tests.
"""
- nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
+ nsr_opdata = proxy(RwNsrYang).get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
assert len(nsrs) == 1
current_nsr = nsrs[0]
- xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(current_nsr.ns_instance_config_ref)
+ xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(quoted_key(current_nsr.ns_instance_config_ref))
proxy(RwNsrYang).wait_for(xpath, "configured", timeout=240)
def test_min_max_scaling(self, proxy):
- nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
+ nsr_opdata = proxy(RwNsrYang).get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
nsd_id = nsrs[0].nsd_ref
nsr_id = nsrs[0].ns_instance_config_ref
# group_name = "http_client_group"
- xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/scaling-group-record".format(nsr_id)
+ xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/scaling-group-record".format(quoted_key(nsr_id))
scaling_records = proxy(RwNsrYang).get(xpath, list_obj=True)
for scaling_record in scaling_records.scaling_group_record:
group_name = scaling_record.scaling_group_name_ref
- xpath = "/nsd-catalog/nsd[id='{}']/scaling-group-descriptor[name='{}']".format(
- nsd_id, group_name)
- scaling_group_desc = proxy(NsdYang).get(xpath)
+ xpath = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd[id={}]/scaling-group-descriptor[name={}]".format(
+ quoted_key(nsd_id), quoted_key(group_name))
+ scaling_group_desc = proxy(RwProjectNsdYang).get(xpath)
# Add + 1 to go beyond the threshold
for instance_id in range(1, scaling_group_desc.max_instance_count + 1):
- xpath = '/ns-instance-config/nsr[id="{}"]/scaling-group[scaling-group-name-ref="{}"]'.format(
- nsr_id,
- group_name)
+ xpath = '/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr[id={}]/scaling-group[scaling-group-name-ref={}]'.format(
+ quoted_key(nsr_id),
+ quoted_key(group_name))
instance = ScalingGroupInstance.from_dict({"id": instance_id})
scaling_group = proxy(NsrYang).get(xpath)
assert instance_id == scaling_group_desc.max_instance_count
for instance_id in range(1, scaling_group_desc.max_instance_count):
- xpath = ('/ns-instance-config/nsr[id="{}"]/scaling-group'
- '[scaling-group-name-ref="{}"]/'
- 'instance[id="{}"]').format(
- nsr_id, group_name, instance_id)
+ xpath = ('/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr[id={}]/scaling-group'
+ '[scaling-group-name-ref={}]/'
+ 'instance[id={}]').format(
+ quoted_key(nsr_id), quoted_key(group_name), quoted_key(instance_id))
proxy(NsrYang).delete_config(xpath)
self.verify_scale_in(proxy, group_name, instance_id)
--- /dev/null
+#!/usr/bin/env python3
+"""
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+@file test_accounts_framework.py
+@author Paul Laidler (Paul.Laidler@riftio.com)
+@date 06/21/2017
+@brief Test logical account usage with vim and ro
+"""
+
+import gi
+import logging
+import os
+import pytest
+import random
+import re
+import subprocess
+import sys
+import time
+import uuid
+
+from contextlib import contextmanager
+
+import rift.auto.mano
+import rift.auto.session
+import rift.auto.descriptor
+
+import rift.mano.examples.ping_pong_nsd
+
+gi.require_version('RwVnfrYang', '1.0')
+from gi.repository import (
+ NsrYang,
+ RwProjectNsdYang,
+ VnfrYang,
+ RwNsrYang,
+ RwVnfrYang,
+ RwBaseYang,
+)
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
+@pytest.fixture(scope='session')
+def descriptors_pingpong():
+ return rift.mano.examples.ping_pong_nsd.generate_ping_pong_descriptors(pingcount=1)
+
+@pytest.fixture(scope='session')
+def packages_pingpong(descriptors_pingpong):
+ return rift.auto.descriptor.generate_descriptor_packages(descriptors_pingpong)
+
+def VerifyAllInstancesRunning(mgmt_session):
+ ''' Verifies all network service instances reach running operational status '''
+ nsr_opdata = mgmt_session.proxy(RwNsrYang).get("/rw-project:project[rw-project:name='default']/ns-instance-opdata")
+ nsrs = nsr_opdata.nsr
+ for nsr in nsrs:
+ xpath = (
+ "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref='{ns_instance_config_ref}']/operational-status"
+ ).format(
+ ns_instance_config_ref=nsr.ns_instance_config_ref
+ )
+ mgmt_session.proxy(RwNsrYang).wait_for(xpath, "running", fail_on=['failed'], timeout=300)
+
+def VerifyAllInstancesConfigured(mgmt_session):
+ ''' Verifies all network service instances reach configured config status '''
+ nsr_opdata = mgmt_session.proxy(RwNsrYang).get("/rw-project:project[rw-project:name='default']/ns-instance-opdata")
+ nsrs = nsr_opdata.nsr
+ for nsr in nsrs:
+ xpath = (
+ "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status"
+ ).format(
+ nsr.ns_instance_config_ref
+ )
+ mgmt_session.proxy(RwNsrYang).wait_for(xpath, "configured", fail_on=['failed'], timeout=300)
+
+@pytest.mark.depends('launchpad')
+@pytest.mark.setup('descriptors')
+@pytest.mark.incremental
+class TestSetupPingpong(object):
+ def test_onboard(self, mgmt_session, packages_pingpong):
+ for descriptor_package in packages_pingpong:
+ rift.auto.descriptor.onboard(mgmt_session, descriptor_package)
+
+@pytest.mark.depends('descriptors')
+@pytest.mark.incremental
+class TestInstantiateVim:
+ def test_instantiate_vim(self, mgmt_session, cloud_account_name):
+ nsd_catalog = mgmt_session.proxy(RwProjectNsdYang).get_config("/rw-project:project[rw-project:name='default']/nsd-catalog")
+ nsd = nsd_catalog.nsd[0]
+
+ nsr = rift.auto.descriptor.create_nsr(
+ cloud_account_name,
+ "pp_vim",
+ nsd,
+ )
+ mgmt_session.proxy(RwNsrYang).create_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr", nsr)
+
+ def test_verify_running(self, mgmt_session):
+ VerifyAllInstancesRunning(mgmt_session)
+
+ def test_verify_configured(self, mgmt_session):
+ VerifyAllInstancesConfigured(mgmt_session)
+
+@pytest.mark.depends('descriptors')
+@pytest.mark.incremental
+class TestInstantiateRo:
+ def test_instantiate_ro(self, mgmt_session, cloud_account_name, ro_map):
+ nsd_catalog = mgmt_session.proxy(RwProjectNsdYang).get_config("/rw-project:project[rw-project:name='default']/nsd-catalog")
+ nsd = nsd_catalog.nsd[0]
+
+ resource_orchestrator, datacenter = ro_map[cloud_account_name]
+ nsr = rift.auto.descriptor.create_nsr(
+ datacenter,
+ "pp_ro",
+ nsd,
+ resource_orchestrator=resource_orchestrator
+ )
+ mgmt_session.proxy(RwNsrYang).create_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr", nsr)
+
+ def test_verify_running(self, mgmt_session):
+ VerifyAllInstancesRunning(mgmt_session)
+
+ def test_verify_configured(self, mgmt_session):
+ VerifyAllInstancesConfigured(mgmt_session)
+
--- /dev/null
+#!/usr/bin/env python3
+"""
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+
+import gi
+import os
+
+import rift.auto.descriptor
+import rift.auto.mano as mano
+
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+
+from gi.repository import (
+ RwProjectNsdYang,
+ RwNsrYang,
+ RwVnfrYang,
+ RwProjectVnfdYang,
+ RwCloudYang
+)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+
+class TestFloatingIP(object):
+ """TestFloatingIP."""
+
+ # After RIFTIT-909 is completed this test will be set to working
+ valid_pool_names = ['FIP_SYSTEST_POOL_LARGE', 'public']
+ invalid_pool_names = ['', 'FIP_SYSTEST_POOL_EMPTY', 'invalid']
+
+ def create_cloud_account(
+ self, cloud_host, cloud_user, cloud_tenants, vim_ssl_enabled,
+ idx, mgmt_session):
+ """create_cloud_account."""
+ for cloud_tenant in cloud_tenants:
+ floating_ip_pool_names = (
+ self.valid_pool_names + self.invalid_pool_names)
+ project_name = 'float_project_{}'.format(idx)
+ password = 'mypasswd'
+ auth_url = 'http://{host}:5000/v3/'.format(host=cloud_host)
+ if vim_ssl_enabled is True:
+ auth_url = 'https://{host}:5000/v3/'.format(host=cloud_host)
+ mgmt_network = os.getenv('MGMT_NETWORK', 'private')
+ cloud_acc_name = 'cloud_account'
+ pool_name = floating_ip_pool_names[idx - 1]
+ cloud_account = (
+ RwCloudYang.
+ YangData_RwProject_Project_Cloud_Account.from_dict({
+ 'name': cloud_acc_name,
+ 'account_type': 'openstack',
+ 'openstack': {
+ 'admin': True,
+ 'key': cloud_user,
+ 'secret': password,
+ 'auth_url': auth_url,
+ 'tenant': cloud_tenant,
+ 'mgmt_network': mgmt_network,
+ 'floating_ip_pool': pool_name,
+ }
+ }))
+ mano.create_cloud_account(
+ mgmt_session, cloud_account, project_name=project_name)
+
+ def yield_vnfd_vnfr_pairs(self, proxy, nsr=None):
+ """
+ Yield tuples of vnfd & vnfr entries.
+
+ Args:
+ proxy (callable): Launchpad proxy
+ nsr (optional): If specified, only the vnfr & vnfd records of the
+ NSR are returned
+
+ Yields:
+ Tuple: VNFD and its corresponding VNFR entry
+ """
+ def get_vnfd(vnfd_id):
+ xpath = (
+ "/rw-project:project[rw-project:name='default']/" +
+ "vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_id)))
+ return proxy(RwProjectVnfdYang).get(xpath)
+
+ vnfr = (
+ "/rw-project:project[rw-project:name='default']/vnfr-catalog/vnfr")
+ vnfrs = proxy(RwVnfrYang).get(vnfr, list_obj=True)
+ for vnfr in vnfrs.vnfr:
+
+ if nsr:
+ const_vnfr_ids = [const_vnfr.vnfr_id for const_vnfr in nsr.constituent_vnfr_ref]
+ if vnfr.id not in const_vnfr_ids:
+ continue
+
+ vnfd = get_vnfd(vnfr.vnfd.id)
+ yield vnfd, vnfr
+
+ def test_floating_ip(
+ self, rw_user_proxy, rbac_user_passwd, user_domain, logger,
+ rw_project_proxy, rw_rbac_int_proxy, descriptors, mgmt_session,
+ cloud_user, cloud_tenants, vim_ssl_enabled, cloud_host,
+ fmt_nsd_catalog_xpath):
+ """test_floating_ip."""
+ proxy = mgmt_session.proxy
+ no_of_pool_name_cases = (
+ len(self.valid_pool_names + self.invalid_pool_names) + 1)
+ for idx in range(1, no_of_pool_name_cases):
+ project_name = 'float_project_{}'.format(idx)
+ user_name = 'float_user_{}'.format(idx)
+ project_role = 'rw-project:project-admin'
+ cloud_acc_name = 'cloud_account'
+ mano.create_user(
+ rw_user_proxy, user_name, rbac_user_passwd, user_domain)
+ mano.assign_project_role_to_user(
+ rw_project_proxy, project_role, user_name, project_name,
+ user_domain, rw_rbac_int_proxy)
+
+ self.create_cloud_account(
+ cloud_host, cloud_user, cloud_tenants,
+ vim_ssl_enabled, idx, mgmt_session)
+
+ for descriptor in descriptors:
+ rift.auto.descriptor.onboard(
+ mgmt_session, descriptor, project=project_name)
+
+ nsd_pxy = mgmt_session.proxy(RwProjectNsdYang)
+ nsd_catalog = nsd_pxy.get_config(
+ fmt_nsd_catalog_xpath.format(project=quoted_key(project_name)))
+ assert nsd_catalog
+ nsd = nsd_catalog.nsd[0]
+ nsr = rift.auto.descriptor.create_nsr(
+ cloud_acc_name, nsd.name, nsd)
+ rwnsr_pxy = mgmt_session.proxy(RwNsrYang)
+
+ try:
+ rift.auto.descriptor.instantiate_nsr(
+ nsr, rwnsr_pxy, logger, project=project_name)
+ except(Exception):
+ continue
+ for vnfd, vnfr in self.yield_vnfd_vnfr_pairs(proxy):
+ if idx > len(self.valid_pool_names):
+ assert vnfr.vdur[0].management_ip is None
+ else:
+ vnfr.vdur[0].management_ip is not None
--- /dev/null
+#!/usr/bin/env python3
+"""
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+@file test_launchpad.py
+@author Paul Laidler (Paul.Laidler@riftio.com)
+@date 07/07/2016
+@brief High-availibility system test that runs ping pong workflow
+"""
+
+import gi
+import logging
+import os
+import pytest
+import random
+import re
+import subprocess
+import sys
+import time
+import uuid
+
+from contextlib import contextmanager
+
+import rift.auto.mano
+import rift.auto.session
+import rift.auto.descriptor
+
+gi.require_version('RwVnfrYang', '1.0')
+from gi.repository import (
+ NsrYang,
+ RwProjectNsdYang,
+ VnfrYang,
+ RwNsrYang,
+ RwVnfrYang,
+ RwBaseYang,
+)
+
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
+@pytest.mark.setup('seed_random')
+class TestSeedRandom:
+ def test_seed_random(self, random_seed):
+ logger.info("Seeding number generator with seed {}".format(random_seed))
+ random.seed(random_seed)
+
+class MaxRetriesExceededException(Exception):
+ '''Indicates the maximum allowed number of retries has been exceeded for an operation
+ '''
+ pass
+
+class HAVerifyException(Exception):
+ '''Indicates a failure to verify correct HA behaviour
+ '''
+ pass
+
+
+class HASession:
+ ''' Wrapper around management session, which kills off system components
+ in order to trigger HA functionality
+ '''
+
+ DEFAULT_ATTEMPTS=3
+ DEFAULT_MIN_DELAY=0.0
+ DEFAULT_MAX_DELAY=1
+ DEFAULT_FREQUENCY=1
+ DEFAULT_RECOVERY_TIMEOUT=120
+
+ def __init__(self, session):
+ ''' Create a new HASession instance
+
+ Returns:
+ instance of HASession
+ '''
+ self.session = session
+ self.set_config()
+
+ @contextmanager
+ def config(self, *args, **kwargs):
+ ''' Context manager to allow HASession to temporarily have its config modified
+ '''
+ current_config = self.get_config()
+ self.set_config(*args, **kwargs)
+ yield
+ self.set_config(*current_config)
+
+ def get_config(self):
+ ''' Returns the current HA session config
+ '''
+ return (self.attempts, self.min_delay, self.max_delay, self.ha_frequency, self.recovery_timeout)
+
+ def set_config(self, attempts=None, min_delay=None, max_delay=None, ha_frequency=None, recovery_timeout=None):
+ ''' Set the HA session config, set default values for all config options not provided
+
+ Arguments:
+ attempts - Number of times to attempt an operation before failing
+ min_delay - minimum time that must elapse before session is allowed to kill a component
+ max_delay - maximum time that may elapse before killing a component
+ ha_frequency - frequency at which operations are tested for ha
+ recovery_timeout - time allowed for system to recovery after a component is killed
+ '''
+ if not attempts:
+ attempts = HASession.DEFAULT_ATTEMPTS
+ if not min_delay:
+ min_delay = HASession.DEFAULT_MIN_DELAY
+ if not max_delay:
+ max_delay = HASession.DEFAULT_MAX_DELAY
+ if not ha_frequency:
+ ha_frequency = HASession.DEFAULT_FREQUENCY
+ if not recovery_timeout:
+ recovery_timeout = HASession.DEFAULT_RECOVERY_TIMEOUT
+
+ self.attempts = attempts
+ self.min_delay = min_delay
+ self.max_delay = max_delay
+ self.ha_frequency = ha_frequency
+ self.recovery_timeout = recovery_timeout
+
+ def call(self, operation, *args, **kwargs):
+ ''' Call an operation using the wrapped management session, then
+ kill off a system component, and verify the operation still succeeds
+
+ Arguments:
+ operation - operation to be invoked
+ '''
+ # Choose to make the normal session call or do the HA test
+ if random.choice(range(0,int(1/self.ha_frequency))) != 0:
+ return operation(*args, **kwargs)
+
+ # Make sure we're starting from a running system
+ rift.vcs.vcs.wait_until_system_started(self.session)
+
+ def choose_any_tasklet(vcs_info):
+ tasklets = [component_info.component_name for component_info in vcs_info.components.component_info]
+ return random.choice(tasklets)
+
+ def choose_restartable_tasklet(vcs_info):
+ restartable_tasklets = [
+ component_info.component_name
+ for component_info in vcs_info.components.component_info
+ if component_info.recovery_action == 'RESTART'
+ and component_info.component_type == 'RWTASKLET'
+ ]
+ return random.choice(restartable_tasklets)
+
+ vcs_info = self.session.proxy(RwBaseYang).get('/vcs/info')
+ component_name = choose_restartable_tasklet(vcs_info)
+
+ ssh_cmd = 'ssh {} -o StrictHostKeyChecking=no -o BatchMode=yes'.format(self.session.host)
+ def get_component_process_pid(component_name):
+ cmd = '{} -- \'ps -ef | grep -v "grep" | grep rwmain | grep "{}" | tr -s " " | cut -d " " -f 2\''.format(ssh_cmd, component_name)
+ logger.info("Finding component [{}] pid using cmd: {}".format(component_name, cmd))
+ output = subprocess.check_output(cmd, shell=True)
+ return output.decode('ascii').strip()
+ process_pid = get_component_process_pid(component_name)
+ logger.info('{} has pid {}'.format(component_name, process_pid))
+
+ # Kick off a background process to kill the tasklet after some delay
+ delay = self.min_delay + (self.max_delay-self.min_delay)*random.random()
+ logger.info("Killing {} [{}] in {}".format(component_name, process_pid, delay))
+ cmd = '(sleep {} && {} -- "sudo kill -9 {}") &'.format(delay, ssh_cmd, process_pid)
+ os.system(cmd)
+
+ # Invoke session operation
+ now = time.time()
+ result = None
+ attempt = 0
+ while attempt < self.attempts:
+ try:
+ result = operation(*args, **kwargs)
+ # Possible improvement: implement optional verify step here
+ break
+ except Exception:
+ logger.error('operation failed - {}'.format(operation))
+ attempt += 1
+ # If the operation failed, wait until recovery occurs to re-attempt
+ rift.vcs.vcs.wait_until_system_started(self.session)
+
+ if attempt >= self.attempts:
+ raise MaxRetriesExceededException("Killed %s [%d] - Subsequently failed operation : %s %s %s", component_name, process_pid, operation, args, kwargs )
+
+ # Wait until kill has definitely happened
+ elapsed = now - time.time()
+ remaining = delay - elapsed
+ if remaining > 0:
+ time.sleep(remaining)
+ time.sleep(3)
+
+ # Verify system reaches running status again
+ rift.vcs.vcs.wait_until_system_started(self.session)
+
+ # TODO: verify the tasklet process was actually restarted (got a new pid)
+ new_pid = get_component_process_pid(component_name)
+ if process_pid == new_pid:
+ raise HAVerifyException("Process pid unchanged : %d == %d ~ didn't die?" % (process_pid, new_pid))
+
+ return result
+
+@pytest.fixture
+def ha_session(mgmt_session):
+ return HASession(mgmt_session)
+
+@pytest.mark.depends('seed_random')
+@pytest.mark.setup('launchpad')
+@pytest.mark.incremental
+class TestLaunchpadSetup:
+ def test_create_cloud_accounts(self, ha_session, mgmt_session, cloud_module, cloud_xpath, cloud_accounts):
+ '''Configure cloud accounts
+
+ Asserts:
+ Cloud name and cloud type details
+ '''
+ for cloud_account in cloud_accounts:
+ xpath = "{cloud_xpath}[name={cloud_account_name}]".format(
+ cloud_xpath=cloud_xpath,
+ cloud_account_name=quoted_key(cloud_account.name)
+ )
+ ha_session.call(mgmt_session.proxy(cloud_module).replace_config, xpath, cloud_account)
+ response = ha_session.call(mgmt_session.proxy(cloud_module).get, xpath)
+ assert response.name == cloud_account.name
+ assert response.account_type == cloud_account.account_type
+
+@pytest.mark.teardown('launchpad')
+@pytest.mark.incremental
+class TestLaunchpadTeardown:
+ def test_delete_cloud_accounts(self, ha_session, mgmt_session, cloud_module, cloud_xpath, cloud_accounts):
+ '''Unconfigure cloud_account'''
+ for cloud_account in cloud_accounts:
+ xpath = "{cloud_xpath}[name={cloud_account_name}]".format(
+ cloud_xpath=cloud_xpath,
+ cloud_account_name=quoted_key(cloud_account.name)
+ )
+ ha_session.call(mgmt_session.proxy(cloud_module).delete_config, xpath)
+
+@pytest.mark.setup('pingpong')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestSetupPingpong(object):
+ def test_onboard(self, ha_session, mgmt_session, descriptors):
+ for descriptor in descriptors:
+ with ha_session.config(max_delay=15):
+ ha_session.call(rift.auto.descriptor.onboard, mgmt_session, descriptor)
+
+ def test_instantiate(self, ha_session, mgmt_session, cloud_account_name):
+ catalog = ha_session.call(mgmt_session.proxy(RwProjectNsdYang).get_config, '/nsd-catalog')
+ nsd = catalog.nsd[0]
+ nsr = rift.auto.descriptor.create_nsr(cloud_account_name, "pingpong_1", nsd)
+ ha_session.call(mgmt_session.proxy(RwNsrYang).create_config, '/ns-instance-config/nsr', nsr)
+
+@pytest.mark.depends('pingpong')
+@pytest.mark.teardown('pingpong')
+@pytest.mark.incremental
+class TestTeardownPingpong(object):
+ def test_teardown(self, ha_session, mgmt_session):
+ ns_instance_config = ha_session.call(mgmt_session.proxy(RwNsrYang).get_config, '/ns-instance-config')
+ for nsr in ns_instance_config.nsr:
+ ha_session.call(mgmt_session.proxy(RwNsrYang).delete_config, "/ns-instance-config/nsr[id={}]".format(quoted_key(nsr.id)))
+
+ time.sleep(60)
+ vnfr_catalog = ha_session.call(mgmt_session.proxy(RwVnfrYang).get, '/vnfr-catalog')
+ assert vnfr_catalog is None or len(vnfr_catalog.vnfr) == 0
+
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestLaunchpad:
+ def test_account_connection_status(self, ha_session, mgmt_session, cloud_module, cloud_xpath, cloud_accounts):
+ '''Verify connection status on each cloud account
+
+ Asserts:
+ Cloud account is successfully connected
+ '''
+ for cloud_account in cloud_accounts:
+ with ha_session.config(attempts=2):
+ ha_session.call(
+ mgmt_session.proxy(cloud_module).wait_for,
+ '{}[name={}]/connection-status/status'.format(cloud_xpath, quoted_key(cloud_account.name)),
+ 'success',
+ timeout=60,
+ fail_on=['failure']
+ )
+
+@pytest.mark.depends('pingpong')
+@pytest.mark.incremental
+class TestPingpong:
+ def test_service_started(self, ha_session, mgmt_session):
+ nsr_opdata = ha_session.call(mgmt_session.proxy(RwNsrYang).get, '/ns-instance-opdata')
+ nsrs = nsr_opdata.nsr
+
+ for nsr in nsrs:
+ xpath = (
+ "/ns-instance-opdata/nsr[ns-instance-config-ref={ns_instance_config_ref}]/operational-status"
+ ).format(
+ ns_instance_config_ref=quoted_key(nsr.ns_instance_config_ref)
+ )
+
+ with ha_session.config(attempts=2, max_delay=60):
+ ha_session.call(mgmt_session.proxy(RwNsrYang).wait_for, xpath, "running", fail_on=['failed'], timeout=300)
+
+ def test_service_configured(self, ha_session, mgmt_session):
+ nsr_opdata = ha_session.call(mgmt_session.proxy(RwNsrYang).get, '/ns-instance-opdata')
+ nsrs = nsr_opdata.nsr
+
+ for nsr in nsrs:
+ xpath = (
+ "/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status"
+ ).format(
+ quoted_key(nsr.ns_instance_config_ref)
+ )
+
+ with ha_session.config(attempts=2, max_delay=60):
+ ha_session.call(mgmt_session.proxy(RwNsrYang).wait_for, xpath, "configured", fail_on=['failed'], timeout=300)
+
--- /dev/null
+#!/usr/bin/env python3
+"""
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+@file test_input_params.py
+@author Paul Laidler (Paul.Laidler@riftio.com)
+@date 06/21/2017
+@brief Test of VNF Input parameters using ping pong
+"""
+
+import gi
+import logging
+import os
+import pytest
+import random
+import re
+import subprocess
+import sys
+import time
+import uuid
+
+from contextlib import contextmanager
+
+import rift.auto.mano
+import rift.auto.session
+import rift.auto.descriptor
+
+gi.require_version('RwVnfrYang', '1.0')
+from gi.repository import (
+ NsrYang,
+ RwProjectNsdYang,
+ VnfrYang,
+ RwNsrYang,
+ RwVnfrYang,
+ RwBaseYang,
+)
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
+@pytest.fixture(scope='session')
+def global_vendor_name():
+ return 'global_vendor'
+
+@pytest.fixture(scope='session')
+def ping_custom_vendor_name():
+ return 'ping_vendor'
+
+@pytest.fixture(scope='session')
+def pong_custom_vendor_name():
+ return 'pong_vendor'
+
+@pytest.fixture(scope='session')
+def ping_custom_init_data():
+ return 'ping_custom_init_data'
+
+@pytest.fixture(scope='session')
+def pong_custom_init_data():
+ return 'pong_custom_init_data'
+
+@pytest.fixture(scope='session')
+def ping_custom_meta_data():
+ return 'ping_custom_meta_data'
+
+@pytest.fixture(scope='session')
+def pong_custom_meta_data():
+ return 'pong_custom_meta_data'
+
+@pytest.fixture(scope='session')
+def ping_custom_script_init_data():
+ return 'ping'
+
+@pytest.fixture(scope='session')
+def pong_custom_script_init_data():
+ return 'pong'
+
+@pytest.fixture(scope='session')
+def ping_descriptor(descriptors_pingpong_vnf_input_params):
+ return descriptors_pingpong_vnf_input_params[0]
+
+@pytest.fixture(scope='session')
+def pong_descriptor(descriptors_pingpong_vnf_input_params):
+ return descriptors_pingpong_vnf_input_params[1]
+
+@pytest.fixture(scope='session')
+def ping_pong_descriptor(descriptors_pingpong_vnf_input_params):
+ return descriptors_pingpong_vnf_input_params[2]
+
+@pytest.fixture(scope='session')
+def ping_id(ping_descriptor):
+ return ping_descriptor.vnfd.id
+
+@pytest.fixture(scope='session')
+def pong_id(pong_descriptor):
+ return pong_descriptor.vnfd.id
+
+@pytest.fixture(scope='session')
+def ping_script_descriptor(descriptors_pingpong_script_input_params):
+ return descriptors_pingpong_script_input_params[0]
+
+@pytest.fixture(scope='session')
+def pong_script_descriptor(descriptors_pingpong_script_input_params):
+ return descriptors_pingpong_script_input_params[1]
+
+@pytest.fixture(scope='session')
+def ping_pong_script_descriptor(descriptors_pingpong_script_input_params):
+ return descriptors_pingpong_script_input_params[2]
+
+@pytest.fixture(scope='session')
+def ping_script_id(ping_script_descriptor):
+ return ping_script_descriptor.vnfd.id
+
+@pytest.fixture(scope='session')
+def pong_script_id(pong_script_descriptor):
+ return pong_script_descriptor.vnfd.id
+
+
+def VerifyAllInstancesRunning(mgmt_session):
+ ''' Verifies all network service instances reach running operational status '''
+ nsr_opdata = mgmt_session.proxy(RwNsrYang).get("/rw-project:project[rw-project:name='default']/ns-instance-opdata")
+ nsrs = nsr_opdata.nsr
+ for nsr in nsrs:
+ xpath = (
+ "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref='{ns_instance_config_ref}']/operational-status"
+ ).format(
+ ns_instance_config_ref=nsr.ns_instance_config_ref
+ )
+ mgmt_session.proxy(RwNsrYang).wait_for(xpath, "running", fail_on=['failed'], timeout=300)
+
+def VerifyAllInstancesConfigured(mgmt_session):
+ ''' Verifies all network service instances reach configured config status '''
+ nsr_opdata = mgmt_session.proxy(RwNsrYang).get("/rw-project:project[rw-project:name='default']/ns-instance-opdata")
+ nsrs = nsr_opdata.nsr
+ for nsr in nsrs:
+ xpath = (
+ "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status"
+ ).format(
+ nsr.ns_instance_config_ref
+ )
+ mgmt_session.proxy(RwNsrYang).wait_for(xpath, "configured", fail_on=['failed'], timeout=300)
+
+@pytest.mark.depends('launchpad')
+@pytest.mark.setup('descriptors')
+@pytest.mark.incremental
+class TestSetupPingpong(object):
+ def test_onboard_custom_descriptors(self, mgmt_session, packages_pingpong_vnf_input_params, packages_pingpong_script_input_params):
+ for descriptor_package in packages_pingpong_vnf_input_params:
+ rift.auto.descriptor.onboard(mgmt_session, descriptor_package)
+ for descriptor_package in packages_pingpong_script_input_params:
+ rift.auto.descriptor.onboard(mgmt_session, descriptor_package)
+
+@pytest.mark.depends('descriptors')
+@pytest.mark.incremental
+class TestGlobalVnfInputParams:
+ def test_instantiate(self, mgmt_session, cloud_account_name, global_vendor_name):
+ ''' Testing vnf input parameters with broadest xpath expression allowed
+
+ /vnfd:vnfd-catalog/vnfd:vnfd/<leaf>
+
+ Expected to replace the leaf in all member VNFs
+ '''
+
+ xpath = "/vnfd:vnfd-catalog/vnfd:vnfd/vnfd:vendor"
+ value = global_vendor_name
+ vnf_input_parameter = rift.auto.descriptor.create_vnf_input_parameter(xpath, value)
+
+ nsd_catalog = mgmt_session.proxy(RwProjectNsdYang).get_config("/rw-project:project[rw-project:name='default']/nsd-catalog")
+ nsd = [nsd for nsd in nsd_catalog.nsd if nsd.name == 'pp_input_nsd'][0]
+
+ nsr = rift.auto.descriptor.create_nsr(
+ cloud_account_name,
+ "pp_input_params_1",
+ nsd,
+ vnf_input_param_list=[vnf_input_parameter]
+ )
+ mgmt_session.proxy(RwNsrYang).create_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr", nsr)
+
+
+ def test_verify_running(self, mgmt_session):
+ VerifyAllInstancesRunning(mgmt_session)
+
+ def test_verify_configured(self, mgmt_session):
+ VerifyAllInstancesConfigured(mgmt_session)
+
+ def test_verify_vnf_input_parameters(self, mgmt_session, ping_id, pong_id, global_vendor_name):
+ vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog")
+ ping_vnfr = [vnfr for vnfr in vnfr_catalog.vnfr if vnfr.vnfd.id == ping_id][0]
+ pong_vnfr = [vnfr for vnfr in vnfr_catalog.vnfr if vnfr.vnfd.id == pong_id][0]
+ ping_vendor_name = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vendor" % ping_vnfr.id)
+ assert ping_vendor_name == global_vendor_name
+ pong_vendor_name = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vendor" % pong_vnfr.id)
+ assert pong_vendor_name == global_vendor_name
+
+ def test_teardown(self, mgmt_session):
+ ns_instance_config = mgmt_session.proxy(RwNsrYang).get_config("/rw-project:project[rw-project:name='default']/ns-instance-config")
+ for nsr in ns_instance_config.nsr:
+ mgmt_session.proxy(RwNsrYang).delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id='{}']".format(nsr.id))
+ time.sleep(60)
+ vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get("/rw-project:project[rw-project:name='default']/vnfr-catalog")
+ assert vnfr_catalog is None or len(vnfr_catalog.vnfr) == 0
+
+@pytest.mark.depends('descriptors')
+@pytest.mark.incremental
+class TestMemberVnfInputParams:
+ def test_instantiate(self, mgmt_session, cloud_account_name, ping_id, pong_id, ping_custom_vendor_name, pong_custom_vendor_name):
+ ''' Testing vnf input parameters with member specific xpath expression
+
+ /vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='<member-id>'/<leaf>
+
+ Expected to replace the leaf in a specific member VNF
+ '''
+
+ xpath = "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vendor" % (ping_id)
+ value = ping_custom_vendor_name
+ vnf_input_parameter_ping = rift.auto.descriptor.create_vnf_input_parameter(xpath, value, vnfd_id_ref=ping_id)
+
+ xpath = "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vendor" % (pong_id)
+ value = pong_custom_vendor_name
+ vnf_input_parameter_pong = rift.auto.descriptor.create_vnf_input_parameter(xpath, value, vnfd_id_ref=pong_id)
+
+ nsd_catalog = mgmt_session.proxy(RwProjectNsdYang).get_config("/rw-project:project[rw-project:name='default']/nsd-catalog")
+ nsd = [nsd for nsd in nsd_catalog.nsd if nsd.name == 'pp_input_nsd'][0]
+
+ nsr = rift.auto.descriptor.create_nsr(
+ cloud_account_name,
+ "pp_input_params_2",
+ nsd,
+ vnf_input_param_list=[vnf_input_parameter_ping, vnf_input_parameter_pong]
+ )
+ mgmt_session.proxy(RwNsrYang).create_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr", nsr)
+
+ def test_verify_running(self, mgmt_session):
+ VerifyAllInstancesRunning(mgmt_session)
+
+ def test_verify_configured(self, mgmt_session):
+ VerifyAllInstancesConfigured(mgmt_session)
+
+ def test_verify_vnf_input_parameters(self, mgmt_session, ping_id, pong_id, ping_custom_vendor_name, pong_custom_vendor_name):
+ vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog")
+ ping_vnfr = [vnfr for vnfr in vnfr_catalog.vnfr if vnfr.vnfd.id == ping_id][0]
+ pong_vnfr = [vnfr for vnfr in vnfr_catalog.vnfr if vnfr.vnfd.id == pong_id][0]
+ ping_vendor_name = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vendor" % ping_vnfr.id)
+ assert ping_vendor_name == ping_custom_vendor_name
+ pong_vendor_name = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vendor" % pong_vnfr.id)
+ assert pong_vendor_name == pong_custom_vendor_name
+
+ def test_teardown(self, mgmt_session):
+ ns_instance_config = mgmt_session.proxy(RwNsrYang).get_config("/rw-project:project[rw-project:name='default']/ns-instance-config")
+ for nsr in ns_instance_config.nsr:
+ mgmt_session.proxy(RwNsrYang).delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id='{}']".format(nsr.id))
+ time.sleep(60)
+ vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get("/rw-project:project[rw-project:name='default']/vnfr-catalog")
+ assert vnfr_catalog is None or len(vnfr_catalog.vnfr) == 0
+
+@pytest.mark.depends('descriptors')
+@pytest.mark.incremental
+class TestMemberVnfInputParamsCloudInit:
+ def test_instantiate(self, mgmt_session, cloud_account_name, ping_id, pong_id, ping_custom_init_data, pong_custom_init_data):
+ ''' Testing vnf input parameters with node specific xpath expression
+
+ /vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='<member-id>']/vnfd:vdu[vnfd:id="<vdu-id>"]/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name=<leaf-name>]/vnfd:value
+ /vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='<member-id>'/<leaf>
+
+ Expected to replace the leaf in a specific member VNF
+ '''
+
+ xpath = "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vdu[vnfd:id='iovdu_0']/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name='custom_cloud_init_data']/vnfd:value" % (ping_id)
+ value = ping_custom_init_data
+ vnf_input_parameter_ping = rift.auto.descriptor.create_vnf_input_parameter(xpath, value, vnfd_id_ref=ping_id)
+
+ xpath = "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vdu[vnfd:id='iovdu_0']/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name='custom_cloud_init_data']/vnfd:value" % (pong_id)
+ value = pong_custom_init_data
+ vnf_input_parameter_pong = rift.auto.descriptor.create_vnf_input_parameter(xpath, value, vnfd_id_ref=pong_id)
+
+
+ nsd_catalog = mgmt_session.proxy(RwProjectNsdYang).get_config("/rw-project:project[rw-project:name='default']/nsd-catalog")
+ nsd = [nsd for nsd in nsd_catalog.nsd if nsd.name == 'pp_input_nsd'][0]
+
+ nsr = rift.auto.descriptor.create_nsr(
+ cloud_account_name,
+ "pp_input_params_3",
+ nsd,
+ vnf_input_param_list=[vnf_input_parameter_ping, vnf_input_parameter_pong]
+ )
+ mgmt_session.proxy(RwNsrYang).create_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr", nsr)
+
+ def test_verify_running(self, mgmt_session):
+ VerifyAllInstancesRunning(mgmt_session)
+
+ def test_verify_configured(self, mgmt_session):
+ VerifyAllInstancesConfigured(mgmt_session)
+
+ def test_verify_vnf_input_parameters(self, mgmt_session, ping_id, pong_id, ping_custom_init_data, pong_custom_init_data):
+ ''' Verify both ping and pong init data were replaced with their respective init data
+ '''
+ vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog")
+ ping_vnfr = [vnfr for vnfr in vnfr_catalog.vnfr if vnfr.vnfd.id == ping_id][0]
+ pong_vnfr = [vnfr for vnfr in vnfr_catalog.vnfr if vnfr.vnfd.id == pong_id][0]
+
+ # Verify the data was replaced in the vdu
+ ping_init_data = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vnfd/vdu/supplemental-boot-data/custom-meta-data[name='custom_cloud_init_data']/value" % (ping_vnfr.id))
+ assert ping_init_data == ping_custom_init_data
+ pong_init_data = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vnfd/vdu/supplemental-boot-data/custom-meta-data[name='custom_cloud_init_data']/value" % (pong_vnfr.id))
+ assert pong_init_data == pong_custom_init_data
+
+ def test_teardown(self, mgmt_session):
+ ns_instance_config = mgmt_session.proxy(RwNsrYang).get_config("/rw-project:project[rw-project:name='default']/ns-instance-config")
+ for nsr in ns_instance_config.nsr:
+ mgmt_session.proxy(RwNsrYang).delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id='{}']".format(nsr.id))
+ time.sleep(60)
+ vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get("/rw-project:project[rw-project:name='default']/vnfr-catalog")
+ assert vnfr_catalog is None or len(vnfr_catalog.vnfr) == 0
+
+@pytest.mark.depends('descriptors')
+@pytest.mark.incremental
+class TestMemberVnfInputParamsCloudMeta:
+ def test_instantiate(self, mgmt_session, cloud_account_name, ping_id, pong_id, ping_custom_meta_data, pong_custom_meta_data):
+ ''' Testing vnf input parameters with node specific xpath expression
+
+ /vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='<member-id>']/vnfd:vdu[vnfd:id="<vdu-id>"]/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name=<leaf-name>]/vnfd:value
+ /vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='<member-id>'/<leaf>
+
+ Expected to replace the leaf in a specific member VNF
+ '''
+
+ xpath = "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vdu[vnfd:id='iovdu_0']/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name='custom_cloud_meta_data']/vnfd:value" % (ping_id)
+ value = ping_custom_meta_data
+ vnf_input_parameter_ping = rift.auto.descriptor.create_vnf_input_parameter(xpath, value, vnfd_id_ref=ping_id)
+
+ xpath = "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vdu[vnfd:id='iovdu_0']/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name='custom_cloud_meta_data']/vnfd:value" % (pong_id)
+ value = pong_custom_meta_data
+ vnf_input_parameter_pong = rift.auto.descriptor.create_vnf_input_parameter(xpath, value, vnfd_id_ref=pong_id)
+
+
+ nsd_catalog = mgmt_session.proxy(RwProjectNsdYang).get_config("/rw-project:project[rw-project:name='default']/nsd-catalog")
+ nsd = [nsd for nsd in nsd_catalog.nsd if nsd.name == 'pp_input_nsd'][0]
+
+ nsr = rift.auto.descriptor.create_nsr(
+ cloud_account_name,
+ "pp_input_params_4",
+ nsd,
+ vnf_input_param_list=[vnf_input_parameter_ping, vnf_input_parameter_pong]
+ )
+ mgmt_session.proxy(RwNsrYang).create_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr", nsr)
+
+ def test_verify_running(self, mgmt_session):
+ VerifyAllInstancesRunning(mgmt_session)
+
+ def test_verify_configured(self, mgmt_session):
+ VerifyAllInstancesConfigured(mgmt_session)
+
+ def test_verify_vnf_input_parameters(self, mgmt_session, ping_id, pong_id, ping_custom_meta_data, pong_custom_meta_data):
+ ''' Verify both ping and pong meta data were replaced with their respective meta data
+ '''
+ vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog")
+ ping_vnfr = [vnfr for vnfr in vnfr_catalog.vnfr if vnfr.vnfd.id == ping_id][0]
+ pong_vnfr = [vnfr for vnfr in vnfr_catalog.vnfr if vnfr.vnfd.id == pong_id][0]
+
+ # Verify the data was replaced in the vdu
+ ping_meta_data = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vnfd/vdu/supplemental-boot-data/custom-meta-data[name='custom_cloud_meta_data']/value" % (ping_vnfr.id))
+ assert ping_meta_data == ping_custom_meta_data
+ pong_meta_data = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vnfd/vdu/supplemental-boot-data/custom-meta-data[name='custom_cloud_meta_data']/value" % (pong_vnfr.id))
+ assert pong_meta_data == pong_custom_meta_data
+
+ # Verify the data was also replaced in the vdur
+ ping_meta_data = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vdur/supplemental-boot-data/custom-meta-data[name='custom_cloud_meta_data']/value" % (ping_vnfr.id))
+ assert ping_meta_data == ping_custom_meta_data
+ pong_meta_data = mgmt_session.proxy(RwVnfrYang).get("/project[name='default']/vnfr-catalog/vnfr[id='%s']/vdur/supplemental-boot-data/custom-meta-data[name='custom_cloud_meta_data']/value" % (pong_vnfr.id))
+ assert pong_meta_data == pong_custom_meta_data
+
+ def test_teardown(self, mgmt_session):
+ ns_instance_config = mgmt_session.proxy(RwNsrYang).get_config("/rw-project:project[rw-project:name='default']/ns-instance-config")
+ for nsr in ns_instance_config.nsr:
+ mgmt_session.proxy(RwNsrYang).delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id='{}']".format(nsr.id))
+ time.sleep(60)
+ vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get("/rw-project:project[rw-project:name='default']/vnfr-catalog")
+ assert vnfr_catalog is None or len(vnfr_catalog.vnfr) == 0
+
+
+@pytest.mark.depends('descriptors')
+@pytest.mark.incremental
+@pytest.mark.skipif(True, reason='RIFT-18171 - Disabled due to cloud init failure on userdata supplied bash scripts')
+class TestMemberVnfInputParamsInitScripts:
+ def test_instantiate(self, mgmt_session, cloud_account_name, ping_script_id, pong_script_id, ping_custom_script_init_data, pong_custom_script_init_data):
+ ''' Testing replacement of vnf input parameters with node specific xpath expression in init scripts
+
+ /vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='<member-id>']/vnfd:vdu[vnfd:id="<vdu-id>"]/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name=<leaf-name>]/vnfd:value
+ /vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='<member-id>'/<leaf>
+
+ Expected to replace the leaf in a specific member VNF
+ '''
+
+ xpath = "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vdu[vnfd:id='iovdu_0']/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name='CI-script-init-data']/vnfd:value" % (ping_script_id)
+ value = ping_custom_script_init_data
+ vnf_input_parameter_ping = rift.auto.descriptor.create_vnf_input_parameter(xpath, value, vnfd_id_ref=ping_script_id)
+
+ xpath = "/vnfd:vnfd-catalog/vnfd:vnfd[vnfd:id='%s']/vnfd:vdu[vnfd:id='iovdu_0']/vnfd:supplemental-boot-data/vnfd:custom-meta-data[vnfd:name='CI-script-init-data']/vnfd:value" % (pong_script_id)
+ value = pong_custom_script_init_data
+ vnf_input_parameter_pong = rift.auto.descriptor.create_vnf_input_parameter(xpath, value, vnfd_id_ref=pong_script_id)
+
+ nsd_catalog = mgmt_session.proxy(RwProjectNsdYang).get_config("/rw-project:project[rw-project:name='default']/nsd-catalog")
+ nsd = [nsd for nsd in nsd_catalog.nsd if nsd.name == 'pp_script_nsd'][0]
+
+ nsr = rift.auto.descriptor.create_nsr(
+ cloud_account_name,
+ "pp_input_params_5",
+ nsd,
+ vnf_input_param_list=[vnf_input_parameter_ping, vnf_input_parameter_pong]
+ )
+ mgmt_session.proxy(RwNsrYang).create_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr", nsr)
+
+ def test_verify_running(self, mgmt_session):
+ VerifyAllInstancesRunning(mgmt_session)
+
+ def test_verify_configured(self, mgmt_session):
+ # Configuration will only succeed if the replacement was sucessful
+ VerifyAllInstancesConfigured(mgmt_session)
--- /dev/null
+#!/usr/bin/env python3
+"""
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+@file test_mro_pingpong.py
+@author Paul Laidler (Paul.Laidler@riftio.com)
+@date 06/21/2017
+@brief Multi-RO test that instantiates two ping pong instances on seperate ROs
+"""
+
+import gi
+import logging
+import os
+import pytest
+import random
+import re
+import subprocess
+import sys
+import time
+import uuid
+
+from contextlib import contextmanager
+
+import rift.auto.mano
+import rift.auto.session
+import rift.auto.descriptor
+
+gi.require_version('RwVnfrYang', '1.0')
+from gi.repository import (
+ NsrYang,
+ RwProjectNsdYang,
+ VnfrYang,
+ RwNsrYang,
+ RwVnfrYang,
+ RwBaseYang,
+)
+
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
+@pytest.mark.setup('pingpong')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestSetupPingpong(object):
+ def test_onboard(self, mgmt_session, descriptors):
+ for descriptor in descriptors:
+ rift.auto.descriptor.onboard(mgmt_session, descriptor)
+
+ def test_instantiate(self, mgmt_session, ro_account_info):
+ catalog = mgmt_session.proxy(RwProjectNsdYang).get_config("/rw-project:project[rw-project:name='default']/nsd-catalog")
+ nsd = catalog.nsd[0]
+ instance_id = 0
+ for resource_orchestrator, account_info in ro_account_info.items():
+ for datacenter in account_info['datacenters']:
+ nsr = rift.auto.descriptor.create_nsr(
+ datacenter,
+ "pingpong_{}".format(instance_id),
+ nsd,
+ resource_orchestrator=resource_orchestrator
+ )
+ mgmt_session.proxy(RwNsrYang).create_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr", nsr)
+ instance_id += 1
+
+
+@pytest.mark.depends('pingpong')
+@pytest.mark.incremental
+class TestPingpong:
+ def test_service_started(self, mgmt_session):
+ nsr_opdata = mgmt_session.proxy(RwNsrYang).get("/rw-project:project[rw-project:name='default']/ns-instance-opdata")
+ nsrs = nsr_opdata.nsr
+
+ for nsr in nsrs:
+ xpath = (
+ "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={ns_instance_config_ref}]/operational-status"
+ ).format(
+ ns_instance_config_ref=quoted_key(nsr.ns_instance_config_ref)
+ )
+ mgmt_session.proxy(RwNsrYang).wait_for(xpath, "running", fail_on=['failed'], timeout=300)
+
+ def test_service_configured(self, mgmt_session):
+ nsr_opdata = mgmt_session.proxy(RwNsrYang).get("/rw-project:project[rw-project:name='default']/ns-instance-opdata")
+ nsrs = nsr_opdata.nsr
+
+ for nsr in nsrs:
+ xpath = (
+ "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status"
+ ).format(
+ quoted_key(nsr.ns_instance_config_ref)
+ )
+ mgmt_session.proxy(RwNsrYang).wait_for(xpath, "configured", fail_on=['failed'], timeout=300)
+
+@pytest.mark.depends('pingpong')
+@pytest.mark.teardown('pingpong')
+@pytest.mark.incremental
+class TestTeardownPingpong(object):
+ def test_teardown(self, mgmt_session):
+ ns_instance_config = mgmt_session.proxy(RwNsrYang).get_config("/rw-project:project[rw-project:name='default']/ns-instance-config")
+ for nsr in ns_instance_config.nsr:
+ mgmt_session.proxy(RwNsrYang).delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id={}]".format(quoted_key(nsr.id)))
+
+ time.sleep(60)
+ vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get("/rw-project:project[rw-project:name='default']/vnfr-catalog")
+ assert vnfr_catalog is None or len(vnfr_catalog.vnfr) == 0
+
@brief Launchpad System Test
"""
+import gi
import json
import logging
import os
import pytest
-import shlex
import requests
+import shlex
import shutil
import subprocess
import tempfile
import time
import uuid
+import rift.auto.descriptor
import rift.auto.mano
import rift.auto.session
import rift.mano.examples.ping_pong_nsd as ping_pong
-import gi
gi.require_version('RwNsrYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
gi.require_version('RwLaunchpadYang', '1.0')
gi.require_version('RwBaseYang', '1.0')
from gi.repository import (
- NsdYang,
+ RwProjectNsdYang,
RwNsrYang,
RwVnfrYang,
NsrYang,
VnfrYang,
VldYang,
- RwVnfdYang,
+ RwProjectVnfdYang,
RwLaunchpadYang,
RwBaseYang
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
logging.basicConfig(level=logging.DEBUG)
@pytest.fixture(scope='module')
def vnfd_proxy(request, mgmt_session):
- return mgmt_session.proxy(RwVnfdYang)
+ return mgmt_session.proxy(RwProjectVnfdYang)
@pytest.fixture(scope='module')
def rwvnfr_proxy(request, mgmt_session):
@pytest.fixture(scope='module')
def nsd_proxy(request, mgmt_session):
- return mgmt_session.proxy(NsdYang)
+ return mgmt_session.proxy(RwProjectNsdYang)
@pytest.fixture(scope='module')
def rwnsr_proxy(request, mgmt_session):
class DescriptorOnboardError(Exception):
pass
-def create_nsr(nsd, input_param_list, cloud_account_name):
- """
- Create the NSR record object
-
- Arguments:
- nsd - NSD
- input_param_list - list of input-parameter objects
- cloud_account_name - name of cloud account
-
- Return:
- NSR object
- """
- nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
-
- nsr.id = str(uuid.uuid4())
- nsr.name = rift.auto.mano.resource_name(nsr.id)
- nsr.short_name = "nsr_short_name"
- nsr.description = "This is a description"
- nsr.nsd.from_dict(nsr.as_dict())
- nsr.admin_status = "ENABLED"
- nsr.input_parameter.extend(input_param_list)
- nsr.cloud_account = cloud_account_name
-
- return nsr
def upload_descriptor(logger, descriptor_file, host="127.0.0.1"):
curl_cmd = 'curl --insecure -F "descriptor=@{file}" https://{host}:4567/api/upload'.format(
return transaction_id
-def wait_onboard_transaction_finished(logger, transaction_id, timeout=30, host="127.0.0.1"):
+def wait_onboard_transaction_finished(logger, transaction_id, timeout=30, host="127.0.0.1", project="default"):
def check_status_onboard_status():
- uri = 'https://%s:4567/api/upload/%s/state' % (host, transaction_id)
+ uri = 'https://%s:8008/api/operational/project/%s/create-jobs/job/%s' % (host, project, transaction_id)
curl_cmd = 'curl --insecure {uri}'.format(uri=uri)
return subprocess.check_output(shlex.split(curl_cmd), universal_newlines=True)
"""
logger.debug("Terminating Ping Pong NSRs")
- nsr_path = "/ns-instance-config"
+ nsr_path = "/rw-project:project[rw-project:name='default']/ns-instance-config"
nsr = rwnsr_proxy.get_config(nsr_path)
nsrs = nsr.nsr
xpaths = []
for ping_pong in nsrs:
- xpath = "/ns-instance-config/nsr[id='{}']".format(ping_pong.id)
+ xpath = "/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id={}]".format(quoted_key(ping_pong.id))
rwnsr_proxy.delete_config(xpath)
xpaths.append(xpath)
assert nsr is None
# Get the ns-instance-config
- ns_instance_config = rwnsr_proxy.get_config("/ns-instance-config")
+ ns_instance_config = rwnsr_proxy.get_config("/rw-project:project[rw-project:name='default']/ns-instance-config")
# Termination tests
- vnfr = "/vnfr-catalog/vnfr"
+ vnfr = "/rw-project:project[rw-project:name='default']/vnfr-catalog/vnfr"
vnfrs = rwvnfr_proxy.get(vnfr, list_obj=True)
assert vnfrs is None or len(vnfrs.vnfr) == 0
- # nsr = "/ns-instance-opdata/nsr"
+ # nsr = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr"
# nsrs = rwnsr_proxy.get(nsr, list_obj=True)
# assert len(nsrs.nsr) == 0
"""Generates & On-boards the descriptors.
"""
temp_dirs = []
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
endpoint = "upload"
"""
scheme,
cert)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 2, "There should two vnfds"
assert "ping_vnfd" in [vnfds[0].name, vnfds[1].name]
def delete_vnfds():
- vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+ vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
for vnfd_record in vnfds.vnfd:
- xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+ xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_record.id))
vnfd_proxy.delete_config(xpath)
time.sleep(5)
- vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+ vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
assert vnfds is None or len(vnfds.vnfd) == 0
scheme,
cert)
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsds = catalog.nsd
assert len(nsds) == 1, "There should only be a single nsd"
assert nsds[0].name == "ping_pong_nsd"
# for temp_dir in temp_dirs:
# temp_dir.cleanup()
- def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account):
+ def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account, use_accounts):
def verify_input_parameters(running_config, config_param):
"""
config_param.value,
running_config.input_parameter))
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsd = catalog.nsd[0]
input_parameters = []
- descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:vendor" % nsd.id
+ descr_xpath = "/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id=%s]/project-nsd:vendor" % quoted_key(nsd.id)
descr_value = "automation"
in_param_id = str(uuid.uuid4())
- input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ input_param_1 = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath=descr_xpath,
value=descr_value)
input_parameters.append(input_param_1)
- nsr = create_nsr(nsd, input_parameters, cloud_account.name)
+ nsr_id = str(uuid.uuid4())
+ if use_accounts:
+ nsr = rift.auto.descriptor.create_nsr(
+ cloud_account.name,
+ nsr_id,
+ nsd,
+ input_param_list=input_parameters,
+ account=cloud_account.name,
+ nsr_id=nsr_id
+ )
+ else:
+ nsr = rift.auto.descriptor.create_nsr(
+ cloud_account.name,
+ nsr_id,
+ nsd,
+ input_param_list=input_parameters,
+ nsr_id=nsr_id
+ )
logger.info("Instantiating the Network Service")
- rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+ rwnsr_proxy.create_config('/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr', nsr)
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'.format(nsr.id))
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata/nsr[ns-instance-config-ref={}]'.format(quoted_key(nsr.id)))
assert nsr_opdata is not None
# Verify the input parameter configuration
- running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+ running_config = rwnsr_proxy.get_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id=%s]" % quoted_key(nsr.id))
for input_param in input_parameters:
verify_input_parameters(running_config, input_param)
def test_wait_for_pingpong_started(self, rwnsr_proxy):
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
for nsr in nsrs:
- xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(
- nsr.ns_instance_config_ref)
+ xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(
+ quoted_key(nsr.ns_instance_config_ref))
rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=180)
def test_wait_for_pingpong_configured(self, rwnsr_proxy):
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
for nsr in nsrs:
- xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(
- nsr.ns_instance_config_ref)
+ xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(
+ quoted_key(nsr.ns_instance_config_ref))
rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=450)
"""Generates & On-boards the descriptors.
"""
temp_dirs = []
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
endpoint = "update"
ping_vnfd, pong_vnfd, ping_pong_nsd = ping_pong_records
scheme,
cert)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 2, "There should two vnfds"
assert "pong_vnfd" in [vnfds[0].name, vnfds[1].name]
def delete_nsds():
- nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+ nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
for nsd_record in nsds.nsd:
- xpath = "/nsd-catalog/nsd[id='{}']".format(nsd_record.id)
+ xpath = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd[id={}]".format(quoted_key(nsd_record.id))
nsd_proxy.delete_config(xpath)
time.sleep(5)
- nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+ nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
assert nsds is None or len(nsds.nsd) == 0
delete_nsds()
def delete_vnfds():
- vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+ vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
for vnfd_record in vnfds.vnfd:
- xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+ xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_record.id))
vnfd_proxy.delete_config(xpath)
time.sleep(5)
- vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+ vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
assert vnfds is None or len(vnfds.vnfd) == 0
delete_vnfds()
scheme,
cert)
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsds = catalog.nsd
assert len(nsds) == 1, "There should only be a single nsd"
assert nsds[0].name == "ping_pong_nsd"
# for temp_dir in temp_dirs:
# temp_dir.cleanup()
- def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account):
+ def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account, use_accounts):
def verify_input_parameters(running_config, config_param):
"""
Verify the configured parameter set against the running configuration
config_param.value,
running_config.input_parameter))
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsd = catalog.nsd[0]
input_parameters = []
- descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:vendor" % nsd.id
+ descr_xpath = "/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id=%s]/project-nsd:vendor" % quoted_key(nsd.id)
descr_value = "automation"
in_param_id = str(uuid.uuid4())
- input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ input_param_1 = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath=descr_xpath,
value=descr_value)
input_parameters.append(input_param_1)
- nsr = create_nsr(nsd, input_parameters, cloud_account.name)
+ nsr_id = str(uuid.uuid4())
+ if use_accounts:
+ nsr = rift.auto.descriptor.create_nsr(
+ cloud_account.name,
+ nsr_id,
+ nsd,
+ input_param_list=input_parameters,
+ account=cloud_account.name,
+ nsr_id=nsr_id
+ )
+ else:
+ nsr = rift.auto.descriptor.create_nsr(
+ cloud_account.name,
+ nsr_id,
+ nsd,
+ input_param_list=input_parameters,
+ nsr_id=nsr_id
+ )
logger.info("Instantiating the Network Service")
- rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+ rwnsr_proxy.create_config('/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr', nsr)
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'.format(nsr.id))
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata/nsr[ns-instance-config-ref={}]'.format(quoted_key(nsr.id)))
assert nsr_opdata is not None
# Verify the input parameter configuration
- running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+ running_config = rwnsr_proxy.get_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id=%s]" % quoted_key(nsr.id))
for input_param in input_parameters:
verify_input_parameters(running_config, input_param)
def test_wait_for_pingpong_started(self, rwnsr_proxy):
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
for nsr in nsrs:
- xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(
- nsr.ns_instance_config_ref)
+ xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(
+ quoted_key(nsr.ns_instance_config_ref))
rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=180)
def test_wait_for_pingpong_configured(self, rwnsr_proxy):
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
for nsr in nsrs:
- xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(
- nsr.ns_instance_config_ref)
+ xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(
+ quoted_key(nsr.ns_instance_config_ref))
rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=450)
Asserts:
The records are deleted.
"""
- nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+ nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
for nsd in nsds.nsd:
- xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
+ xpath = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd[id={}]".format(quoted_key(nsd.id))
nsd_proxy.delete_config(xpath)
- nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+ nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
assert nsds is None or len(nsds.nsd) == 0
- vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+ vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
for vnfd_record in vnfds.vnfd:
- xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+ xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_record.id))
vnfd_proxy.delete_config(xpath)
- vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+ vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
assert vnfds is None or len(vnfds.vnfd) == 0
# Creation Date: 2016/01/04
#
+import gi
import pytest
-import rift.vcs.vcs
import time
-import gi
+import rift.vcs.vcs
+
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
@pytest.fixture(scope='module')
def rwnsr_proxy(mgmt_session):
time.sleep(60)
rift.vcs.vcs.wait_until_system_started(mgmt_session)
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
for nsr in nsr_opdata.nsr:
- xpath = ("/ns-instance-opdata"
- "/nsr[ns-instance-config-ref='%s']"
- "/operational-status") % (nsr.ns_instance_config_ref)
+ xpath = ("/rw-project:project[rw-project:name='default']/ns-instance-opdata"
+ "/nsr[ns-instance-config-ref=%s]"
+ "/operational-status") % (quoted_key(nsr.ns_instance_config_ref))
operational_status = rwnsr_proxy.get(xpath)
assert operational_status == 'running'
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.io Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
import collections
+import gi
+import json
+import operator
+import os
+import pytest
+import re
import socket
import subprocess
import time
-import pytest
-
-import gi
-import re
+from scapy.all import rdpcap, UDP, TCP, IP
gi.require_version('RwNsrYang', '1.0')
from gi.repository import (
- NsdYang,
+ RwProjectNsdYang,
RwBaseYang,
RwConmanYang,
RwNsrYang,
- RwNsdYang,
RwVcsYang,
RwVlrYang,
- RwVnfdYang,
+ RwProjectVnfdYang,
RwVnfrYang,
VlrYang,
VnfrYang,
+ NsrYang,
)
+import rift.auto.mano
import rift.auto.session
import rift.mano.examples.ping_pong_nsd as ping_pong
+from rift.auto.ssh import SshSession
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
@pytest.fixture(scope='module')
'''
return ping_pong_factory.generate_descriptors()
+@pytest.fixture(scope='session')
+def updated_ping_pong_descriptors(updated_ping_pong_records):
+ '''Fixture which returns a set of updated descriptors that can be configured through
+ the management interface.
+
+ The descriptors generated by the descriptor generation process for packages don't include project
+ information (presumably in order to avoid tying them to particular project). Here they are converted
+ to types that include project information which can then be used to configure the system.
+ '''
+ ping, pong, ping_pong = updated_ping_pong_records
+ proj_ping_vnfd = RwProjectVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd.from_dict(ping.vnfd.as_dict())
+ proj_pong_vnfd = RwProjectVnfdYang.YangData_RwProject_Project_VnfdCatalog_Vnfd.from_dict(pong.vnfd.as_dict())
+ proj_ping_pong_nsd = RwProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd.from_dict(ping_pong.descriptor.as_dict()['nsd'][0])
+ return proj_ping_vnfd, proj_pong_vnfd, proj_ping_pong_nsd
+
+
+class JobStatusError(Exception):
+ """JobStatusError."""
+
+ pass
+
+
def yield_vnfd_vnfr_pairs(proxy, nsr=None):
"""
Yields tuples of vnfd & vnfr entries.
Tuple: VNFD and its corresponding VNFR entry
"""
def get_vnfd(vnfd_id):
- xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_id)
- return proxy(RwVnfdYang).get(xpath)
+ xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_id))
+ return proxy(RwProjectVnfdYang).get(xpath)
- vnfr = "/vnfr-catalog/vnfr"
+ vnfr = "/rw-project:project[rw-project:name='default']/vnfr-catalog/vnfr"
vnfrs = proxy(RwVnfrYang).get(vnfr, list_obj=True)
for vnfr in vnfrs.vnfr:
"""
for nsr_cfg, nsr in yield_nsrc_nsro_pairs(proxy):
- nsd_path = "/nsd-catalog/nsd[id='{}']".format(
- nsr_cfg.nsd.id)
- nsd = proxy(RwNsdYang).get_config(nsd_path)
+ nsd_path = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd[id={}]".format(
+ quoted_key(nsr_cfg.nsd.id))
+ nsd = proxy(RwProjectNsdYang).get_config(nsd_path)
yield nsd, nsr
Yields:
Tuple: NSR config and its corresponding NSR op record
"""
- nsr = "/ns-instance-opdata/nsr"
+ nsr = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr"
nsrs = proxy(RwNsrYang).get(nsr, list_obj=True)
for nsr in nsrs.nsr:
- nsr_cfg_path = "/ns-instance-config/nsr[id='{}']".format(
- nsr.ns_instance_config_ref)
+ nsr_cfg_path = "/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id={}]".format(
+ quoted_key(nsr.ns_instance_config_ref))
nsr_cfg = proxy(RwNsrYang).get_config(nsr_cfg_path)
yield nsr_cfg, nsr
boolean
"""
try:
- socket.inet_aton(address)
- except socket.error:
- return False
- else:
+ socket.inet_pton(socket.AF_INET, address)
return True
+ except socket.error:
+ try:
+ socket.inet_pton(socket.AF_INET6, address)
+ return True
+ except socket.error:
+ return False
+ def is_ipv6(self, address):
+ """Returns True if address is of type 'IPv6', else False."""
+ try:
+ socket.inet_pton(socket.AF_INET6, address)
+ return True
+ except socket.error:
+ return False
@pytest.mark.feature("recovery")
def test_tasklets_recovery(self, mgmt_session, proxy, recover_tasklet):
def test_records_present(self, proxy):
assert_records(proxy)
- def test_nsd_ref_count(self, proxy):
- """
- Asserts
- 1. The ref count data of the NSR with the actual number of NSRs
- """
- nsd_ref_xpath = "/ns-instance-opdata/nsd-ref-count"
- nsd_refs = proxy(RwNsrYang).get(nsd_ref_xpath, list_obj=True)
-
- expected_ref_count = collections.defaultdict(int)
- for nsd_ref in nsd_refs.nsd_ref_count:
- expected_ref_count[nsd_ref.nsd_id_ref] = nsd_ref.instance_ref_count
-
- actual_ref_count = collections.defaultdict(int)
- for nsd, nsr in yield_nsd_nsr_pairs(proxy):
- actual_ref_count[nsd.id] += 1
-
- assert expected_ref_count == actual_ref_count
-
def test_vnfd_ref_count(self, proxy):
"""
Asserts
1. The ref count data of the VNFR with the actual number of VNFRs
"""
- vnfd_ref_xpath = "/vnfr-catalog/vnfd-ref-count"
+ vnfd_ref_xpath = "/rw-project:project[rw-project:name='default']/vnfr-catalog/vnfd-ref-count"
vnfd_refs = proxy(RwVnfrYang).get(vnfd_ref_xpath, list_obj=True)
expected_ref_count = collections.defaultdict(int)
for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
assert vnfd.mgmt_interface.port == vnfr.mgmt_interface.port
assert len(vnfd.vdu) == len(vnfr.vdur)
-
for vdud, vdur in zip(vnfd.vdu, vnfr.vdur):
- assert vdud.vm_flavor == vdur.vm_flavor
+ for field in vdud.vm_flavor.fields:
+ if field in vdur.vm_flavor.fields:
+ assert getattr(vdud.vm_flavor, field) == getattr(vdur.vm_flavor, field)
assert self.is_valid_ip(vdur.management_ip) is True
- assert vdud.external_interface[0].vnfd_connection_point_ref == \
- vdur.external_interface[0].vnfd_connection_point_ref
+
+ vdur_intf_dict = {}
+ for intf in vdur.interface:
+ vdur_intf_dict[intf.name] = intf.external_connection_point_ref if 'external_connection_point_ref' in \
+ intf.as_dict() else intf.internal_connection_point_ref
+ for intf in vdud.interface:
+ assert intf.name in vdur_intf_dict
+ if intf.internal_connection_point_ref:
+ vdud_intf_cp_ref = intf.internal_connection_point_ref
+ else:
+ vdud_intf_cp_ref = intf.external_connection_point_ref
+ assert vdur_intf_dict[intf.name] == vdud_intf_cp_ref
def test_external_vl(self, proxy):
"""
assert cp_des[0].name == cp_rec[0].name
assert self.is_valid_ip(cp_rec[0].ip_address) is True
- xpath = "/vlr-catalog/vlr[id='{}']".format(cp_rec[0].vlr_ref)
+ xpath = "/rw-project:project[rw-project:name='default']/vlr-catalog/vlr[id={}]".format(quoted_key(cp_rec[0].vlr_ref))
vlr = proxy(RwVlrYang).get(xpath)
assert len(vlr.network_id) > 0
assert self.is_valid_ip(ip) is True
assert vlr.operational_status == "running"
-
+ @pytest.mark.skipif(pytest.config.getoption("--port-sequencing"), reason="port-sequencing test uses two VLs in NSD")
def test_nsr_record(self, proxy):
"""
Currently we only test for the components of NSR tests. Ignoring the
"""
for nsr_cfg, nsr in yield_nsrc_nsro_pairs(proxy):
# 1 n/w and 2 connection points
- assert len(nsr.vlr) == 1
+ assert len(nsr.vlr) == 2
assert len(nsr.vlr[0].vnfr_connection_point_ref) == 2
assert len(nsr.constituent_vnfr_ref) == 2
assert nsr_cfg.admin_status == 'ENABLED'
- def test_wait_for_pingpong_configured(self, proxy):
- nsr_opdata = proxy(RwNsrYang).get('/ns-instance-opdata')
+ def test_wait_for_ns_configured(self, proxy):
+ nsr_opdata = proxy(RwNsrYang).get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
assert len(nsrs) == 1
current_nsr = nsrs[0]
- xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(current_nsr.ns_instance_config_ref)
+ xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(quoted_key(current_nsr.ns_instance_config_ref))
proxy(RwNsrYang).wait_for(xpath, "configured", timeout=400)
- def test_monitoring_params(self, proxy):
+ def test_wait_for_pingpong_vnf_configured(self, proxy):
+ for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+ xpath = "/rw-project:project[rw-project:name='default']/vnfr-catalog/vnfr[id={}]/config-status".format(quoted_key(vnfr.id))
+ proxy(VnfrYang).wait_for(xpath, "configured", timeout=400)
+
+ def test_vnf_monitoring_params(self, proxy):
"""
Asserts:
1. The value counter ticks?
2. If the meta fields are copied over
"""
def mon_param_record(vnfr_id, mon_param_id):
- return '/vnfr-catalog/vnfr[id="{}"]/monitoring-param[id="{}"]'.format(
- vnfr_id, mon_param_id)
+ return '/rw-project:project[rw-project:name="default"]/vnfr-catalog/vnfr[id={}]/monitoring-param[id={}]'.format(
+ quoted_key(vnfr_id), quoted_key(mon_param_id))
for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
for mon_des in (vnfd.monitoring_param):
# Tick check
#assert mon_rec.value_integer > 0
- def test_cm_nsr(self, proxy):
+ def test_ns_monitoring_params(self, logger, proxy):
+ """
+ Asserts:
+ 1. monitoring-param match in nsd and ns-opdata
+ 2. The value counter ticks?
+ """
+ mon_param_path = '/rw-project:project[rw-project:name="default"]/ns-instance-opdata/nsr[ns-instance-config-ref={}]/monitoring-param[id={}]'
+ def fetch_monparam_value(nsr_ref, mon_param_id):
+ """Returns the monitoring parameter value"""
+ mon_param = proxy(NsrYang).get(mon_param_path.format(quoted_key(nsr_ref), quoted_key(mon_param_id)))
+ return mon_param.value_integer
+
+ def check_monparam_value(nsr_ref, mon_param_id):
+ """Check if monitoring-param values are getting updated"""
+ recent_mon_param_value = fetch_monparam_value(nsr_ref, mon_param_id)
+
+ # Monitor the values over a period of 60 secs. Fail the test if there is no update in mon-param value.
+ s_time = time.time()
+ while (time.time() - s_time) < 60:
+ if fetch_monparam_value(nsr_ref, mon_param_id) > recent_mon_param_value:
+ return
+ time.sleep(5)
+ assert False, 'mon-param values are not getting updated. Last value was {}'.format(recent_mon_param_value)
+
+ for nsd, nsr in yield_nsd_nsr_pairs(proxy):
+ assert len(nsd.monitoring_param) == len(nsr.monitoring_param)
+ for mon_param in nsr.monitoring_param:
+ logger.info('Verifying monitoring-param: {}'.format(mon_param.as_dict()))
+ check_monparam_value(nsr.ns_instance_config_ref, mon_param.id)
+
+ def test_cm_nsr(self, proxy, use_accounts):
"""
Asserts:
1. The ID of the NSR in cm-state
4. State of the cm-nsr
"""
for nsd, nsr in yield_nsd_nsr_pairs(proxy):
- con_nsr_xpath = "/cm-state/cm-nsr[id='{}']".format(nsr.ns_instance_config_ref)
+ con_nsr_xpath = "/rw-project:project[rw-project:name='default']/cm-state/cm-nsr[id={}]".format(
+ quoted_key(nsr.ns_instance_config_ref))
con_data = proxy(RwConmanYang).get(con_nsr_xpath)
- assert con_data.name == "ping_pong_nsd"
+ if not use_accounts:
+ assert con_data.name == rift.auto.mano.resource_name(nsd.name)
+
assert len(con_data.cm_vnfr) == 2
state_path = con_nsr_xpath + "/state"
2. Name of the vnfr
3. State of the VNFR
4. Checks for a reachable IP in mgmt_interface
- 5. Basic checks for connection point and cfg_location.
+ 5. Basic checks for connection point
"""
def is_reachable(ip, timeout=10):
rc = subprocess.call(["ping", "-c1", "-w", str(timeout), ip])
return False
nsr_cfg, _ = list(yield_nsrc_nsro_pairs(proxy))[0]
- con_nsr_xpath = "/cm-state/cm-nsr[id='{}']".format(nsr_cfg.id)
+ con_nsr_xpath = "/rw-project:project[rw-project:name='default']/cm-state/cm-nsr[id={}]".format(quoted_key(nsr_cfg.id))
for _, vnfr in yield_vnfd_vnfr_pairs(proxy):
- con_vnfr_path = con_nsr_xpath + "/cm-vnfr[id='{}']".format(vnfr.id)
+ con_vnfr_path = con_nsr_xpath + "/cm-vnfr[id={}]".format(quoted_key(vnfr.id))
con_data = proxy(RwConmanYang).get(con_vnfr_path)
assert con_data is not None
con_data = proxy(RwConmanYang).get(con_vnfr_path)
assert is_reachable(con_data.mgmt_interface.ip_address) is True
- assert len(con_data.connection_point) == 1
- connection_point = con_data.connection_point[0]
- assert connection_point.name == vnfr.connection_point[0].name
- assert connection_point.ip_address == vnfr.connection_point[0].ip_address
+ if pytest.config.getoption("--port-sequencing"):
+ # there are more than one connection point in the VNFDs for port sequencing test
+ # there is no distinction between icp and cp in 'show cm-state'.
+ # both icp and cp come under connection-point in 'show cm-state'
+ vnfr_intl_extl_connection_points_dict = {}
+ for icp in vnfr.vdur[0].internal_connection_point:
+ vnfr_intl_extl_connection_points_dict[icp.name] = icp.ip_address
+ for cp in vnfr.connection_point:
+ vnfr_intl_extl_connection_points_dict[cp.name] = cp.ip_address
+
+ assert len(con_data.connection_point) == len(vnfr_intl_extl_connection_points_dict)
+ for cp in con_data.connection_point:
+ assert cp.name in vnfr_intl_extl_connection_points_dict
+ assert cp.ip_address == vnfr_intl_extl_connection_points_dict[cp.name]
+ else:
+ assert len(con_data.connection_point) == 2
+ connection_point = con_data.connection_point[0]
+ assert connection_point.name == vnfr.connection_point[0].name
+ assert connection_point.ip_address == vnfr.connection_point[0].ip_address
+
+ @pytest.mark.skipif(
+ not (pytest.config.getoption("--static-ip") or pytest.config.getoption("--update-vnfd-instantiate")),
+ reason="need --static-ip or --update-vnfd-instantiate option to run")
+ def test_static_ip(self, proxy, logger, vim_clients, cloud_account_name):
+ """
+ Asserts:
+ 1. static-ip match in vnfd and vnfr
+ 2. static-ip match in cm-state
+ 3. Get the IP of openstack VM. Match the static-ip
+ 4. Check if the VMs are reachable from each other (Skip if type of static ip addresses is IPv6)
+ """
+ nsr_cfg, _ = list(yield_nsrc_nsro_pairs(proxy))[0]
+ con_nsr_xpath = "/rw-project:project[rw-project:name='default']/cm-state/cm-nsr[id={}]".format(quoted_key(nsr_cfg.id))
+
+ ips = {}
+ static_ip_vnfd = False
+ for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+ if vnfd.vdu[0].interface[1].static_ip_address:
+ static_ip_vnfd = True
+ assert vnfd.vdu[0].interface[1].static_ip_address == vnfr.connection_point[1].ip_address
+ if 'ping' in vnfd.name:
+ ips['mgmt_ip'] = vnfr.vdur[0].management_ip
+ else:
+ ips['static_ip'] = vnfd.vdu[0].interface[1].static_ip_address
- assert con_data.cfg_location is not None
+ con_vnfr_path = con_nsr_xpath + "/cm-vnfr[id={}]".format(quoted_key(vnfr.id))
+ con_data = proxy(RwConmanYang).get(con_vnfr_path)
+
+ assert con_data is not None
+ assert con_data.connection_point[1].ip_address == vnfd.vdu[0].interface[1].static_ip_address
+
+ xpath = "/rw-project:project[rw-project:name='default']/vlr-catalog/vlr[id={}]".format(quoted_key(vnfr.connection_point[1].vlr_ref))
+ vlr = proxy(RwVlrYang).get(xpath)
+
+ vim_client = vim_clients[cloud_account_name]
+ vm_property = vim_client.nova_server_get(vnfr.vdur[0].vim_id)
+ logger.info('VM properties for {}: {}'.format(vnfd.name, vm_property))
+
+ addr_prop_list = vm_property['addresses'][vlr.name]
+ logger.info('addresses attribute: {}'.format(addr_prop_list))
+
+ addr_prop = [addr_prop for addr_prop in addr_prop_list if addr_prop['addr'] == vnfr.connection_point[1].ip_address]
+ assert addr_prop
+
+ assert static_ip_vnfd # if False, then none of the VNF descriptors' connections points are carrying static-ip-address field.
+
+ # Check if the VMs are reachable from each other
+ username, password = ['fedora'] * 2
+ ssh_session = SshSession(ips['mgmt_ip'])
+ assert ssh_session
+ assert ssh_session.connect(username=username, password=password)
+ if not self.is_ipv6(ips['static_ip']):
+ assert ssh_session.run_command('ping -c 5 {}'.format(ips['static_ip']))[0] == 0
+
+ @pytest.mark.skipif(not pytest.config.getoption("--vnf-dependencies"), reason="need --vnf-dependencies option to run")
+ def test_vnf_dependencies(self, proxy):
+ """
+ Asserts:
+ 1. Match various config parameter sources with config primitive parameters
+ Three types of sources are being verified for pong vnfd.
+ Attribute: A runtime value like IP address of a connection point (../../../mgmt-interface, ip-address)
+ Descriptor: a XPath to a leaf in the VNF descriptor/config (../../../mgmt-interface/port)
+ Value: A pre-defined constant ('admin' as mentioned in pong descriptor)
+ 2. Match the config-parameter-map defined in NS descriptor
+ There used to be a check to verify config parameter values in cm-state (cm-state/cm-nsr/cm-vnfr/config-parameter).
+ Recently that got removed due to confd issue. So, there is no such check currently for cm-state.
+ """
+ nsr_cfg, _ = list(yield_nsrc_nsro_pairs(proxy))[0]
+ con_nsr_xpath = "/rw-project:project[rw-project:name='default']/cm-state/cm-nsr[id={}]".format(quoted_key(nsr_cfg.id))
+
+ pong_source_map, ping_request_map = None, None
+
+ for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+ # Get cm-state for this vnfr
+ con_vnfr_path = con_nsr_xpath + "/cm-vnfr[id={}]".format(quoted_key(vnfr.id))
+ con_data = proxy(RwConmanYang).get(con_vnfr_path)
+
+ # Match various config parameter sources with config primitive parameters
+ for config_primitive in vnfr.vnf_configuration.config_primitive:
+ if config_primitive.name in ("config", "start-stop"):
+ for parameter in config_primitive.parameter:
+ if parameter.name == 'mgmt_ip':
+ assert parameter.default_value == vnfr.mgmt_interface.ip_address
+ if parameter.name == 'mgmt_port':
+ assert parameter.default_value == str(vnfd.mgmt_interface.port)
+ if parameter.name == 'username':
+ assert parameter.default_value == 'admin'
+
+ # Fetch the source parameter values from pong vnf and request parameter values from ping vnf
+ if config_primitive.name == "config":
+ if vnfd.name == "pong_vnfd":
+ pong_source_map = [parameter.default_value for parameter in config_primitive.parameter if
+ parameter.name in ("service_ip", "service_port")]
+ if vnfd.name == "ping_vnfd":
+ ping_request_map = [parameter.default_value for parameter in config_primitive.parameter if
+ parameter.name in ("pong_ip", "pong_port")]
+ assert pong_source_map
+ assert ping_request_map
+ # Match the config-parameter-map defined in NS descriptor
+ assert sorted(pong_source_map) == sorted(ping_request_map)
+
+ @pytest.mark.skipif(not pytest.config.getoption("--port-security"), reason="need --port-security option to run")
+ def test_port_security(self, proxy, vim_clients, cloud_account_name):
+ """
+ Asserts:
+ 1. port-security-enabled match in vnfd and vnfr
+ 2. Get port property from openstack. Match these attributes: 'port_security_enabled', 'security_groups'
+ """
+ for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+ assert vnfd.connection_point[1].port_security_enabled == vnfr.connection_point[1].port_security_enabled
+
+ xpath = "/rw-project:project[rw-project:name='default']/vlr-catalog/vlr[id={}]".format(quoted_key(vnfr.connection_point[1].vlr_ref))
+ vlr = proxy(RwVlrYang).get(xpath)
+
+ vim_client = vim_clients[cloud_account_name]
+ port = [port for port in vim_client.neutron_port_list() if port['network_id'] == vlr.network_id if
+ port['name'] == vnfr.connection_point[1].name]
+ assert port
+
+ port_openstack = port[0]
+ assert vnfr.connection_point[1].port_security_enabled == port_openstack['port_security_enabled']
+
+ if vnfr.connection_point[1].port_security_enabled:
+ assert port_openstack['security_groups'] # It has to carry at least one security group if enabled
+ else:
+ assert not port_openstack['security_groups']
+
+ @pytest.mark.skipif(not pytest.config.getoption("--port-sequencing"), reason="need --port-sequencing option to run")
+ def test_explicit_port_sequencing(self, proxy, vim_clients, cloud_account_name, logger, port_sequencing_intf_positions, iteration):
+ """
+ Asserts:
+ 1. Interface count match in vnfd and vnfr
+ 2. Get interface ordering(mac address) from VM using 'ip a' command; From output of neutron port-list, get
+ corresponding connection point names in the same order as mac address ordered list.
+ 3. Get interface ordering from the vnfd/vdu
+ 4. Compare lists from step-2 and step-3
+ """
+ username, password = ['fedora']*2
+
+ for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+ assert len(vnfd.vdu[0].interface) == len(vnfr.vdur[0].interface)
+
+ logger.debug('Interface details for vnfd {}: {}'.format(vnfd.name, vnfd.vdu[0].as_dict()['interface']))
+
+ if iteration==1:
+ tmp_positional_values_list = []
+ for intf in vnfr.vdur[0].interface:
+ # if no position is specified for an interface, then vnfr/vdur/interface carries 0 as its positional value
+ if intf.position!=0:
+ tmp_positional_values_list.append(intf.position)
+ if 'ping' in vnfd.name:
+ assert not tmp_positional_values_list
+ if 'pong' in vnfd.name:
+ assert set(tmp_positional_values_list) == set(port_sequencing_intf_positions)
+
+ # Get a sorted list of interfaces from vnfd/vdu
+ icp_key_name, ecp_key_name = 'internal_connection_point_ref', 'external_connection_point_ref'
+ intf_with_position_field_dict, intf_without_position_field_list = {}, []
+
+ for intf in vnfd.vdu[0].interface:
+ intf = intf.as_dict()
+ cp_ref_key = icp_key_name if icp_key_name in intf else ecp_key_name
+ if 'position' in intf:
+ intf_with_position_field_dict[intf['position']] = intf[cp_ref_key]
+ else:
+ intf_without_position_field_list.append(intf[cp_ref_key])
+
+ intf_with_position_field_list = sorted(intf_with_position_field_dict.items(), key=operator.itemgetter(0))
+ sorted_cp_names_in_vnfd = [pos_cpname_tuple[1] for pos_cpname_tuple in intf_with_position_field_list] + \
+ sorted(intf_without_position_field_list)
+
+ # Establish a ssh session to VDU to get mac address list sorted by interfaces
+ ssh_session = SshSession(vnfr.vdur[0].management_ip)
+ assert ssh_session
+ assert ssh_session.connect(username=username, password=password)
+ e_code, ip_output, err = ssh_session.run_command('sudo ip a')
+ assert e_code == 0
+ logger.debug('Output of "ip a": {}'.format(ip_output))
+ mac_addr_list = re.findall(r'link/ether\s+(.*)\s+brd', ip_output)
+
+ # exclude eth0 as it is always a mgmt-interface
+ interface_starting_index = len(mac_addr_list) - len(vnfd.vdu[0].interface)
+ mac_addr_list = mac_addr_list[interface_starting_index: ]
+
+ # Get neutron port list
+ neutron_port_list = vim_clients[cloud_account_name].neutron_port_list()
+
+ # Get those ports whose mac_address value matches with one of the mac addresses in mac_addr_list
+ # This new list is already sorted as the outer loop iterates over mac_addr_list
+ sorted_cp_names_in_vm = [neutron_port_dict['name'] for mac in mac_addr_list for neutron_port_dict in neutron_port_list
+ if mac==neutron_port_dict['mac_address']]
+
+ logger.debug('Sorted connection points as per "ip a" in VM: {}'.format(sorted_cp_names_in_vm))
+ logger.debug('Sorted connection points as per ordering mentioned in vnfd: {}'.format(sorted_cp_names_in_vnfd))
+
+ assert sorted_cp_names_in_vm == sorted_cp_names_in_vnfd
+
+ @pytest.mark.skipif(
+ not (pytest.config.getoption("--vnf-dependencies") and
+ pytest.config.getoption("--service-primitive")),
+ reason="need --vnf-dependencies and --service-primitive option to run")
+ def test_primitives(
+ self, mgmt_session, cloud_module, cloud_account, descriptors,
+ fmt_nsd_catalog_xpath, logger):
+ """Testing service primitives and config primitives."""
+ # Create a cloud account
+ rift.auto.mano.create_cloud_account(
+ mgmt_session, cloud_account, "default")
+
+ rwnsr_pxy = mgmt_session.proxy(RwNsrYang)
+ nsr_pxy = mgmt_session.proxy(NsrYang)
+ rwvnfr_pxy = mgmt_session.proxy(RwVnfrYang)
+
+ # Testing a custom service primitive
+ ns_opdata = rwnsr_pxy.get(
+ '/rw-project:project[rw-project:name="default"]' +
+ '/ns-instance-opdata/nsr'
+ )
+ nsr_id = ns_opdata.ns_instance_config_ref
+ sp_rpc_input = NsrYang.YangInput_Nsr_ExecNsServicePrimitive.from_dict(
+ {'name': 'primitive_test', 'nsr_id_ref': nsr_id})
+ nsr_pxy.rpc(sp_rpc_input)
+
+ # Testing a config primitive
+ vnfr_catalog = rwvnfr_pxy.get(
+ '/rw-project:project[rw-project:name="default"]' +
+ '/vnfr-catalog'
+ )
+ cp_rpc_input = NsrYang.YangInput_Nsr_ExecNsServicePrimitive.from_dict(
+ {'nsr_id_ref': nsr_id})
+ vnf_list = cp_rpc_input.create_vnf_list()
+ vnf_primitive = vnf_list.create_vnf_primitive()
+ vnf_primitive.index = 1
+ vnf_primitive.name = "start-stop"
+ vnf_list.member_vnf_index_ref = (
+ vnfr_catalog.vnfr[0].member_vnf_index_ref
+ )
+ vnf_list._set_vnfr_id_ref(vnfr_catalog.vnfr[0].id)
+ vnf_list.vnf_primitive.append(vnf_primitive)
+ cp_rpc_input.vnf_list.append(vnf_list)
+ nsr_pxy.rpc(cp_rpc_input)
+ # Checking nsd joblist to see if both tests passed
+
+ def check_job_status(status=None):
+ ns_opdata = rwnsr_pxy.get(
+ '/rw-project:project[rw-project:name="default"]' +
+ '/ns-instance-opdata/nsr'
+ )
+ counter = 0
+ counter_limit = 2
+ for idx in range(0, counter_limit):
+ if ns_opdata.config_agent_job[idx].job_status == 'failure':
+ err_msg = (
+ 'Service primitive test failed.' +
+ ' The config agent reported failure job status')
+ raise JobStatusError(err_msg)
+
+ elif ns_opdata.config_agent_job[idx].job_status == 'success':
+ counter += 1
+ continue
+
+ if counter == counter_limit:
+ return True
+ else:
+ time.sleep(5)
+ return False
+
+ start_time = time.time()
+ while (time.time() - start_time < 60):
+ status = check_job_status()
+ if status:
+ break
+ else:
+ err_msg = (
+ 'Service primitive test failed. Timed out: 60 seconds' +
+ 'The config agent never reached a success status')
+ raise JobStatusError(err_msg)
+
+ @pytest.mark.skipif(
+ not (pytest.config.getoption("--metadata-vdud") or pytest.config.getoption("--metadata-vdud-cfgfile")),
+ reason="need --metadata-vdud or --metadata-vdud-cfgfile option to run")
+ def test_metadata_vdud(self, logger, proxy, vim_clients, cloud_account_name, metadata_host):
+ """
+ Asserts:
+ 1. content of supplemental-boot-data match in vnfd and vnfr
+ vnfr may carry extra custom-meta-data fields (e.g pci_assignement) which are by default enabled during VM creation by openstack.
+ vnfr doesn't carry config_file details; so that will be skipped during matching.
+ 2. boot-data-drive match with openstack VM's config_drive attribute
+ 3. For each VDUD which have config-file fields mentioned, check if there exists a path in the VM which
+ matches with config-file's dest field. (Only applicable for cirros_cfgfile_vnfd VNF RIFT-15524)
+ 4. For each VDUD, match its custom-meta-data fields with openstack VM's properties field
+ """
+ for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+ if any(name in vnfd.name for name in ['ping', 'pong', 'fedora']):
+ username, password = ['fedora'] * 2
+ elif 'ubuntu' in vnfd.name:
+ username, password = ['ubuntu'] * 2
+ elif 'cirros' in vnfd.name:
+ username, password = 'cirros', 'cubswin:)'
+ else:
+ assert False, 'Not expected to use this VNFD {} in this systemtest. VNFD might have changed. Exiting the test.'.format(
+ vnfd.name)
+
+ # Wait till VNF's operational-status becomes 'running'
+ # The below check is usually covered as part of test_wait_for_ns_configured
+ # But, this is mostly needed when non- ping pong packages are used e.g cirrus cfgfile package
+ xpath = "/rw-project:project[rw-project:name='default']/vnfr-catalog/vnfr[id={}]/operational-status".format(quoted_key(vnfr.id))
+ proxy(VnfrYang).wait_for(xpath, "running", timeout=300)
+ time.sleep(5)
+
+ # Get the VDU details from openstack
+ vim_client = vim_clients[cloud_account_name]
+ vm_property = vim_client.nova_server_get(vnfr.vdur[0].vim_id)
+ logger.info('VM property for {}: {}'.format(vnfd.name, vm_property))
+
+ # Establish a ssh session to VDU
+ ssh_session = SshSession(vnfr.vdur[0].management_ip)
+ assert ssh_session
+ assert ssh_session.connect(username=username, password=password)
+
+ assert vnfd.vdu[0].supplemental_boot_data.boot_data_drive == vnfr.vdur[
+ 0].supplemental_boot_data.boot_data_drive == bool(vm_property['config_drive'])
+ # Using bool() because vm_property['config_drive'] returns 'True' or '' whereas vnfr/vnfd returns True/False
+
+ # Assert 3: only for cirros vnf
+ if 'cirros' in vnfd.name:
+ for config_file in vnfd.vdu[0].supplemental_boot_data.config_file:
+ assert ssh_session.run_command('test -e {}'.format(config_file.dest))[0] == 0
+
+ vdur_metadata = {metadata.name: metadata.value for metadata in
+ vnfr.vdur[0].supplemental_boot_data.custom_meta_data}
+
+ # Get the user-data/metadata from VM
+ e_code, vm_metadata, _ = ssh_session.run_command(
+ 'curl http://{}/openstack/latest/meta_data.json'.format(metadata_host))
+ assert e_code == 0
+ vm_metadata = json.loads(vm_metadata)['meta']
+ logger.debug('VM metadata for {}: {}'.format(vnfd.name, vm_metadata))
+
+ for vdud_metadata in vnfd.vdu[0].supplemental_boot_data.custom_meta_data:
+ assert vdud_metadata.value == vdur_metadata[vdud_metadata.name]
+ assert vdud_metadata.value == vm_metadata[vdud_metadata.name]
+
+ @pytest.mark.skipif(not pytest.config.getoption("--multidisk"), reason="need --multidisk option to run")
+ def test_multidisk(self, logger, proxy, vim_clients, cloud_account_name, multidisk_testdata):
+ """
+ This feature is only supported in openstack, brocade vCPE.
+ Asserts:
+ 1. volumes match in vnfd and vnfr
+ 2. volumes match in vnfr and openstack host
+ Check no of volumes attached to the VNF VM. It should match no of volumes defined in VDUD.
+ Match volume names. In 'openstack volume show <vol_uuid>', the device should be /dev/<volume_name_in_vdud>
+ Match the volume source.
+ Match the volume size.
+ Match the Volume IDs mentioned in VNFR with openstack volume's ID.
+ """
+ ping_test_data, pong_test_data = multidisk_testdata
+ vol_attr = ['device_type', None, 'size', 'image', 'boot_priority']
+ # device_bus doesn't appear in vnfr/vdur
+
+ for vnfd, vnfr in yield_vnfd_vnfr_pairs(proxy):
+ logger.info('Verifying VNF {}'.format(vnfd.name))
+ vnf_testdata = ping_test_data if 'ping' in vnfd.name else pong_test_data
+
+ # Assert 1: Match volumes in vnfd, vnfr, test data
+ assert len(vnfd.vdu[0].volumes) == len(vnfr.vdur[0].volumes)
+
+ for vnfr_vol in vnfr.vdur[0].volumes:
+ logger.info('Verifying vnfr volume: {}'.format(vnfr_vol.as_dict()))
+ vnfd_vol = [vol for vol in vnfd.vdu[0].volumes if vol.name==vnfr_vol.name][0]
+
+ vol_testdata = vnf_testdata[vnfr_vol.name]
+
+ for i, attr in enumerate(vol_attr):
+ if attr == None: # device_bus doesn't appear in vnfr/vdur
+ continue
+ if i == 3 and (vol_testdata[i]==None or getattr(vnfd_vol, 'ephemeral')):
+ # volume source of type ephemeral doesn't appear in vnfr/vdur
+ # If no image is defined for a volume, getattr(vnfr_vol, 'ephemeral') returns False. Strange. RIFT-15165
+ assert not getattr(vnfd_vol, 'image')
+ continue
+
+ assert getattr(vnfd_vol, attr) == getattr(vnfr_vol, attr)
+ if vol_testdata[i] is not None:
+ assert getattr(vnfd_vol, attr) == vol_testdata[i]
+
+ # Assert 2: Volumes match in vnfr and openstack host
+ # Get VM properties from the VIM
+ vim_client = vim_clients[cloud_account_name]
+ vm_property = vim_client.nova_server_get(vnfr.vdur[0].vim_id)
+ logger.info('VIM- VM properties: {}'.format(vm_property))
+
+ # Get the volumes attached to this VNF VM
+ vim_volumes = vm_property['os-extended-volumes:volumes_attached']
+ logger.info('VIM- Volumes attached to this VNF VM: {}'.format(vim_volumes))
+
+ assert vim_volumes
+ assert len(vim_volumes) == len(vnfr.vdur[0].volumes)
+
+ vnfr_volumes_by_id = {vol.volume_id:vol for vol in vnfr.vdur[0].volumes}
+ for vim_volume in vim_volumes:
+ # Match the Volume IDs mentioned in VNFR with openstack volume's ID.
+ logger.info('Verifying volume: {}'.format(vim_volume['id']))
+ assert vim_volume['id'] in vnfr_volumes_by_id.keys()
+ vnfr_vol_ = vnfr_volumes_by_id[vim_volume['id']]
+
+ # Get volume details. Equivalent cli: openstack volume show <uuid>
+ vim_vol_attrs = vim_client.cinder_volume_get(vim_volume['id'])
+
+ # Match volume size
+ assert vnfr_vol_.size == vim_vol_attrs.size
+
+ # Match volume source
+ if vnfr_vol_.image: # To make sure this is not ephemeral type
+ logger.info('VIM- Image details of the volume: {}'.format(vim_vol_attrs.volume_image_metadata))
+ assert vnfr_vol_.image == vim_vol_attrs.volume_image_metadata['image_name']
+ else:
+ assert not hasattr(vim_vol_attrs, 'volume_image_metadata')
+
+ # Match volume name e.g 'device': u'/dev/vdf'
+ logger.info('Verifying [{}] in attached volumes {}'.format(vnfr_vol_.name, vim_vol_attrs.attachments))
+ assert [attachment for attachment in vim_vol_attrs.attachments if vnfr_vol_.name in attachment['device']]
+
+ @pytest.mark.skipif(not pytest.config.getoption("--l2-port-chaining"), reason="need --l2-port-chaining option to run")
+ def test_l2_port_chaining(self, proxy):
+ """
+ It uses existing NS, VNF packages: $RIFT_INSTALL/usr/rift/mano/nsds/vnffg_demo_nsd/vnffg_l2portchain_*.
+ This test function is specific to these packages. Those VNFs use Ubuntu trusty image ubuntu_trusty_1404.qcow2.
+ Asserts:
+ 1. Count of VNFFG in nsd and nsr
+ 2. Count of rsp, classifier in VNFFG descriptor and VNFFG record
+ 3. Need details what other fields need to be matched in nsd and nsr
+ 4. Traffic flows through internal hops as per the classifier and rsp
+ As per the classifiers in NS package, the following flows will be tested.
+ - Tcp packets with dest port 80 starting from pgw VNF should go through Firewall VNF.
+ - Udp packets with source port 80 starting from router VNF should go through nat->dpi
+ - Udp packets with dest port 80 starting from pgw VNF should go through dpi->nat
+
+ """
+ UDP_PROTOCOL, TCP_PROTOCOL = 17, 6
+
+ def pcap_analysis(pcap_file, src_add, dst_add, src_port=None, dst_port=None, protocol=6):
+ """Analyse packets in a pcap file and return True if there is a packet match w.r.t src_addr, dst_addr, protocol.
+ Args:
+ pcap_file: pcap file that is generated by traffic analysis utility such as tcpdump
+ src_add, dst_addr: Source & dest IP which need to be matched for a packet
+ protocol: Protocol that needs to be matched for a packet which already matched src_addr, dst_addr (protocol accepts integer e.g TCP 6, UDP 17)
+
+ Returns:
+ timestamp of the packet which is matched (Needed to check packet flow order through VNFs)
+ or
+ False: if there is no packet match
+
+ It uses scapy module to analyse pcap file. pip3 install scapy-python3
+ Other options https://pypi.python.org/pypi/pypcapfile
+ """
+ assert os.path.exists(pcap_file)
+ pkt_type = TCP if protocol==6 else UDP
+
+ pcap_obj = rdpcap(pcap_file)
+ for pkt in pcap_obj:
+ if IP in pkt:
+ if not(pkt[IP].src==src_add and pkt[IP].dst==dst_add and pkt[IP].proto==protocol):
+ continue
+ if pkt_type in pkt:
+ if src_port:
+ if not (pkt[pkt_type].sport==src_port):
+ continue
+ if dst_port:
+ if not (pkt[pkt_type].dport==dst_port):
+ continue
+ return pkt[IP].time
+ return False
+
+ # Check the VNFFG in nsd and nsr
+ for nsd, nsr in yield_nsd_nsr_pairs(proxy):
+ vnffgds = nsd.vnffgd
+ vnffgrs = nsr.vnffgr
+ assert len(vnffgds) == len(vnffgrs)
+
+ # Check the classifier, rsp in nsd and nsr
+ for vnffgd in vnffgds:
+ vnffgr = [vnffgr for vnffgr in vnffgrs if vnffgd.id == vnffgr.vnffgd_id_ref][0]
+ assert len(vnffgd.rsp) == len(vnffgr.rsp)
+ assert len(vnffgd.classifier) == len(vnffgr.classifier)
+
+ vnfrs = proxy(RwVnfrYang).get('/rw-project:project[rw-project:name="default"]/vnfr-catalog/vnfr', list_obj=True)
+
+ # Get the IP of VMs
+ vm_names = ('router', 'firewall', 'dpi', 'nat', 'pgw')
+ vm_ips = {vm_name: vnfr.vdur[0].vm_management_ip for vm_name in vm_names for vnfr in vnfrs.vnfr if
+ vm_name in vnfr.name}
+ vm_cp_ips = {vm_name: vnfr.connection_point[0].ip_address for vm_name in vm_names for vnfr in vnfrs.vnfr if
+ vm_name in vnfr.name}
+
+ # Establish Ssh sessions to the VMs
+ ssh_sessions = {}
+ for vm_name, vm_ip in vm_ips.items():
+ ssh_session = SshSession(vm_ip)
+ assert ssh_session
+ assert ssh_session.connect(username='ubuntu', password='ubuntu')
+ ssh_sessions[vm_name] = ssh_session
+
+ # Start python's SimpleHTTPServer on port 80 in the router VM
+ e_code, _, _ = ssh_sessions['router'].run_command('sudo python -m SimpleHTTPServer 80', max_wait=5)
+ assert e_code is None # Due to blocking call, it should timeout and return 'None' as exit code
+
+
+ # Check: Tcp packets with dest port 80 starting from pgw VNF should go through Firewall VNF.
+ pcap_file = 'l2test_firewall.pcap'
+ # Start tcpdump in firewall vnf and start sending tcp packets from pgw vnf
+ e_code, _, _ = ssh_sessions['firewall'].run_command(
+ 'sudo tcpdump -i eth1 -w {pcap} & sleep 10; sudo kill $!'.format(pcap=pcap_file), max_wait=4)
+ e_code, _, _ = ssh_sessions['pgw'].run_command('sudo nc {router_ip} 80 -w 0'.format(router_ip=vm_cp_ips['router']))
+
+ # Copy pcap file from firewall vnf for packet analysis
+ time.sleep(10)
+ assert ssh_sessions['firewall'].get(pcap_file, pcap_file)
+ assert pcap_analysis(pcap_file, vm_cp_ips['pgw'], vm_cp_ips['router'], dst_port=80, protocol=TCP_PROTOCOL)
+
+
+ # Check: Udp packets with source port 80 starting from router VNF should go through nat->dpi
+ pcap_nat = 'l2test_nat1.pcap'
+ pcap_dpi = 'l2test_dpi1.pcap'
+ # Start tcpdump in nat, dpi vnf and start sending udp packets from router vnf
+ e_code, _, _ = ssh_sessions['nat'].run_command(
+ 'sudo tcpdump -i eth1 -w {pcap} & sleep 15; sudo kill $!'.format(pcap=pcap_nat), max_wait=4)
+ e_code, _, _ = ssh_sessions['dpi'].run_command(
+ 'sudo tcpdump -i eth1 -w {pcap} & sleep 10; sudo kill $!'.format(pcap=pcap_dpi), max_wait=4)
+ e_code, _, _ = ssh_sessions['router'].run_command(
+ 'echo -n "hello" | sudo nc -4u {pgw_ip} 1000 -s {router_ip} -p 80 -w 0'.format(pgw_ip=vm_cp_ips['pgw'],
+ router_ip=vm_cp_ips[
+ 'router']))
+
+ # Copy pcap file from nat, dpi vnf for packet analysis
+ time.sleep(10)
+ assert ssh_sessions['nat'].get(pcap_nat, pcap_nat)
+ assert ssh_sessions['dpi'].get(pcap_dpi, pcap_dpi)
+ packet_ts_nat = pcap_analysis(pcap_nat, vm_cp_ips['router'], vm_cp_ips['pgw'], src_port=80, protocol=UDP_PROTOCOL)
+ packet_ts_dpi = pcap_analysis(pcap_dpi, vm_cp_ips['router'], vm_cp_ips['pgw'], src_port=80, protocol=UDP_PROTOCOL)
+ assert packet_ts_nat
+ assert packet_ts_dpi
+ assert packet_ts_nat < packet_ts_dpi # Packet flow must follow nat -> dpi
+
+
+ # Check: Udp packets with dest port 80 starting from pgw VNF should go through dpi->nat
+ pcap_nat = 'l2test_nat2.pcap'
+ pcap_dpi = 'l2test_dpi2.pcap'
+ # Start tcpdump in nat, dpi vnf and start sending udp packets from router vnf
+ e_code, _, _ = ssh_sessions['nat'].run_command(
+ 'sudo tcpdump -i eth1 -w {pcap} & sleep 15; sudo kill $!'.format(pcap=pcap_nat), max_wait=4)
+ e_code, _, _ = ssh_sessions['dpi'].run_command(
+ 'sudo tcpdump -i eth1 -w {pcap} & sleep 10; sudo kill $!'.format(pcap=pcap_dpi), max_wait=4)
+ e_code, _, _ = ssh_sessions['pgw'].run_command(
+ 'echo -n "hello" | sudo nc -4u {router_ip} 80 -w 0'.format(router_ip=vm_cp_ips['router']))
+
+ # Copy pcap file from nat, dpi vnf for packet analysis
+ time.sleep(10)
+ assert ssh_sessions['nat'].get(pcap_nat, pcap_nat)
+ assert ssh_sessions['dpi'].get(pcap_dpi, pcap_dpi)
+ packet_ts_nat = pcap_analysis(pcap_nat, vm_cp_ips['pgw'], vm_cp_ips['router'], dst_port=80, protocol=UDP_PROTOCOL)
+ packet_ts_dpi = pcap_analysis(pcap_dpi, vm_cp_ips['pgw'], vm_cp_ips['router'], dst_port=80, protocol=UDP_PROTOCOL)
+ assert packet_ts_nat
+ assert packet_ts_dpi
+ # The below assert used to fail while testing. ts_dpi is ahead of ts_nat in few microseconds
+ # Need to confirm if thats expected
+ assert packet_ts_dpi < packet_ts_nat # Packet flow must follow dpi -> nat
@pytest.mark.depends('nsr')
@pytest.mark.setup('nfvi')
@pytest.mark.incremental
class TestNfviMetrics(object):
+ @pytest.mark.skipif(True, reason='NFVI metrics are disabled - RIFT-15789')
def test_records_present(self, proxy):
assert_records(proxy)
@pytest.mark.depends('nfvi')
@pytest.mark.incremental
+@pytest.mark.skipif(pytest.config.getoption("--port-sequencing"), reason="Skip this for port-sequencing test")
class TestRecordsDescriptors:
- def test_create_update_vnfd(self, proxy, updated_ping_pong_records):
+ def test_create_update_vnfd(self, proxy, updated_ping_pong_descriptors):
"""
Verify VNFD related operations
Asserts:
If a VNFD record is created
"""
- ping_vnfd, pong_vnfd, _ = updated_ping_pong_records
- vnfdproxy = proxy(RwVnfdYang)
+ ping_vnfd, pong_vnfd, _ = updated_ping_pong_descriptors
+ vnfdproxy = proxy(RwProjectVnfdYang)
- for vnfd_record in [ping_vnfd, pong_vnfd]:
- xpath = "/vnfd-catalog/vnfd"
- vnfdproxy.create_config(xpath, vnfd_record.vnfd)
+ for vnfd in [ping_vnfd, pong_vnfd]:
+ xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd"
+ vnfdproxy.create_config(xpath, vnfd)
- xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
- vnfd = vnfdproxy.get(xpath)
- assert vnfd.id == vnfd_record.id
+ xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd.id))
+ updated_vnfd = vnfdproxy.get(xpath)
+ assert updated_vnfd.id == vnfd.id
- vnfdproxy.replace_config(xpath, vnfd_record.vnfd)
+ vnfdproxy.replace_config(xpath, vnfd)
- def test_create_update_nsd(self, proxy, updated_ping_pong_records):
+ def test_create_update_nsd(self, proxy, updated_ping_pong_descriptors):
"""
Verify NSD related operations
Asserts:
If NSD record was created
"""
- _, _, ping_pong_nsd = updated_ping_pong_records
- nsdproxy = proxy(NsdYang)
+ _, _, ping_pong_nsd = updated_ping_pong_descriptors
+ nsdproxy = proxy(RwProjectNsdYang)
- xpath = "/nsd-catalog/nsd"
- nsdproxy.create_config(xpath, ping_pong_nsd.descriptor)
+ xpath = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd"
+ nsdproxy.create_config(xpath, ping_pong_nsd)
- xpath = "/nsd-catalog/nsd[id='{}']".format(ping_pong_nsd.id)
+ xpath = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd[id={}]".format(quoted_key(ping_pong_nsd.id))
nsd = nsdproxy.get(xpath)
assert nsd.id == ping_pong_nsd.id
- nsdproxy.replace_config(xpath, ping_pong_nsd.descriptor)
+ nsdproxy.replace_config(xpath, ping_pong_nsd)
@brief Pingpong scaling system test
"""
+import gi
import os
import pytest
+import re
import subprocess
import sys
import time
from gi.repository import (
NsrYang,
- NsdYang,
+ RwProjectNsdYang,
VnfrYang,
RwNsrYang,
- RwNsdYang,
RwVnfrYang,
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
@pytest.mark.setup('pingpong_nsd')
@pytest.mark.depends('launchpad')
class TestSetupPingpongNsd(object):
def test_onboard(self, mgmt_session, descriptors):
for descriptor in descriptors:
- rift.auto.descriptor.onboard(mgmt_session.host, descriptor)
+ rift.auto.descriptor.onboard(mgmt_session, descriptor)
def test_install_sar(self, mgmt_session):
- install_cmd = 'ssh {mgmt_ip} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- sudo yum install sysstat --assumeyes'.format(
- mgmt_ip=mgmt_session.host,
- )
+ get_platform_cmd = 'ssh {host} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- python3 -mplatform'
+ platform_result = subprocess.check_output(get_platform_cmd.format(host=mgmt_session.host), shell=True)
+ platform_match = re.search('(Ubuntu|fedora)-(\d+)', platform_result.decode('ascii'))
+ assert platform_match is not None
+ (dist, ver) = platform_match.groups()
+ if dist == 'fedora':
+ install_cmd = 'ssh {host} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- sudo yum install sysstat --assumeyes'.format(
+ host=mgmt_session.host,
+ )
+ elif dist == 'Ubuntu':
+ install_cmd = 'ssh {host} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- sudo apt-get -q -y install sysstat'.format(
+ host=mgmt_session.host,
+ )
subprocess.check_call(install_cmd, shell=True)
-
@pytest.fixture(scope='function', params=[5,10,15,20,25])
def service_count(request):
'''Fixture representing the number of services to test'''
def test_scaling(self, mgmt_session, cloud_account_name, service_count):
def start_services(mgmt_session, desired_service_count, max_attempts=3):
- catalog = mgmt_session.proxy(NsdYang).get_config('/nsd-catalog')
+ catalog = mgmt_session.proxy(RwProjectNsdYang).get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsd = catalog.nsd[0]
- nsr_path = "/ns-instance-config"
+ nsr_path = "/rw-project:project[rw-project:name='default']/ns-instance-config"
nsr = mgmt_session.proxy(RwNsrYang).get_config(nsr_path)
service_count = len(nsr.nsr)
while attempts < max_attempts and service_count < desired_service_count:
attempts += 1
+ old_opdata = mgmt_session.proxy(RwNsrYang).get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
for count in range(service_count, desired_service_count):
nsr = rift.auto.descriptor.create_nsr(
cloud_account_name,
"pingpong_%s" % str(uuid.uuid4().hex[:10]),
- nsd.id)
- mgmt_session.proxy(RwNsrYang).create_config('/ns-instance-config/nsr', nsr)
+ nsd)
+ mgmt_session.proxy(RwNsrYang).create_config('/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr', nsr)
+
+ time.sleep(10)
- ns_instance_opdata = mgmt_session.proxy(RwNsrYang).get('/ns-instance-opdata')
- for nsr in ns_instance_opdata.nsr:
+ new_opdata = mgmt_session.proxy(RwNsrYang).get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
+ new_ns_instance_config_refs = {nsr.ns_instance_config_ref for nsr in new_opdata.nsr} - {nsr.ns_instance_config_ref for nsr in old_opdata.nsr}
+ for ns_instance_config_ref in new_ns_instance_config_refs:
try:
- xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.ns_instance_config_ref)
- mgmt_session.proxy(RwNsrYang).wait_for(xpath, "running", fail_on=['failed'], timeout=180)
- xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/config-status".format(nsr.ns_instance_config_ref)
+ xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(quoted_key(ns_instance_config_ref))
+ mgmt_session.proxy(RwNsrYang).wait_for(xpath, "running", fail_on=['failed'], timeout=400)
+ xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(quoted_key(ns_instance_config_ref))
mgmt_session.proxy(RwNsrYang).wait_for(xpath, "configured", fail_on=['failed'], timeout=450)
service_count += 1
+ attempts = 0 # Made some progress so reset the number of attempts remaining
except rift.auto.session.ProxyWaitForError:
- mgmt_session.proxy(RwNsrYang).delete_config("/ns-instance-config/nsr[id='{}']".format(nsr.ns_instance_config_ref))
+ mgmt_session.proxy(RwNsrYang).delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id={}]".format(quoted_key(ns_instance_config_ref)))
+ time.sleep(5)
def monitor_launchpad_performance(service_count, interval=30, samples=1):
sar_cmd = "ssh {mgmt_ip} -q -n -o BatchMode=yes -o StrictHostKeyChecking=no -- sar -A {interval} {samples}".format(
class TestTeardownPingpongNsr(object):
def test_teardown_nsr(self, mgmt_session):
- ns_instance_config = mgmt_session.proxy(RwNsrYang).get_config('/ns-instance-config')
+ ns_instance_config = mgmt_session.proxy(RwNsrYang).get_config('/rw-project:project[rw-project:name="default"]/ns-instance-config')
for nsr in ns_instance_config.nsr:
- mgmt_session.proxy(RwNsrYang).delete_config("/ns-instance-config/nsr[id='{}']".format(nsr.id))
+ mgmt_session.proxy(RwNsrYang).delete_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id={}]".format(quoted_key(nsr.id)))
time.sleep(60)
- vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get('/vnfr-catalog')
+ vnfr_catalog = mgmt_session.proxy(RwVnfrYang).get('/rw-project:project[rw-project:name="default"]/vnfr-catalog')
assert vnfr_catalog is None or len(vnfr_catalog.vnfr) == 0
def test_generate_plots(self):
--- /dev/null
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+import itertools
+import random
+import os
+import gi
+
+import rift.auto.session
+import rift.auto.mano
+
+gi.require_version('RwAuthExtWebSvcYang', '1.0')
+gi.require_version('RwAuthExtUserYang', '1.0')
+from gi.repository import (
+ RwAuthExtWebSvcYang,
+ RwAuthExtUserYang,
+ )
+
+@pytest.fixture(scope='session')
+def auto_certs_dir():
+ """Fixture that returns path of certs specific to automation"""
+ return os.path.join(os.getenv('RIFT_INSTALL'), 'usr/rift/systemtest/config/ssl')
+
+@pytest.fixture(scope='session')
+def set_webauth_cert_choice(tbac):
+ """Fixture that retuns a boolean value indicating whether to configure new key & cert in launchpad"""
+ if not tbac:
+ return False
+ # return random.choice([True, False])
+ return True
+
+@pytest.fixture(scope='session', autouse=True)
+def configure_key_cert(logger, set_webauth_cert_choice, auto_certs_dir, mgmt_session, confd_host, rw_user_proxy,
+ user_domain, ):
+ """Configures new cert, key in webauth-server-config, webauth-client-config"""
+ if set_webauth_cert_choice:
+ logger.debug('Configuring new certs from this path: {}'.format(auto_certs_dir))
+ print('Configuring new certs from this path: {}'.format(auto_certs_dir))
+ else:
+ return
+
+ cert_path = os.path.join(auto_certs_dir, 'rift_auto.crt')
+ key_path = os.path.join(auto_certs_dir, 'rift_auto.key')
+
+ server_ssl_config_xpath = '/rw-auth-ext-web-svc:webauth-server-config/rw-auth-ext-web-svc:ssl-config'
+ client_config_xpath = '/rw-auth-ext-user:webauth-client-config'
+ webauth_server_proxy = mgmt_session.proxy(RwAuthExtWebSvcYang)
+ webauth_client_proxy = mgmt_session.proxy(RwAuthExtUserYang)
+
+ def configure_webauth_server():
+ logger.debug('configuring the webauth-server')
+ webauth_server_obj = RwAuthExtWebSvcYang.YangData_RwAuthExtWebSvc_WebauthServerConfig_SslConfig.from_dict(
+ {'server_cert_path': cert_path, 'server_key_path': key_path})
+ webauth_server_proxy.replace_config(server_ssl_config_xpath, webauth_server_obj)
+
+ def configure_webauth_client():
+ logger.debug('configuring the webauth-client')
+ webauth_client_obj = RwAuthExtUserYang.YangData_RwAuthExtUser_WebauthClientConfig.from_dict(
+ {'ca_cert_path': cert_path})
+ webauth_client_proxy.merge_config(client_config_xpath, webauth_client_obj)
+
+ # Check if its running after launchpad reload; if so skip configuring the certs again (RIFT-17641)
+ server_ssl_config = webauth_server_proxy.get_config(server_ssl_config_xpath)
+ if server_ssl_config.server_cert_path != cert_path:
+ user, password = ['demo']*2
+ logger.debug('Adding an external user {}'.format(user))
+ rift.auto.mano.create_user(rw_user_proxy, user, password, user_domain)
+
+ # Shuffling the function calls for server and client configuration
+ list_func = [configure_webauth_server, configure_webauth_client]
+ random.shuffle(list_func)
+
+ # configuring either of the server or client
+ list_func.pop()()
+
+ # Try getting access token for an external user; it should fail
+ with pytest.raises(Exception,
+ message='Should not be able to get access token for user {} as certs are not yet configured for both server and client'.format(
+ user)):
+ logger.debug('Trying to get access token for user {}'.format(user))
+ access_token = rift.auto.session.get_access_token(user, password, confd_host)
+ logger.debug('Access token for user {}: {}'.format(user, access_token))
+
+ list_func.pop()()
+
+ # Try getting access token for an external user; it should pass now
+ rift.auto.session.get_access_token(user, password, confd_host)
+
+ # RIFT-17641: Delete user 'demo'
+ rift.auto.mano.delete_user(rw_user_proxy, user, user_domain)
+
+@pytest.fixture(scope='session')
+def all_roles_combinations(all_roles):
+ """Returns a combination of all roles except single combinations i.e if there are a total of N roles, then it
+ returns (2^N-1)-N role combinations.
+ Here, we have 11 roles, so it returns 2047-11=2036 combinations"""
+ all_roles_combinations_ = list()
+ for set_length in range(2, len(all_roles)+1):
+ for roles_combination in itertools.combinations(all_roles, set_length):
+ all_roles_combinations_.append(roles_combination)
+ return tuple(all_roles_combinations_)
--- /dev/null
+#!/usr/bin/env python3
+"""
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+
+import gi
+import pytest
+
+from rift.auto.session import NetconfSession, RestconfSession
+import rift.auto.mano
+
+gi.require_version('RwUserYang', '1.0')
+gi.require_version('RwProjectYang', '1.0')
+gi.require_version('RwRbacPlatformYang', '1.0')
+gi.require_version('RwRbacInternalYang', '1.0')
+from gi.repository import (
+ RwUserYang,
+ RwProjectYang,
+ RwRbacPlatformYang,
+ RwRbacInternalYang,
+)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+@pytest.fixture(scope='session')
+def rbac_test_data():
+ """Fixture which returns rbac test data: users, roles, projects being used in the test.
+ users: tuple of user names
+ projects: tuple of project names
+ map_platform_roles: mapping of a user to multiple platform roles
+ map_project_roles: mapping of a user to multiple projects (project, list of roles in that project)"""
+ users = ('admin3', 'user1', 'user2', )
+
+ projects = ('project1', 'project2', )
+
+ map_platform_roles = {
+ 'admin3': ['rw-rbac-platform:platform-admin'],
+ }
+
+ map_project_roles = {
+ 'user1': [
+ ('project1', ['rw-project:project-admin']),
+ ('project2', ['rw-project:project-oper']),
+ ],
+
+ 'user2': [
+ ('project1', ['rw-project:project-admin']),
+ ],
+
+ 'admin3': [],
+ }
+
+ return {'users': users, 'projects': projects, 'roles': (map_platform_roles, map_project_roles)}
+
+
+@pytest.mark.setup('rbac_setup')
+@pytest.mark.incremental
+class TestRbacSetup(object):
+ def test_create_users(self, rbac_test_data, rw_user_proxy, user_domain, rbac_user_passwd, logger):
+ """Creates all users as per rbac test-data and verify if they are successfully created."""
+ users_test_data = rbac_test_data['users']
+
+ # Create all users mentioned in users_test_data
+ for user in users_test_data:
+ rift.auto.mano.create_user(rw_user_proxy, user, rbac_user_passwd, user_domain)
+
+ # Verify users are created
+ user_config = rw_user_proxy.get_config('/user-config')
+ assert user_config
+
+ user_config_test_data = [user.user_name for user in user_config.user if user.user_name in users_test_data]
+ logger.debug('Users: {} have been successfully created'.format(user_config_test_data))
+
+ assert len(user_config_test_data) == len(users_test_data)
+
+ def test_create_projects(self, logger, rw_conman_proxy, rbac_test_data):
+ """Creates all projects as per rbac test-data and verify them."""
+ projects_test_data = rbac_test_data['projects']
+
+ # Create all projects mentioned in projects_test_data and verify if they are created
+ for project in projects_test_data:
+ logger.debug('Creating project {}'.format(project))
+ rift.auto.mano.create_project(rw_conman_proxy, project)
+
+ def test_assign_platform_roles_to_users(self, rbac_platform_proxy, logger, rbac_test_data, user_domain, rw_rbac_int_proxy):
+ """Assign platform roles to an user as per test data mapping and verify them."""
+ platform_roles_test_data, _ = rbac_test_data['roles']
+
+ # Loop through the user & platform-roles mapping and assign roles to the user
+ for user, roles in platform_roles_test_data.items():
+ for role in roles:
+ rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, role, user, user_domain, rw_rbac_int_proxy)
+
+ # Verify if the roles are assigned as per test data mapping
+ platform_config = rbac_platform_proxy.get_config('/rbac-platform-config')
+
+ platform_config_test_data_match = 0
+ logger.debug('Matching platform_roles_test_data with rbac-platform-config')
+ for user in platform_config.user:
+ if user.user_name in platform_roles_test_data:
+ logger.debug('Matched user: {}'.format(user.as_dict()))
+ platform_config_test_data_match += 1
+
+ test_data_user_platform_roles = platform_roles_test_data[user.user_name]
+ assert len(test_data_user_platform_roles) == len(user.role)
+ assert len(test_data_user_platform_roles) == len([role for role in user.role if role.role in test_data_user_platform_roles])
+
+ assert platform_config_test_data_match == len(platform_roles_test_data)
+
+ def test_assign_users_to_projects_roles(self, rbac_test_data, rw_project_proxy, user_domain, rw_rbac_int_proxy):
+ """Assign projects and roles to an user as per test data mapping."""
+ _, project_roles_test_data = rbac_test_data['roles']
+
+ # Loop through the user & (project, role) mapping and asign the project, role to the user
+ for user, project_role_tuple in project_roles_test_data.items():
+ for project, role_list in project_role_tuple:
+ for role in role_list:
+ rift.auto.mano.assign_project_role_to_user(rw_project_proxy, role, user, project, user_domain, rw_rbac_int_proxy)
+
+
+@pytest.mark.depends('rbac_setup')
+@pytest.mark.incremental
+class TestRbacVerification(object):
+ def test_match_rbac_internal(self, mgmt_session, logger, rbac_test_data):
+ """Verifies the test data with rw-rbac-internal"""
+ rbac_intl_proxy = mgmt_session.proxy(RwRbacInternalYang)
+ rbac_intl = rbac_intl_proxy.get('/rw-rbac-internal')
+
+ # Verify users in show rw-rbac-internal
+ users_test_data = rbac_test_data['users']
+ assert len(rbac_intl.user) == len(users_test_data) + 2 # 'admin', 'oper' are two default users
+ users_match = 0
+ for user in rbac_intl.user:
+ if user.user_name in users_test_data:
+ logger.info('User matched: {}'.format(user.as_dict()))
+ users_match += 1
+ assert users_match == len(users_test_data)
+
+ # Verify roles (only project roles mapping, not the platform roles mapping)
+ # Each role in rw-rbac-internal is associated with a project through the field 'keys'. All mapping from users to project
+ # is part of project roles mapping.
+ _, project_roles_test_data = rbac_test_data['roles']
+ for user, project_role_tuple in project_roles_test_data.items():
+ for project, role_list in project_role_tuple:
+ for role in role_list:
+ logger.debug("Matching user: '{}' and its role '{}' in project '{}'".format(user, role, project))
+
+ # Verify there exists a role entry in rw-rbac-internal which matches 'role', 'project'
+ rbac_intl_role = [role_ for role_ in rbac_intl.role if (role_.role==role and role_.keys==project)]
+
+ # Each role is identified through its key 'project'. So there can be only one such role which matches
+ # the above 'role.role==role and role.keys=project'
+ assert len(rbac_intl_role) == 1
+ logger.info('Matched role in rw-rbac-internal: {}'.format(rbac_intl_role[0].as_dict()))
+
+ # Verify the user list in this rw-rbac-internal role carries 'user'
+ assert len([user_ for user_ in rbac_intl_role[0].user if user_.user_name==user]) == 1
+
+ def test_role_access(self, logger, session_class, confd_host, rbac_test_data, rbac_user_passwd, project_keyed_xpath):
+ """Verifies the roles assigned to users for a project. Login as each user and verify the user can only access
+ the projects linked to it."""
+ _, project_roles_test_data = rbac_test_data['roles']
+ projects_test_data = rbac_test_data['projects']
+
+ for user, project_role_tuple in project_roles_test_data.items():
+ logger.debug('Verifying user: {}'.format(user))
+ projects_not_accessible = list(projects_test_data)
+
+ # Establish a session with this current user
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, rbac_user_passwd)
+ print ("Connected using username {} password {}".format(user, rbac_user_passwd))
+
+ rw_project_proxy_ = user_session.proxy(RwProjectYang)
+
+ if project_role_tuple: # Skip the for loop for users who are not associated with any project e.g admin3
+ for project, role_list in project_role_tuple:
+ projects_not_accessible.remove(project)
+ project_config = rw_project_proxy_.get_config(project_keyed_xpath.format(project_name=quoted_key(project))+'/project-config')
+ user_ = [user_ for user_ in project_config.user if user_.user_name==user]
+ logger.debug('User: {}'.format(user_[0].as_dict()))
+ assert len(user_) == 1
+
+ # Match the roles for this user
+ assert set(role_list) == set([role_.role for role_ in user_[0].role])
+
+ # It can't access any other project.
+ for project in projects_not_accessible:
+ assert rw_project_proxy_.get_config(project_keyed_xpath.format(project_name=quoted_key(project))+'/project-config') is None # It should
+ # return None as the project is not mapped to this user.
+
+ def test_admin_user(self, logger, rw_project_proxy, project_keyed_xpath, rbac_test_data):
+ """Verify admin can see all projects as part of test-data as well as the default project"""
+ projects_test_data = rbac_test_data['projects']
+ projects_test_data = projects_test_data + ('default', )
+
+ # Verify admin user can see all projects including default
+ # If it is post-reboot verification, then check default project should not be listed
+ for project in projects_test_data:
+ project_ = rw_project_proxy.get_config(project_keyed_xpath.format(project_name=quoted_key(project))+'/project-state', list_obj=True)
+ if project=='default' and pytest.config.getoption('--default-project-deleted'):
+ assert project_ is None
+ continue
+ assert project_ # If the project doesn't exist, it returns None
+
+
+@pytest.mark.depends('rbac_setup')
+@pytest.mark.teardown('rbac_setup')
+@pytest.mark.incremental
+class TestRbacTeardown(object):
+ def test_delete_default_project(self, logger, rw_conman_proxy):
+ """Only deletes the default project"""
+ logger.debug('Deleting the default project')
+ rift.auto.mano.delete_project(rw_conman_proxy, 'default')
+
+ def test_delete_projects(self, logger, rbac_test_data, rw_conman_proxy):
+ """Deletes the projects which are part of rbac test-data and verify their deletion"""
+ projects_test_data = rbac_test_data['projects']
+
+ # Delete the projects
+ for project in projects_test_data:
+ logger.debug('Deleting project {}'.format(project))
+ rift.auto.mano.delete_project(rw_conman_proxy, project)
+
+ def test_delete_users(self, logger, rw_user_proxy, rbac_platform_proxy, platform_config_keyed_xpath,
+ user_keyed_xpath, rbac_test_data, user_domain):
+ """Deletes the users which are part of rbac test-data and verify their deletion"""
+ users_test_data = rbac_test_data['users']
+ map_platform_roles, _ = rbac_test_data['roles']
+
+ # Deletes the users
+ # If an user is associated with a platform role, at first it needs be removed from rbac-platform-config
+ # before deleting it from user-config
+ for user in users_test_data:
+ if user in map_platform_roles:
+ rbac_platform_proxy.delete_config(platform_config_keyed_xpath.format(user=quoted_key(user), domain=quoted_key(user_domain)))
+ rw_user_proxy.delete_config(user_keyed_xpath.format(user=quoted_key(user), domain=quoted_key(user_domain)))
+
+ # Verify if the users are deleted
+ user_config = rw_user_proxy.get_config('/user-config')
+ default_users = [user.user_name for user in user_config.user]
+
+ logger.debug('Default users list: {}'.format(default_users))
+ expected_empty_user_list = [user for user in users_test_data if user in default_users]
+ assert not expected_empty_user_list
--- /dev/null
+#!/usr/bin/env python3
+"""
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#This file contains the code for RIFT-16314, RIFT-16315, RIFT-16536,
+#RIFT-16537, RIFT-16541, RIFT-16313, RIFT-16692, RIFT-16637, RIFT-16636.
+"""
+import gi
+import pytest
+
+import rift.auto.mano
+
+gi.require_version('RwUserYang', '1.0')
+gi.require_version('RwProjectYang', '1.0')
+gi.require_version('RwRbacPlatformYang', '1.0')
+gi.require_version('RwRbacInternalYang', '1.0')
+gi.require_version('RwlogMgmtYang', '1.0')
+
+
+from gi.repository import (
+ RwUserYang,
+ RwProjectYang,
+ RwRbacPlatformYang,
+ RwRbacInternalYang,
+ RwlogMgmtYang,
+ RwConmanYang
+)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+
+@pytest.mark.setup('rbac_setup')
+@pytest.mark.incremental
+class TestIdentity(object):
+ """Test Identity."""
+
+ platform_role_users = ['platform_user_admin', 'platform_user_oper', 'platform_user_super_admin']
+ platform_users = ['platform_user_admin', 'platform_user_oper', 'platform_user_test', 'platform_user_super_admin']
+
+ project_roles = (
+ 'rw-project-mano:catalog-oper', 'rw-project-mano:catalog-admin',
+ 'rw-project-mano:lcm-oper', 'rw-project-mano:lcm-admin',
+ 'rw-project-mano:account-oper', 'rw-project-mano:account-admin',
+ 'rw-project:project-admin', 'rw-project:project-oper'
+ )
+ platform_roles = (
+ 'rw-rbac-platform:platform-admin',
+ 'rw-rbac-platform:platform-oper',
+ 'rw-rbac-platform:super-admin'
+ )
+
+ RBAC_PROJECTS = ['default']
+ RBAC_USERS = []
+
+ TEST_PROJECTS = []
+ TEST_USERS = []
+
+ # This is required so as to track the
+ # already deleted users when creation and deletion
+ # are performed in ad-hoc way.
+ # Checking this set allows us to ignore Proxy request
+ # errors when deletion is performed twice.
+ DELETED_PROJECTS_TRACKER = set()
+
+ INVALID_CREDENTIALS = {
+ 'Jason' * 500: 'likeu' * 500
+ }
+
+ POSSIBLY_PROBLEMATIC_CREDENTIALS = {
+ 'Ja#son': ['lik#eu', 'syste#m'],
+ 'Ja&son': ['lik&eu', 'syste&m'],
+ 'J%ason': ['lik%eu', 'syste%m'],
+ 'Jåson': ['likeü', 'system'],
+ '<Jason>': ['<likeu>', '<system>'],
+ '/jason': ['/likeu', '/system;'],
+ 'jason;': ['likeu;', 'system;'],
+ 'j*son': ['like*u;', 'syste*m'],
+ 'j@so?': ['l!keu;', 'system!']
+ }
+
+ INAVLID_LOGIN_CREDENTIALS = {
+ 'wrong_username': 'mypasswd',
+ 'testuser': 0,
+ 0: 'mypasswd',
+ 0: 0,
+ 'wrong_username': 'wrong_password'
+ }
+
+ INVALID_PROJECT_KEYS = ['this_project_doesnt_exist', 'Test01']
+ INVALID_PROJECT_CREATE_KEYS = ['testproject' * 500, ]
+ #POSSIBLY_PROBLEMATIC_KEYS = ['/projectname', 'project name', 'projectname.', 'project,name', 'Projëçt', 'Pro;je:ct', 'Proj*ct', 'Pr@ject']
+ POSSIBLY_PROBLEMATIC_KEYS = ['/projectname', 'project name', 'projectname.', 'project,name', 'Pro;je:ct', 'Proj*ct', 'Pr@ject']
+
+ def test_platform_roles(self, rw_user_proxy, rbac_platform_proxy, rbac_user_passwd, user_domain, session_class, tbac,
+ confd_host, platform_roles, rw_rbac_int_proxy):
+ # Setting users and roles up for upcoming checks
+ rift.auto.mano.create_user(rw_user_proxy, 'platform_user_super_admin', rbac_user_passwd, user_domain)
+ rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, 'rw-rbac-platform:super-admin',
+ 'platform_user_super_admin', user_domain, rw_rbac_int_proxy)
+ rift.auto.mano.create_user(rw_user_proxy, 'platform_user_admin', rbac_user_passwd, user_domain)
+ rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, 'rw-rbac-platform:platform-admin',
+ 'platform_user_admin', user_domain, rw_rbac_int_proxy)
+ rift.auto.mano.create_user(rw_user_proxy, 'platform_user_oper', rbac_user_passwd, user_domain)
+ rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, 'rw-rbac-platform:platform-oper',
+ 'platform_user_oper', user_domain, rw_rbac_int_proxy)
+ rift.auto.mano.create_user(rw_user_proxy, 'platform_user_test', rbac_user_passwd, user_domain)
+
+ """Various access tests for platform users"""
+
+ # Testing if platform role users have access to /rbac-platform-config
+ for user in self.platform_role_users:
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, rbac_user_passwd)
+ pxy = user_session.proxy(RwRbacPlatformYang)
+ access_ = pxy.get_config("/rbac-platform-config/user[user-name='platform_user_admin'][user-domain={}]"
+ .format(quoted_key(user_domain)))
+ assert access_ is not None
+ rift.auto.mano.close_session(user_session)
+
+ # Testing if platform role users have access to /rbac-platform-state
+ for user in self.platform_role_users:
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, rbac_user_passwd)
+ pxy = user_session.proxy(RwRbacPlatformYang)
+ access_ = pxy.get_config("/rbac-platform-state/user[user-name='platform_user_admin'][user-domain={}]"
+ .format(quoted_key(user_domain)))
+ if user == 'platform_user_oper':
+ assert access_ is None
+ else:
+ """At the time of writing this code, /rbac-platform-state/user is unpopulated and so the access_ will be None no matter what.
+ In the future when the path /rbac-platform-state/user is populated this test will break. When that happens, just change
+ the next line to 'access_ is not None'
+ """
+ assert access_ is None
+ rift.auto.mano.close_session(user_session)
+
+ """Changing roles and verifying it """
+
+ # Case 01 Assign and then revoke that role. Assign a second role and see if that sticks and that the older role hasn't stayed on.
+ rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, 'rw-rbac-platform:platform-oper',
+ 'platform_user_test', user_domain, rw_rbac_int_proxy)
+ rift.auto.mano.revoke_platform_role_from_user(rbac_platform_proxy, 'rw-rbac-platform:platform-oper',
+ 'platform_user_test', user_domain)
+ rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, 'rw-rbac-platform:platform-admin',
+ 'platform_user_test', user_domain, rw_rbac_int_proxy)
+ # If the older role didn't stick and the new role did stick (as it should), then the user should be able to change another users password
+ user_session = rift.auto.mano.get_session(session_class, confd_host, 'platform_user_test', rbac_user_passwd)
+ pxy = user_session.proxy(RwUserYang)
+ rift.auto.mano.update_password(pxy, 'platform_user_oper', 'even_newer_password', user_domain, rw_rbac_int_proxy)
+ rift.auto.mano.close_session(user_session)
+
+ # Case 02 Switching the roles back after Case 01
+ rift.auto.mano.revoke_platform_role_from_user(rbac_platform_proxy, 'rw-rbac-platform:platform-admin',
+ 'platform_user_test', user_domain)
+ rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, 'rw-rbac-platform:platform-oper',
+ 'platform_user_test', user_domain, rw_rbac_int_proxy)
+ # If the older role didn't stick and the new role did stick (as it should), then the user shouldn't be able to change another users password
+ user_session = rift.auto.mano.get_session(session_class, confd_host, 'platform_user_test', rbac_user_passwd)
+ pxy = user_session.proxy(RwUserYang)
+ with pytest.raises(Exception, message="User shouldn't be able to change another user's password") as excinfo:
+ rift.auto.mano.update_password(pxy, 'platform_user_oper', 'new_password', user_domain, rw_rbac_int_proxy)
+ rift.auto.mano.close_session(user_session)
+
+ if not tbac:
+ """Disabling and enabling users and verifying it"""
+
+ rift.auto.mano.create_user(rw_user_proxy, 'disabled_user', rbac_user_passwd, user_domain)
+ rift.auto.mano.update_password(rw_user_proxy, 'platform_user_oper', rbac_user_passwd, user_domain, rw_rbac_int_proxy)
+ # Checking if the disabled user can login
+ rift.auto.mano.disable_user(rw_user_proxy, 'disabled_user', user_domain, rw_rbac_int_proxy)
+ with pytest.raises(Exception, message="User shouldn't be able to login as he is disabled") as excinfo:
+ user_session = rift.auto.mano.get_session(session_class, confd_host, 'disabled_user', rbac_user_passwd, timeout=5)
+ # Checking if he can login after he has been enabled back on.
+ rift.auto.mano.enable_user(rw_user_proxy, 'disabled_user', user_domain, rw_rbac_int_proxy)
+ user_session = rift.auto.mano.get_session(session_class, confd_host, 'disabled_user', rbac_user_passwd)
+ rift.auto.mano.close_session(user_session)
+ # All platform roles trying to change the status of a user
+ for user in self.platform_role_users:
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, rbac_user_passwd)
+ pxy = user_session.proxy(RwUserYang)
+ if user == 'platform_user_oper':
+ with pytest.raises(Exception, message="Platform oper shouldn't be able to disable other users") as excinfo:
+ rift.auto.mano.disable_user(pxy, 'disabled_user', user_domain, rw_rbac_int_proxy)
+ else:
+ rift.auto.mano.disable_user(pxy, 'disabled_user', user_domain, rw_rbac_int_proxy)
+ rift.auto.mano.enable_user(pxy, 'disabled_user', user_domain, rw_rbac_int_proxy)
+ rift.auto.mano.close_session(user_session)
+
+ # Testing if users can change their own passwords
+ for user in self.platform_users:
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, rbac_user_passwd)
+ pxy = user_session.proxy(RwUserYang)
+ rift.auto.mano.update_password(pxy, user, 'new_password', user_domain, rw_rbac_int_proxy)
+ rift.auto.mano.close_session(user_session)
+
+ # Testing if platform role users can change the password of another user
+ for idx, user in enumerate(self.platform_role_users, 1):
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, 'new_password')
+ pxy = user_session.proxy(RwUserYang)
+ if user == 'platform_user_oper':
+ with pytest.raises(Exception, message="User shouldn't be able to change another user's password") as excinfo:
+ rift.auto.mano.update_password(pxy, 'platform_user_test', 'even_newer_password_{}'.format(idx), user_domain, rw_rbac_int_proxy)
+ else:
+ rift.auto.mano.update_password(pxy, 'platform_user_test', 'even_newer_password_{}'.format(idx), user_domain, rw_rbac_int_proxy)
+ rift.auto.mano.close_session(user_session)
+
+ # Testing if platform users have access to logging
+ for user in self.platform_role_users:
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, 'new_password')
+ pxy = user_session.proxy(RwlogMgmtYang)
+ access_ = pxy.get_config('/logging')
+ assert access_ is not None
+ rpc_input = RwlogMgmtYang.YangInput_RwlogMgmt_ShowLogs.from_dict({'all': 'None'})
+ pxy.rpc(rpc_input)
+ rpc_input_1 = RwlogMgmtYang.YangInput_RwlogMgmt_LogEvent.from_dict({'on': 'None'})
+ pxy.rpc(rpc_input_1)
+ rift.auto.mano.close_session(user_session)
+
+ def rbac_internal_check(self, mgmt_session, xpath):
+
+ rbac_intl_proxy = mgmt_session.proxy(RwRbacInternalYang)
+ rbac_intl_proxy.wait_for(xpath, "active", timeout=5)
+
+ def test_rbac_internal_verification(self, rw_user_proxy, rw_conman_proxy, rbac_user_passwd, user_domain, mgmt_session,
+ rw_project_proxy, rbac_platform_proxy, rw_rbac_int_proxy):
+ """Doing various tasks and verifying if rbac-internal is reflecting these changes properly"""
+
+ # Creating projects and users for verifying the rbac-internal scenario
+ for idx in range(1, 4):
+ project_name = 'rbac_project_{}'.format(idx)
+ rift.auto.mano.create_project(rw_conman_proxy, project_name)
+ self.RBAC_PROJECTS.append(project_name)
+
+ if project_name in self.DELETED_PROJECTS_TRACKER:
+ self.DELETED_PROJECTS_TRACKER.remove(project_name)
+
+ for idx in range(1, 5):
+ rift.auto.mano.create_user(rw_user_proxy, 'rbac_user_{}'.format(idx), rbac_user_passwd, user_domain)
+ self.RBAC_USERS.append('rbac_user_{}'.format(idx))
+
+ # Rbac-Internal Verification
+ project_order = [0, 1, 2, 3, 0]
+ xpath = '/rw-rbac-internal/role[role={role}][keys={project}]/user[user-name={user}][user-domain={domain}]/state-machine/state'
+ # Assigning four users to four projects with two project roles for each user and checking the rbac-internal
+ for idx in range(0, 4):
+ fdx = project_order[idx]
+ ldx = project_order[idx + 1]
+ role = self.project_roles[2 * idx]
+ role1 = self.project_roles[(2 * idx) + 1]
+ rift.auto.mano.assign_project_role_to_user(rw_project_proxy, role, self.RBAC_USERS[idx],
+ self.RBAC_PROJECTS[fdx], user_domain, rw_rbac_int_proxy)
+ self.rbac_internal_check(mgmt_session, xpath.format(role=quoted_key(role), project=quoted_key(self.RBAC_PROJECTS[fdx]),
+ user=quoted_key(self.RBAC_USERS[idx]), domain=quoted_key(user_domain)))
+ rift.auto.mano.assign_project_role_to_user(rw_project_proxy, role1, self.RBAC_USERS[idx],
+ self.RBAC_PROJECTS[ldx], user_domain, rw_rbac_int_proxy)
+ self.rbac_internal_check(mgmt_session, xpath.format(role=quoted_key(role1), project=quoted_key(self.RBAC_PROJECTS[ldx]),
+ user=quoted_key(self.RBAC_USERS[idx]), domain=quoted_key(user_domain)))
+ # Deleting the four projects and then checking rw-rbac-internal
+ for project_name in self.RBAC_PROJECTS:
+ rift.auto.mano.delete_project(rw_conman_proxy, project_name)
+ print ("Deleting project: {}".format(project_name))
+ self.DELETED_PROJECTS_TRACKER.add(project_name)
+
+ for idx in range(0, 4):
+ fdx = project_order[idx]
+ ldx = project_order[idx + 1]
+ role = self.project_roles[2 * idx]
+ role1 = self.project_roles[(2 * idx) + 1]
+
+ with pytest.raises(Exception, message="This user {} (with this role {} and project {}) shouldn't be on rbac-internal."
+ .format(self.RBAC_USERS[idx], role, self.RBAC_PROJECTS[fdx])) as excinfo:
+ self.rbac_internal_check(mgmt_session, xpath.format(role=quoted_key(role), project=quoted_key(self.RBAC_PROJECTS[fdx]),
+ user=quoted_key(self.RBAC_USERS[idx]), domain=quoted_key(user_domain)))
+ with pytest.raises(Exception, message="This user {} (with this role {} and project {}) shouldn't be on rbac-internal."
+ .format(self.RBAC_USERS[idx], role1, self.RBAC_PROJECTS[ldx])) as excinfo:
+ self.rbac_internal_check(mgmt_session, xpath.format(role=quoted_key(role1), project=quoted_key(self.RBAC_PROJECTS[ldx]),
+ user=quoted_key(self.RBAC_USERS[idx]), domain=quoted_key(user_domain)))
+
+ def test_roles_revoke(self, rw_conman_proxy, rw_user_proxy, rbac_platform_proxy, rw_project_proxy,
+ rbac_user_passwd, user_domain, rw_rbac_int_proxy):
+ """Assigning all the roles and then revoking them"""
+
+ # Creating users and assigning each of them a role
+ rift.auto.mano.create_project(rw_conman_proxy, 'test01')
+ for incrementor, role in enumerate(self.project_roles + self.platform_roles, 1):
+ user_name = 'test_user_{}'.format(incrementor)
+ rift.auto.mano.create_user(rw_user_proxy, user_name, rbac_user_passwd, user_domain)
+
+ if 'platform' in role:
+ rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, role, user_name, user_domain, rw_rbac_int_proxy)
+ else:
+
+ rift.auto.mano.assign_project_role_to_user(rw_project_proxy, role, user_name, 'test01', user_domain, rw_rbac_int_proxy)
+
+ # Removing the assigned roles from each user
+ for incrementor, role in enumerate(self.project_roles + self.platform_roles, 1):
+ user_name = 'test_user_{}'.format(incrementor)
+ if 'platform' in role:
+ rift.auto.mano.revoke_platform_role_from_user(rbac_platform_proxy, role, user_name, user_domain)
+ rift.auto.mano.revoke_user_from_platform_config(rbac_platform_proxy, user_name, user_domain)
+ else:
+ rift.auto.mano.revoke_project_role_from_user(rw_project_proxy, role, user_name, 'test01', user_domain)
+
+ def test_misbehaviours(
+ self, rw_user_proxy, rbac_user_passwd, user_domain,
+ session_class, confd_host, tbac, rw_rbac_int_proxy):
+ """Verify if bad credentials can cause any problems."""
+ rift.auto.mano.create_user(
+ rw_user_proxy, 'testuser', rbac_user_passwd, user_domain)
+ # Trying to login with an incorrect password multiple times
+ counter = 1
+ while(counter < 4):
+ with pytest.raises(
+ Exception,
+ message="User was able to login with the wrong password"
+ ):
+ rift.auto.mano.get_session(
+ session_class, confd_host, 'testuser', 'wrong_password',
+ timeout=5)
+ counter += 1
+
+ # Trying to login with INAVLID_LOGIN_CREDENTIALS
+ for uname, passwd in self.INAVLID_LOGIN_CREDENTIALS.items():
+ with pytest.raises(
+ Exception,
+ message="User logged im with invalid login credentials"
+ ):
+ rift.auto.mano.get_session(
+ session_class, confd_host, uname, passwd, timeout=5)
+ # Creating a user with POSSIBLY_PROBLEMATIC_CREDENTIALS
+ if tbac:
+ for uname, passwd in self.POSSIBLY_PROBLEMATIC_CREDENTIALS.items():
+ rift.auto.mano.create_user(
+ rw_user_proxy, uname,
+ passwd[0],
+ passwd[1]
+ )
+ else:
+ for uname, passwd in self.POSSIBLY_PROBLEMATIC_CREDENTIALS.items():
+ rift.auto.mano.create_user(
+ rw_user_proxy, uname,
+ passwd[0],
+ user_domain
+ )
+ # Creating a user with INVALID_CREDENTIALS
+ for username, password in self.INVALID_CREDENTIALS.items():
+ with pytest.raises(
+ Exception,
+ message="User created with invalid credentials"
+ ):
+ rift.auto.mano.create_user(
+ rw_user_proxy, username, password, user_domain)
+ # Delete the users created with POSSIBLY_PROBLEMATIC_CREDENTIALS
+ if tbac:
+ for uname, domain in self.POSSIBLY_PROBLEMATIC_CREDENTIALS.items():
+ rift.auto.mano.delete_user(
+ rw_user_proxy, uname,
+ domain[1]
+ )
+ else:
+ for uname, passwd in self.POSSIBLY_PROBLEMATIC_CREDENTIALS.items():
+ rift.auto.mano.delete_user(
+ rw_user_proxy, uname, user_domain
+ )
+
+ def test_project_keys(
+ self, rw_project_proxy, rbac_user_passwd, session_class,
+ confd_host):
+ """Trying to access/create various projects with bad project keys."""
+ # Checking if INVALID_PROJECT_KEYS can be accessed.
+ for project_name in self.INVALID_PROJECT_KEYS:
+ project_cm_config_xpath = '/project[name={project_name}]/project-state'
+ project_ = rw_project_proxy.get_config(
+ project_cm_config_xpath.format(
+ project_name=quoted_key(project_name)
+ ),
+ list_obj=True
+ )
+ assert project_ is None
+ # Trying to create projects with INVALID_PROJECT_CREATE_KEYS
+ for project_name in self.INVALID_PROJECT_CREATE_KEYS:
+ with pytest.raises(
+ Exception,
+ message="Project created with the INVALID_PROJECT_CREATE_KEYS"
+ ):
+ rift.auto.mano.create_project(rw_conman_proxy, project_name)
+ # These POSSIBLY_PROBLEMATIC_KEYS should not cause any error in theory.
+ for project_name in self.POSSIBLY_PROBLEMATIC_KEYS:
+ rift.auto.mano.create_project(rw_project_proxy, project_name)
+ # User trying to access a project he has no access to.
+ user_session = rift.auto.mano.get_session(
+ session_class, confd_host, 'test_user_11', rbac_user_passwd)
+ pxy = user_session.proxy(RwConmanYang)
+ project_ = pxy.get_config(
+ project_cm_config_xpath.format(
+ project_name=quoted_key('test01')
+ )
+ )
+ assert project_ is None
+ rift.auto.mano.close_session(user_session)
+
+ def test_project_testing(self, rw_conman_proxy, rw_user_proxy, rw_project_proxy, rbac_user_passwd, user_domain, rw_rbac_int_proxy):
+ """Multiple projects creation, deletion, re-addition with verification every step of the way"""
+
+ # Creating projects and users for this test case
+ for idx in range(1,5):
+ project_name = 'testing_project_{}'.format(idx)
+ rift.auto.mano.create_project(rw_conman_proxy, project_name)
+ self.TEST_PROJECTS.append(project_name)
+ if project_name in self.DELETED_PROJECTS_TRACKER:
+ self.DELETED_PROJECTS_TRACKER.remove(project_name)
+
+ for idx in range(1,9):
+ rift.auto.mano.create_user(rw_user_proxy, 'testing_user_{}'.format(idx), rbac_user_passwd, user_domain)
+ self.TEST_USERS.append('testing_user_{}'.format(idx))
+
+ # Assigning project roles to users
+ for idx in range(0,8):
+ role = self.project_roles[idx]
+ rift.auto.mano.assign_project_role_to_user(rw_project_proxy, role, self.TEST_USERS[idx],
+ self.TEST_PROJECTS[idx//2], user_domain, rw_rbac_int_proxy)
+
+ # Deleting all test projects
+ for project_name in self.TEST_PROJECTS:
+ rift.auto.mano.delete_project(rw_conman_proxy, project_name)
+ self.DELETED_PROJECTS_TRACKER.add(project_name)
+
+ # Recreating all the deleted projects
+ for project_name in self.TEST_PROJECTS:
+ rift.auto.mano.create_project(rw_conman_proxy, project_name)
+ if project_name in self.DELETED_PROJECTS_TRACKER:
+ self.DELETED_PROJECTS_TRACKER.remove(project_name)
+
+ # Check if the recreated projects have the old users assigned to them still.
+ for idx in range(0,8):
+ role = self.project_roles[idx]
+ role_keyed_path = "/project[name={project}]/project-config/user[user-name={user}][user-domain={domain}]/role[role={user_role}]"
+ role_ = rw_project_proxy.get_config(role_keyed_path.format(project=quoted_key(self.TEST_PROJECTS[idx//2]),
+ user=quoted_key(self.TEST_USERS[idx]), domain=quoted_key(user_domain), user_role=quoted_key(role)))
+ assert role_ is None, "This user shouldn't exist in this project which was just created"
+
+ # Reassigning the old users to their old roles.
+ for idx in range(0,8):
+ role = self.project_roles[idx]
+ rift.auto.mano.assign_project_role_to_user(rw_project_proxy, role, self.TEST_USERS[idx],
+ self.TEST_PROJECTS[idx//2], user_domain, rw_rbac_int_proxy)
+
+
+@pytest.mark.depends('rbac_setup')
+@pytest.mark.teardown('rbac_setup')
+@pytest.mark.incremental
+class TestTeardown(object):
+ """Class Teardown."""
+
+ def test_delete_projects(self, rw_conman_proxy):
+ invalid_projects = TestIdentity.POSSIBLY_PROBLEMATIC_KEYS + ['test01']
+ valid_projects = TestIdentity.TEST_PROJECTS + TestIdentity.RBAC_PROJECTS
+ all_projects = valid_projects + invalid_projects
+
+ for project_name in all_projects:
+ try:
+ rift.auto.mano.delete_project(rw_conman_proxy, project_name)
+ except rift.auto.session.ProxyRequestError as e:
+ if project_name in TestIdentity.DELETED_PROJECTS_TRACKER:
+ print ("Project {} is already deleted".format(project_name))
+ elif project_name not in invalid_projects:
+ print ("Failed to delete project: {}".format(project_name))
+ raise e
+
+ def test_delete_users(self, rw_user_proxy, rbac_platform_proxy, user_domain):
+ users_test_data = ['testuser']
+ for incrementor, role in enumerate(TestIdentity.project_roles + TestIdentity.platform_roles, 1):
+ users_test_data.append('test_user_{}'.format(incrementor))
+
+ for user in TestIdentity.platform_users:
+ users_test_data.append(user)
+
+ # Deletes the users
+ for user in users_test_data+TestIdentity.RBAC_USERS+TestIdentity.TEST_USERS:
+ try:
+ keyed_path = "/rbac-platform-config/user[user-name={user}][user-domain={domain}]"
+ platform_cfg_ent = rbac_platform_proxy.get_config(keyed_path.format(user=quoted_key(user), domain=quoted_key(user_domain)))
+
+ if platform_cfg_ent is not None:
+ # Delete from the platform-config first.
+ rift.auto.mano.revoke_user_from_platform_config(rbac_platform_proxy, user, user_domain)
+ rift.auto.mano.delete_user(rw_user_proxy, user, user_domain)
+
+ except rift.auto.session.ProxyRequestError as e:
+ if user not in TestIdentity.INAVLID_LOGIN_CREDENTIALS:
+ print ("Deletion of user {} failed".format(user))
+ raise e
+ else:
+ print ("Expected error deleting invalid user {}".format(user))
--- /dev/null
+#!/usr/bin/env python3
+"""
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+
+import pytest
+import gi
+
+import rift.auto.mano
+import rift.auto.descriptor
+
+gi.require_version('RwProjectNsdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
+gi.require_version('RwCloudYang', '1.0')
+gi.require_version('RwSdnYang', '1.0')
+gi.require_version('RwLaunchpadYang', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwImageMgmtYang', '1.0')
+gi.require_version('RwStagingMgmtYang', '1.0')
+gi.require_version('RwPkgMgmtYang', '1.0')
+
+from gi.repository import (
+ RwProjectNsdYang,
+ RwProjectVnfdYang,
+ RwCloudYang,
+ RwSdnYang,
+ RwLaunchpadYang,
+ RwVnfrYang,
+ RwNsrYang,
+ RwImageMgmtYang,
+ RwStagingMgmtYang,
+ RwPkgMgmtYang,
+)
+
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+
+@pytest.fixture(scope='module')
+def mano_xpaths():
+ """All xpaths which need to be accessed by users with various roles"""
+
+ xpaths_dict = {
+ 'catalog' : ('/vnfd-catalog', '/nsd-catalog'),
+ 'accounts' : ('/cloud', '/sdn'),
+ 'records' : ('/vnfr-catalog', '/vnfr-console', '/ns-instance-config', '/ns-instance-opdata'),
+ 'pkg-mgmt' : ('/staging-areas', '/upload-jobs', '/copy-jobs', '/download-jobs'),
+ 'config-agent': ('/config-agent',),
+ 'ro' : ('/resource-orchestrator',),
+ 'datacenter' : ('/datacenters',),
+ }
+ return xpaths_dict
+
+
+@pytest.fixture(scope='module')
+def mano_roles_xpaths_mapping():
+ """Mano roles and its accessible xpaths mapping"""
+ mano_roles_xpaths_mapping_dict = {
+ 'rw-project:project-admin': ('catalog', 'accounts', 'records', 'pkg-mgmt', 'config-agent', 'ro', 'datacenter'),
+ 'rw-project:project-oper' : ('catalog', 'accounts', 'records', 'pkg-mgmt', 'config-agent', 'ro', 'datacenter'),
+ 'rw-project-mano:catalog-oper' : ('catalog', 'pkg-mgmt'),
+ 'rw-project-mano:catalog-admin' : ('catalog', 'pkg-mgmt'),
+ 'rw-project-mano:lcm-admin' : ('catalog', 'accounts', 'records', 'config-agent', 'datacenter'),
+ 'rw-project-mano:lcm-oper' : ('records',),
+ 'rw-project-mano:account-admin' : ('accounts', 'config-agent', 'ro', 'datacenter'),
+ 'rw-project-mano:account-oper' : ('accounts', 'config-agent', 'ro', 'datacenter'),
+ }
+ return mano_roles_xpaths_mapping_dict
+
+
+@pytest.fixture(scope='module')
+def xpath_module_mapping():
+ """Mano Xpaths and its module mapping. Value also carries config or opdata type along with yang-module"""
+ xpath_module_mapping_dict = {
+ ('/vnfd-catalog',): (RwProjectVnfdYang, 'get_config'),
+ ('/nsd-catalog',): (RwProjectNsdYang, 'get_config'),
+ ('/cloud',): (RwCloudYang, 'get_config'),
+ ('/sdn',): (RwSdnYang, 'get_config'),
+ ('/vnfr-catalog', '/vnfr-console'): (RwVnfrYang, 'get'),
+ ('/ns-instance-config', '/ns-instance-opdata'): (RwNsrYang, 'get'),
+ ('/upload-jobs', '/download-jobs'): (RwImageMgmtYang, 'get'),
+ ('/copy-jobs', ): (RwPkgMgmtYang, 'get'),
+ ('/staging-areas',): (RwStagingMgmtYang, 'get'),
+ ('/resource-orchestrator', '/datacenters'): (RwLaunchpadYang, None),
+ ('/config-agent',): None,
+ }
+ return xpath_module_mapping_dict
+
+@pytest.mark.setup('mano_xpath_access')
+@pytest.mark.depends('nsr')
+@pytest.mark.incremental
+class TestRbacManoXpathAccess(object):
+ def test_copy_nsd_catalog_item(self, mgmt_session):
+ """Copy a NSD catalog item, so that /copy-jobs xpath can be tested."""
+ nsd_path = '/rw-project:project[rw-project:name="default"]/nsd-catalog'
+ nsd = mgmt_session.proxy(RwProjectNsdYang).get_config(nsd_path)
+ nsd_pkg_id = nsd.nsd[0].id
+ rpc_input = RwPkgMgmtYang.YangInput_RwPkgMgmt_PackageCopy.from_dict(
+ {'package_type': 'NSD', 'package_id': nsd_pkg_id, 'package_name': 'test_nsd_copy',
+ 'project_name': 'default'})
+ mgmt_session.proxy(RwPkgMgmtYang).rpc(rpc_input)
+
+ def test_rbac_mano_xpaths_access(self, mano_xpaths, logger, mano_roles_xpaths_mapping, xpath_module_mapping, session_class,
+ project_keyed_xpath, user_domain, rbac_platform_proxy, rw_project_proxy, rbac_user_passwd, confd_host, rw_user_proxy, rw_rbac_int_proxy):
+ """Verify Mano roles/Permission mapping works (Verifies only read access for all Xpaths)."""
+ project_name = 'default'
+
+ # Skipping download-jobs as it is not yet implemented from MANO side.
+ # Others are skipped becuase they need Juju, Openmano configurations etc.
+ skip_xpaths = ('/download-jobs', '/config-agent', '/resource-orchestrator', '/datacenters', '/upload-jobs')
+
+ for index, (role, xpath_keys_tuple) in enumerate(mano_roles_xpaths_mapping.items()):
+ # Create an user and assign a role
+ user_name = 'user-{}'.format(index)
+ rift.auto.mano.create_user(rw_user_proxy, user_name, rbac_user_passwd, user_domain)
+ logger.debug('Creating an user {} with role {}'.format(user_name, role))
+ if 'platform' in role:
+ rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, role, user_name, user_domain, rw_rbac_int_proxy)
+ else:
+ rift.auto.mano.assign_project_role_to_user(rw_project_proxy, role, user_name, project_name, user_domain, rw_rbac_int_proxy)
+
+ # Get user session
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user_name, rbac_user_passwd)
+
+ # go through each of its xpaths keys and try to access
+ for xpath_key in xpath_keys_tuple:
+ for xpath in mano_xpaths[xpath_key]:
+ if xpath in skip_xpaths:
+ continue
+ logger.debug('User {} with role {} trying to access xpath {}'.format(user_name, role, xpath))
+ yang_module, get_type = [yang_module for xpath_tuple, yang_module in xpath_module_mapping.items()
+ if xpath in xpath_tuple][0]
+ user_pxy = user_session.proxy(yang_module)
+ get_data_func = getattr(user_pxy, get_type)
+ assert get_data_func(project_keyed_xpath.format(project_name=quoted_key(project_name))+xpath)
+
+ # go through remaining xpaths keys which this user-role not part of and try to access; it should fail
+ access_denied_xpath_keys_tuple = set(mano_xpaths.keys()).difference(xpath_keys_tuple)
+ for xpath_key in access_denied_xpath_keys_tuple:
+ for xpath in mano_xpaths[xpath_key]:
+ if xpath in skip_xpaths:
+ continue
+ logger.debug('User {} with role {} trying to access xpath {}. It should get None'.format(user_name, role, xpath))
+ yang_module, get_type = [yang_module for xpath_tuple, yang_module in xpath_module_mapping.items()
+ if xpath in xpath_tuple][0]
+ user_pxy = user_session.proxy(yang_module)
+ get_data_func = getattr(user_pxy, get_type)
+ assert get_data_func(project_keyed_xpath.format(project_name=quoted_key(project_name))+xpath) is None
--- /dev/null
+#!/usr/bin/env python3
+"""
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+import collections
+import gi
+import pytest
+import random
+import uuid
+
+import rift.auto.mano
+import rift.auto.descriptor
+gi.require_version('RwUserYang', '1.0')
+gi.require_version('RwProjectYang', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('RwlogMgmtYang', '1.0')
+from gi.repository import (
+ RwUserYang,
+ RwProjectYang,
+ RwConmanYang,
+ RwProjectVnfdYang,
+ RwProjectNsdYang,
+ RwNsrYang,
+ RwVnfrYang,
+ RwVlrYang,
+ RwRbacPlatformYang,
+ RwlogMgmtYang,
+ RwRedundancyYang,
+)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+SESSION_CONNECT_TIMEOUT=5
+
+@pytest.fixture(scope='session')
+def user_test_roles():
+ """Returns tuples of roles which enable an user to delete/create a new user"""
+ write_roles = ('rw-rbac-platform:super-admin', 'rw-rbac-platform:platform-admin')
+ read_roles = tuple()
+ return write_roles, read_roles
+
+
+@pytest.fixture(scope='session')
+def project_test_roles():
+ """Returns tuples of roles which enable an user to create, read, delete a project"""
+ write_roles = ('rw-rbac-platform:super-admin', )
+ read_roles = ('rw-project:project-oper', 'rw-project:project-admin')
+ return write_roles, read_roles
+
+
+@pytest.fixture(scope='session')
+def onboarding_test_roles():
+ """Fixture that returns a tuple of roles which enable an user to onboard/modify/delete a VNF/NS package"""
+ write_roles = ('rw-rbac-platform:super-admin', 'rw-project-mano:catalog-admin', 'rw-project:project-admin')
+ read_roles = ('rw-project-mano:catalog-oper', 'rw-project-mano:lcm-admin')
+ return write_roles, read_roles
+
+
+@pytest.fixture(scope='session')
+def account_test_roles():
+ """Fixture that returns a tuple of roles which enable an user to CRUD a VIM, Sdn account"""
+ write_roles = ('rw-rbac-platform:super-admin', 'rw-project-mano:account-admin', 'rw-project:project-admin')
+ read_roles = ('rw-project-mano:account-oper', 'rw-project-mano:lcm-admin')
+ return write_roles, read_roles
+
+
+@pytest.fixture(scope='session')
+def ns_instantiate_test_roles():
+ """Fixture that returns a tuple of roles which enable an user to instantiate/terminate a NS
+ Read roles: who all can access vnfr-catalog, vnfr-console, ns-instance-opdata etc"""
+ write_roles = ('rw-rbac-platform:super-admin', 'rw-project-mano:lcm-admin', 'rw-project:project-admin')
+ read_roles = ('rw-project-mano:lcm-oper', )
+ return write_roles, read_roles
+
+
+@pytest.fixture(scope='session')
+def syslog_server_test_roles():
+ """Fixture that returns a tuple of roles which enable an user set the syslog server_address"""
+ write_roles = ('rw-rbac-platform:super-admin', 'rw-rbac-platform:platform-admin', 'rw-rbac-platform:platform-oper')
+ read_roles = tuple()
+ return write_roles, read_roles
+
+
+@pytest.fixture(scope='session')
+def redundancy_config_test_roles():
+ """Fixture that returns a tuple of roles which enable an user set the syslog server_address"""
+ write_roles = ('rw-rbac-platform:super-admin', 'rw-rbac-platform:platform-admin')
+ read_roles = ('rw-rbac-platform:platform-oper', )
+ return write_roles, read_roles
+
+
+@pytest.fixture(scope='session')
+def project_acessible():
+ """Fixture that returns name of the project to which all new users will be associated"""
+ return random.choice(['project1', 'default'])
+
+
+# @pytest.fixture(scope='session')
+# def project_not_accessible():
+# """Retruns name of the project whose users are not supposed to access the resources under project 'project_acessible'"""
+# return 'project2'
+
+
+@pytest.fixture(scope='session')
+def users_test_data(rw_user_proxy, rbac_platform_proxy, rw_project_proxy, all_roles, user_test_roles, project_test_roles,
+ onboarding_test_roles, account_test_roles, ns_instantiate_test_roles, user_domain, project_acessible, rw_conman_proxy,
+ syslog_server_test_roles, all_roles_combinations, rw_rbac_int_proxy, tbac, redundancy_config_test_roles):
+ """Creates new users required for a test and assign appropriate roles to them"""
+ if pytest.config.getoption("--user-creation-test"):
+ test_roles = user_test_roles
+ elif pytest.config.getoption("--project-creation-test"):
+ test_roles = project_test_roles
+ elif pytest.config.getoption("--onboarding-test"):
+ test_roles = onboarding_test_roles
+ elif pytest.config.getoption("--account-test"):
+ test_roles = account_test_roles
+ elif pytest.config.getoption("--nsr-test"):
+ test_roles = ns_instantiate_test_roles
+ elif pytest.config.getoption("--syslog-server-test"):
+ test_roles = syslog_server_test_roles
+ elif pytest.config.getoption("--redundancy-role-test"):
+ test_roles = redundancy_config_test_roles
+
+ # Create a project to which these users will be part of
+ if project_acessible != 'default':
+ rift.auto.mano.create_project(rw_conman_proxy, project_acessible)
+
+ def create_user_assign_role(user_name, password, role_set):
+ rift.auto.mano.create_user(rw_user_proxy, user_name, password, user_domain)
+ project_roles_list, platform_roles_list = [], []
+ for role in role_set:
+ if 'platform' in role:
+ platform_roles_list.append(role)
+ else:
+ project_roles_list.append(role)
+ if platform_roles_list:
+ rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, platform_roles_list, user_name, user_domain, rw_rbac_int_proxy)
+ if project_roles_list:
+ rift.auto.mano.assign_project_role_to_user(rw_project_proxy, project_roles_list, user_name,
+ project_acessible, user_domain, rw_rbac_int_proxy)
+
+ write_roles, read_roles = test_roles
+ fail_roles = [role for role in all_roles if role not in write_roles]
+
+ if False: #If its desired to run checks for all combinations, tbd on what option this will be enabled
+ write_roles_tmp, read_roles_tmp, fail_roles_tmp = [], [], []
+ for role_combination in all_roles_combinations:
+ if bool(set(role_combination).intersection(write_roles)):
+ write_roles_tmp.append(role_combination)
+ continue
+ if bool(set(role_combination).intersection(read_roles)):
+ read_roles_tmp.append(role_combination)
+ continue
+ if bool(set(role_combination).isdisjoint(write_roles)):
+ fail_roles_tmp.append(role_combination)
+ write_roles, read_roles, fail_roles = write_roles_tmp, read_roles_tmp, fail_roles_tmp
+
+ # Create the users with roles mapped
+ write_users, read_users, fail_users = dict(), dict(), dict()
+ for idx, role_set in enumerate(write_roles, 1):
+ if type(role_set) is str:
+ role_set = [role_set]
+ user_name = 'write-{}'.format(idx)
+ if tbac:
+ password=user_name
+ else:
+ password = rift.auto.mano.generate_password()
+ create_user_assign_role(user_name, password, role_set)
+ write_users[user_name] = (role_set, password)
+
+ for idx, role_set in enumerate(read_roles, 1):
+ if type(role_set) is str:
+ role_set = [role_set]
+ user_name = 'read-{}'.format(idx)
+ if tbac:
+ password=user_name
+ else:
+ password = rift.auto.mano.generate_password()
+ create_user_assign_role(user_name, password, role_set)
+ read_users[user_name] = (role_set, password)
+
+ for idx, role_set in enumerate(fail_roles, 1):
+ if type(role_set) is str:
+ role_set = [role_set]
+ user_name = 'fail-{}'.format(idx)
+ if tbac:
+ password=user_name
+ else:
+ password = rift.auto.mano.generate_password()
+ create_user_assign_role(user_name, password, role_set)
+ fail_users[user_name] = (role_set, password)
+ return write_users, read_users, fail_users
+
+
+@pytest.mark.setup('test_rbac_roles_setup')
+@pytest.mark.incremental
+class TestRbacVerification(object):
+ @pytest.mark.skipif(not pytest.config.getoption("--project-creation-test"), reason="need --project-creation-test option to run")
+ def test_project_create_delete_authorization(self, logger, users_test_data, session_class, confd_host, rw_conman_proxy,
+ project_keyed_xpath, project_acessible):
+ """Verifies only users with certain roles can create/delete a project"""
+
+ write_users, read_users, fail_users = users_test_data
+
+ # Check users in write_users dict able to create/delete a project
+ logger.debug('Verifying users which are authorised to create/delete a project')
+ for user in write_users:
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, write_users[user]))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, write_users[user][1])
+ pxy = user_session.proxy(RwProjectYang)
+
+ project_name = 'project-{}'.format(user)
+ logger.debug('Trying to create project {}'.format(project_name))
+ rift.auto.mano.create_project(pxy, project_name)
+
+ logger.debug('Trying to delete project {}'.format(project_name))
+ rift.auto.mano.delete_project(pxy, project_name)
+
+ rift.auto.mano.close_session(user_session)
+
+ # Check users in read_users dict able to read a project
+ logger.debug('Verifying users which are authorised to read a project')
+ for user in read_users:
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, read_users[user]))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, read_users[user][1])
+ pxy = user_session.proxy(RwProjectYang)
+
+ logger.debug('User {} trying to read project {}'.format(user, project_acessible))
+ project_ = pxy.get_config(project_keyed_xpath.format(project_name=quoted_key(project_acessible))+'/project-state', list_obj=True)
+ assert project_
+
+ rift.auto.mano.close_session(user_session)
+
+ # Check users in fail_users dict shouldn't be allowed to create a project or delete a project
+
+ # 'project-admin' user not able to create a project, but can delete a project, hence do the create/delete
+ # operation for this user at the end
+ fail_users_reordered = collections.OrderedDict()
+ for user, role_passwd_tuple in fail_users.items():
+ if any('project-admin' in role for role in role_passwd_tuple[0]):
+ project_admin_key, project_admin_val = user, role_passwd_tuple
+ continue
+ fail_users_reordered[user] = role_passwd_tuple
+ fail_users_reordered[project_admin_key] = project_admin_val
+
+ logger.debug('Verifying users which are not supposed to create/delete a project')
+ for user in fail_users_reordered:
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, fail_users_reordered[user]))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, fail_users_reordered[user][1])
+ pxy = user_session.proxy(RwProjectYang)
+
+ project_name = 'project-{}'.format(user)
+
+ with pytest.raises(Exception, message='User {} not authorised to create project {}'.format(
+ user, project_name)) as excinfo:
+ logger.debug('User {} trying to create project {}'.format(user, project_name))
+ rift.auto.mano.create_project(pxy, project_name)
+
+ logger.debug('User {} trying to delete project {}'.format(user, project_acessible))
+ if any('project-admin' in role for role in fail_users_reordered[user][0]):
+ rift.auto.mano.delete_project(pxy, project_acessible)
+ continue
+ with pytest.raises(Exception, message='User {} not authorised to delete project {}'.format(
+ user, project_acessible)) as excinfo:
+ rift.auto.mano.delete_project(pxy, project_acessible)
+
+ rift.auto.mano.close_session(user_session)
+
+ def delete_user_from_project(
+ self, project_proxy, target_user, target_project, user_domain):
+ project_xpath = (
+ "/project[name={project}]/project-config/user" +
+ "[user-name={user}][user-domain={domain}]"
+ )
+ # Check if the user exists for the project
+ ret_val = project_proxy.get_config(
+ project_xpath.format(
+ project=quoted_key(target_project),
+ user=quoted_key(target_user),
+ domain=quoted_key(user_domain)))
+
+ assert ret_val
+ # Delete the target_user from the target_project
+ project_proxy.delete_config(
+ project_xpath.format(
+ project=quoted_key(target_project),
+ user=quoted_key(target_user),
+ domain=quoted_key(user_domain))
+ )
+ # Verify that he is deleted
+ ret_val = project_proxy.get_config(
+ project_xpath.format(
+ project=quoted_key(target_project),
+ user=quoted_key(target_user),
+ domain=quoted_key(user_domain))
+ )
+ assert ret_val is None
+
+ @pytest.mark.skipif(
+ not pytest.config.getoption("--project-creation-test"),
+ reason="need --project-creation-test option to run")
+ def test_project_admin_users_role_authorization(
+ self, logger, user_roles, rw_user_proxy, session_class,
+ user_domain, confd_host, rw_conman_proxy, project_keyed_xpath,
+ rw_project_proxy, rw_rbac_int_proxy, tbac):
+ """Verify project admin & oper role operations on a single project."""
+ logger.debug(
+ "Create a project & 8 users each with its own project/mano role")
+ rift.auto.mano.create_project(rw_conman_proxy, 'project-vzw')
+ project_user_data = {}
+ for idx, role in enumerate(user_roles, 1):
+ user_name = 'project_vzw_user-{}'.format(idx)
+ if not tbac:
+ password = rift.auto.mano.generate_password()
+ else:
+ password = user_name
+ rift.auto.mano.create_user(
+ rw_user_proxy, user_name, password, user_domain)
+ rift.auto.mano.assign_project_role_to_user(
+ rw_project_proxy, role, user_name, 'project-vzw',
+ user_domain, rw_rbac_int_proxy)
+ project_user_data[user_name] = {"role": role, "password": password}
+ if "project-admin" in role:
+ project_admin_user = user_name
+
+ logger.debug("Project admin deleting roles from users.")
+ project_admin_session = rift.auto.mano.get_session(
+ session_class, confd_host, project_admin_user,
+ project_user_data[project_admin_user]["password"])
+ project_admin_proxy = project_admin_session.proxy(RwProjectYang)
+ for user in project_user_data:
+ role = project_user_data[user]["role"]
+ if project_admin_user == user:
+ continue
+ rift.auto.mano.revoke_project_role_from_user(
+ project_admin_proxy, role, user, 'project-vzw', user_domain)
+ rift.auto.mano.close_session(project_admin_session)
+
+ logger.debug("Verify project admin can assign another role to users")
+ project_admin_session = rift.auto.mano.get_session(
+ session_class, confd_host, project_admin_user,
+ project_user_data[project_admin_user]["password"])
+ project_admin_proxy = project_admin_session.proxy(RwProjectYang)
+ for user in project_user_data:
+ role = 'rw-project:project-oper'
+ if project_admin_user == user:
+ continue
+ rift.auto.mano.assign_project_role_to_user(
+ project_admin_proxy, role, user, 'project-vzw',
+ user_domain, rw_rbac_int_proxy)
+ rift.auto.mano.close_session(project_admin_session)
+
+ # Verify the user able to read project
+ for user in project_user_data:
+ user_session = rift.auto.mano.get_session(
+ session_class, confd_host, user,
+ project_user_data[user]["password"])
+ user_project_pxy = user_session.proxy(RwProjectYang)
+ logger.debug("verifying user able to read project")
+ xpath = "/project[name={project}]/project-config"
+ ret_val = user_project_pxy.get_config(
+ xpath.format(project=quoted_key('project-vzw')))
+ assert ret_val
+ rift.auto.mano.close_session(user_session)
+
+ logger.debug("Verify if project admin can replace roles for users")
+ project_admin_session = rift.auto.mano.get_session(
+ session_class, confd_host, project_admin_user,
+ project_user_data[project_admin_user]["password"])
+ project_admin_proxy = project_admin_session.proxy(RwProjectYang)
+ for user in project_user_data:
+ if project_admin_user != user:
+ xpath = (
+ "/project[name={project}]/project-config/user" +
+ "[user-name={user}][user-domain={domain}]")
+ new_role = (
+ RwProjectYang.
+ YangData_RwProject_Project_ProjectConfig_User_Role.
+ from_dict({
+ 'role': 'rw-project-mano:account-admin'})
+ )
+ project_admin_proxy.replace_config(
+ xpath.format(
+ project=quoted_key('project-vzw'),
+ user=quoted_key(user),
+ domain=quoted_key(user_domain)), new_role)
+ ret_val = project_admin_proxy.get_config(
+ xpath.format(
+ project=quoted_key('project-vzw'),
+ user=quoted_key(user),
+ domain=quoted_key(user_domain),
+ role=quoted_key('rw-project-mano:lcm-oper')))
+ assert ret_val
+ rift.auto.mano.close_session(project_admin_session)
+
+ logger.debug("Verify if users able to change its own user details")
+ for user in project_user_data:
+ if tbac:
+ break
+ password = project_user_data[user]["password"]
+ user_session = rift.auto.mano.get_session(
+ session_class, confd_host, user, password)
+ user_proxy = user_session.proxy(RwUserYang)
+ rift.auto.mano.update_password(
+ user_proxy, user, user, user_domain, rw_rbac_int_proxy)
+ project_user_data[user]["new_password"] = user
+ rift.auto.mano.close_session(user_session)
+
+ logger.debug(
+ "{} trying to connect ".format(user) +
+ "with its old password {}".format(password)
+ )
+
+ message = ('{} not supposed to '.format(user) +
+ 'log-in with old passwd {}'.format(password))
+ with pytest.raises(Exception, message=message):
+ rift.auto.mano.get_session(
+ session_class, confd_host, user,
+ password, timeout=SESSION_CONNECT_TIMEOUT)
+
+ # Verify the user should be able to log-in with new password
+ logger.debug(
+ "User {} trying to log-in with its updated password {}".format(
+ user, project_user_data[user]["new_password"]))
+
+ usession_updated_passwd = rift.auto.mano.get_session(
+ session_class, confd_host, user,
+ project_user_data[user]["new_password"])
+
+ # project admin able to delete users from the project database
+ if tbac:
+ password = project_user_data[project_admin_user]["password"]
+ else:
+ password = project_user_data[project_admin_user]["new_password"]
+ project_admin_session = rift.auto.mano.get_session(
+ session_class, confd_host, project_admin_user, password)
+ project_admin_proxy = project_admin_session.proxy(RwProjectYang)
+
+ for user in project_user_data:
+ if user == project_admin_user:
+ continue
+ logger.debug('deleting user {} from project project-vzw'.format(user))
+ self.delete_user_from_project(
+ project_admin_proxy, user, 'project-vzw', user_domain)
+ rift.auto.mano.close_session(project_admin_session)
+
+ @pytest.mark.skipif(
+ not pytest.config.getoption("--project-creation-test"),
+ reason="need --project-creation-test option to run")
+ def test_multi_project_multi_users_role_authorization(
+ self, logger, user_roles, rw_user_proxy, session_class,
+ user_domain, confd_host, rw_conman_proxy, project_keyed_xpath,
+ rw_project_proxy, rw_rbac_int_proxy, tbac, rbac_user_passwd):
+ """Verify that users with roles doesn't have unauthorized access."""
+ """
+ Case 01. rbac_user2 has different roles in project1 and project2.
+ Case 02. rbac_user4 has project-admin in project3 and project4.
+ Case 03. rbac_user9 has project-oper in project5 and project6.
+ """
+
+ # The sample user data
+ role1 = 'rw-project:project-admin'
+ role2 = 'rw-project:project-oper'
+ project_user_data = {
+ "project1": {
+ "rbac_user1": role1,
+ "rbac_user2": role2,
+ },
+ "project2": {
+ "rbac_user2": role1,
+ "rbac_user3": role2,
+ },
+ "project3": {
+ "rbac_user4": role1,
+ "rbac_user5": role2,
+
+ },
+ "project4": {
+ "rbac_user4": role1,
+ "rbac_user6": role2,
+ },
+ "project5": {
+ "rbac_user7": role1,
+ "rbac_user9": role2,
+ },
+ "project6": {
+ "rbac_user8": role1,
+ "rbac_user9": role2,
+ }
+ }
+ # Create projects
+ for idx in range(1, 7):
+ rift.auto.mano.create_project(
+ rw_conman_proxy, 'project{}'.format(idx))
+ # Create users
+ for idx in range(1, 10):
+ rift.auto.mano.create_user(
+ rw_user_proxy, 'rbac_user{}'.format(idx),
+ rbac_user_passwd, user_domain)
+ # Assign roles to users according to the project_user_data
+ for idx in range(1, 7):
+ project = 'project{}'.format(idx)
+ for user_name, role in project_user_data[project].items():
+ rift.auto.mano.assign_project_role_to_user(
+ rw_project_proxy, role, user_name, project,
+ user_domain, rw_rbac_int_proxy)
+
+ def project_access(
+ user_name, target_project, session_class,
+ confd_host, logger):
+ """Verify if user has access to target project."""
+ password = rbac_user_passwd
+ if tbac:
+ password = user_name
+ user_session = rift.auto.mano.get_session(
+ session_class, confd_host, user_name, password)
+ logger.debug("{} trying to access {}".format(
+ user_name, target_project) +
+ "/project-state"
+ )
+ pxy = user_session.proxy(RwProjectYang)
+ # Verify is user has access to /project
+ project_xpath = '/project[name={}]/project-state'.format(
+ quoted_key(target_project)
+ )
+ response = pxy.get_config(project_xpath, list_obj=True)
+ assert response
+ # Verify is user has access to /project/project-config/user
+ project_user_xpath = (
+ "/project[name={project}]/project-config/" +
+ "user[user-name={user}][user-domain={domain}]"
+ )
+ target_user = list(project_user_data[target_project].keys())[0]
+ pxy = user_session.proxy(RwProjectYang)
+ response = pxy.get_config(
+ project_user_xpath.format(
+ project=quoted_key(target_project),
+ user=quoted_key(target_user),
+ domain=quoted_key(user_domain)
+ )
+ )
+ assert response
+ rift.auto.mano.close_session(user_session)
+
+ # Case 01. rbac_user2 has different roles in project1 and project2.
+
+ logger.debug('Veryfy rbac_user1 of project1 has no access to project2')
+ with pytest.raises(
+ Exception,
+ message="rbac_user1 accessed project2 which its not part of."):
+ project_access(
+ 'rbac_user1', 'project2', session_class, confd_host, logger)
+
+ logger.debug('Verify rbac_user2 has access to project1 and project2')
+ project_access(
+ 'rbac_user2', 'project1', session_class, confd_host, logger)
+ project_access(
+ 'rbac_user2', 'project2', session_class, confd_host, logger)
+
+ # Case 02. rbac_user4 has project-admin in project3 and project4.
+
+ logger.debug('Verify rbac_user4 has access to project 3 & project4')
+ project_access(
+ 'rbac_user4', 'project4', session_class, confd_host, logger)
+ project_access(
+ 'rbac_user4', 'project3', session_class, confd_host, logger)
+
+ logger.debug('Two users in project3 exchanges roles & check access')
+ rift.auto.mano.revoke_project_role_from_user(
+ rw_project_proxy, role1, 'rbac_user4',
+ 'project3', user_domain)
+ rift.auto.mano.revoke_project_role_from_user(
+ rw_project_proxy, role2, 'rbac_user5',
+ 'project3', user_domain)
+ rift.auto.mano.assign_project_role_to_user(
+ rw_project_proxy, role2, 'rbac_user4',
+ 'project3', user_domain, rw_rbac_int_proxy)
+ rift.auto.mano.assign_project_role_to_user(
+ rw_project_proxy, role1, 'rbac_user5',
+ 'project3', user_domain, rw_rbac_int_proxy)
+
+ logger.debug('rbac_user5 trying its access on project3 and project4')
+ project_access(
+ 'rbac_user5', 'project3', session_class,
+ confd_host, logger
+ )
+ with pytest.raises(
+ Exception,
+ message="rbac_user5 accessed project4 which its not part of."):
+ project_access(
+ 'rbac_user5', 'project4', session_class,
+ confd_host, logger
+ )
+
+ # 'rbac_user5'(admin role) revoking the role from rbac-user4
+ password = rbac_user_passwd
+ if tbac:
+ password = 'rbac_user5'
+ rbac_user2_session = rift.auto.mano.get_session(
+ session_class, confd_host, 'rbac_user5', password)
+ rbac_user2_prjt_pxy = rbac_user2_session.proxy(RwProjectYang)
+ self.delete_user_from_project(
+ rbac_user2_prjt_pxy, 'rbac_user4', 'project3', user_domain)
+
+ # Create new user 'del-user'
+ rift.auto.mano.create_user(
+ rw_user_proxy, 'del-user', rbac_user_passwd, user_domain)
+ rift.auto.mano.assign_project_role_to_user(
+ rw_project_proxy, role2, 'del-user', 'project3',
+ user_domain, rw_rbac_int_proxy)
+ # Delete 'del-user' with 'rbac_user5'(admin role)
+ self.delete_user_from_project(
+ rbac_user2_prjt_pxy, 'del-user', 'project3', user_domain)
+
+ logger.debug(
+ 'rbac_user4 try to access project3 which its not a part of anymore'
+ )
+ with pytest.raises(
+ Exception,
+ message="rbac_user4 accessed project3 which its not part of."):
+ project_access(
+ 'rbac_user4', 'project3', session_class,
+ confd_host, logger)
+
+ logger.debug('rbac_user4 try to access project4 which its a part of.')
+ project_access(
+ 'rbac_user4', 'project4', session_class,
+ confd_host, logger)
+
+ # Case 03. rbac_user9 has project-oper in project5 and project6.
+
+ logger.debug('rbac_user9 try to access project5 & project6')
+ project_access(
+ 'rbac_user9', 'project5', session_class,
+ confd_host, logger)
+ project_access(
+ 'rbac_user9', 'project6', session_class,
+ confd_host, logger)
+
+ logger.debug(
+ 'rbac_user8 try to access to project5 which its not part of.'
+ )
+ with pytest.raises(
+ Exception,
+ message="rbac_user8 accessed project5 which its not part of."):
+ project_access(
+ 'rbac_user8', 'project5', session_class,
+ confd_host, logger)
+
+ logger.debug(
+ 'rbac_user7 try to access to project6 which its not part of.'
+ )
+ with pytest.raises(
+ Exception,
+ message="rbac_user7 accessed project6 which its not part of."):
+ project_access(
+ 'rbac_user7', 'project6', session_class,
+ confd_host, logger)
+
+
+ @pytest.mark.skipif(not pytest.config.getoption("--user-creation-test"), reason="need --user-creation-test option to run")
+ def test_user_create_delete_authorization(self, logger, users_test_data, session_class, confd_host, rw_user_proxy,
+ rbac_user_passwd, user_domain, tbac, rw_rbac_int_proxy):
+ """Verifies only users with certain roles can create/delete users and set the password of an user"""
+ write_users, read_users, fail_users = users_test_data
+
+ # Create a dummy user with admin/admin
+ dummy_user_name = 'dummy-user'
+ rift.auto.mano.create_user(rw_user_proxy, dummy_user_name, rbac_user_passwd, user_domain)
+
+ # Check users in write_users dict able to create/delete an user and able to set password for others
+ logger.debug('Verifying users which are authorised to create/delete an user')
+ for user in write_users:
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, write_users[user]))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, write_users[user][1])
+ pxy = user_session.proxy(RwUserYang)
+
+ user_name = 'new-user-{}'.format(user)
+ logger.debug('Trying to create user {}'.format(user_name))
+ rift.auto.mano.create_user(pxy, user_name, rbac_user_passwd, user_domain)
+
+ logger.debug('Trying to delete user {}'.format(user_name))
+ rift.auto.mano.delete_user(pxy, user_name, user_domain)
+
+ if not tbac: # password update is not allowed for external users in tbac
+ new_passwd = rift.auto.mano.generate_password()
+ # Check users in write_users dict able to set password for other user (dummy-user)
+ logger.debug('User {} trying to update password for user {}'.format(user, dummy_user_name))
+ rift.auto.mano.update_password(pxy, dummy_user_name, new_passwd, user_domain, rw_rbac_int_proxy)
+
+ # Verify dummy_user_name able to log-in with its new password
+ logger.debug('User {} trying to log-in with its updated password {}'.format(dummy_user_name, new_passwd))
+ dummy_user_session_updated_passwd = rift.auto.mano.get_session(session_class, confd_host, dummy_user_name,
+ new_passwd)
+
+ # Verify the user not able to log-in with old password
+ with pytest.raises(Exception, message='User {} not supposed to log-in with its old password {}'.format(
+ dummy_user_name, rbac_user_passwd)) as excinfo:
+ logger.debug('User {} trying to connect with its old password {}'.format(user, rbac_user_passwd))
+ rift.auto.mano.get_session(session_class, confd_host, dummy_user_name, rbac_user_passwd,
+ timeout=SESSION_CONNECT_TIMEOUT)
+
+ rift.auto.mano.close_session(dummy_user_session_updated_passwd)
+ rift.auto.mano.close_session(user_session)
+
+ # Check users in read_users dict able to read user list (path: /user-config)
+ logger.debug('Verifying users which are authorised to read user list')
+ for user in read_users:
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, read_users[user]))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, read_users[user][1])
+ pxy = user_session.proxy(RwUserYang)
+ logger.debug('User {} trying to access /user-config xpath'.format(user))
+ user_config = pxy.get_config('/user-config')
+ assert [user.user_name for user in user_config.user]
+
+ rift.auto.mano.close_session(user_session)
+
+ # Check users in fail_users dict not able to create/delete an user
+ logger.debug('Verifying users which are not supposed to create/delete an user')
+ for user in fail_users:
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, fail_users[user]))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, fail_users[user][1])
+ pxy = user_session.proxy(RwUserYang)
+
+ user_name = 'new-user-{}'.format(user)
+
+ with pytest.raises(Exception, message='User {} not authorised to create user {}'.format(
+ user, user_name)) as excinfo:
+ logger.debug('User {} trying to create an user {}'.format(user, user_name))
+ rift.auto.mano.create_user(pxy, user_name, rbac_user_passwd, user_domain)
+
+ with pytest.raises(Exception, message='User {} not authorised to delete user {}'.format(
+ user, dummy_user_name)) as excinfo:
+ logger.debug('User {} trying to delete user {}'.format(user, dummy_user_name))
+ rift.auto.mano.delete_user(pxy, dummy_user_name, user_domain)
+
+ rift.auto.mano.close_session(user_session)
+
+ if not tbac: # password update is not allowed for external users in tbac
+ # Check all users able to set their own password
+ logger.debug('Verifying an user able to set its own password')
+ for user, role_passwd_tuple in dict(write_users, **dict(read_users, **fail_users)).items():
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, role_passwd_tuple))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, role_passwd_tuple[1])
+ pxy = user_session.proxy(RwUserYang)
+
+ new_passwd = rift.auto.mano.generate_password()
+ logger.debug('User {} trying to update its password to {}'.format(user, new_passwd))
+ rift.auto.mano.update_password(pxy, user, new_passwd, user_domain, rw_rbac_int_proxy)
+
+ # Verify the user should be able to log-in with new password
+ logger.debug('User {} trying to log-in with its updated password {}'.format(user, new_passwd))
+ user_session_updated_passwd = rift.auto.mano.get_session(session_class, confd_host, user, new_passwd)
+
+ # Verify the user not able to log-in with old password
+ with pytest.raises(Exception, message='User {} not supposed to log-in with its old password {}'.format(
+ user, role_passwd_tuple[1])) as excinfo:
+ logger.debug('User {} trying to connect with its old password {}'.format(user, role_passwd_tuple[1]))
+ rift.auto.mano.get_session(session_class, confd_host, user, rbac_user_passwd, timeout=SESSION_CONNECT_TIMEOUT)
+
+ rift.auto.mano.close_session(user_session)
+ rift.auto.mano.close_session(user_session_updated_passwd)
+
+
+ @pytest.mark.skipif(not pytest.config.getoption("--account-test"), reason="need --account-test option to run")
+ def test_account_create_delete_authorization(self, users_test_data, mgmt_session, logger, cloud_module, fmt_cloud_xpath,
+ fmt_prefixed_cloud_xpath, project_acessible, cloud_account, session_class, confd_host):
+ """Verifies only users with certain roles can create/read/delete cloud, sdn accounts"""
+ write_users, read_users, fail_users = users_test_data
+ xpath_no_pfx = fmt_cloud_xpath.format(project=quoted_key(project_acessible), account_name=quoted_key(cloud_account.name))
+ xpath = fmt_prefixed_cloud_xpath.format(project=quoted_key(project_acessible), account_name=quoted_key(cloud_account.name))
+
+ # Check users in write_users dict able to create/delete cloud accounts
+ logger.debug('Verifying users which are authorised to create/delete cloud accounts')
+ for user in write_users:
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, write_users[user]))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, write_users[user][1])
+ cloud_pxy = user_session.proxy(cloud_module)
+
+ logger.debug('Trying to create a cloud account')
+ cloud_pxy.replace_config(xpath, cloud_account)
+ response = cloud_pxy.get(xpath_no_pfx)
+ assert response.name == cloud_account.name
+ assert response.account_type == cloud_account.account_type
+
+ logger.debug('Trying to delete the cloud account')
+ cloud_pxy.delete_config(xpath)
+ assert cloud_pxy.get(xpath_no_pfx) is None
+
+ rift.auto.mano.close_session(user_session)
+
+ # admin user creating a cloud account which read_users will be trying to read
+ logger.debug('admin user creating cloud account {}'.format(cloud_account.name))
+ admin_cloud_proxy = mgmt_session.proxy(cloud_module)
+ admin_cloud_proxy.replace_config(xpath, cloud_account)
+ assert admin_cloud_proxy.get(xpath_no_pfx).name == cloud_account.name
+
+ # Check users in read_users dict able to read cloud accounts
+ logger.debug('Verifying users which are authorised to read cloud accounts')
+ for user in read_users:
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, read_users[user]))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, read_users[user][1])
+ cloud_pxy = user_session.proxy(cloud_module)
+
+ response = cloud_pxy.get(xpath_no_pfx)
+ assert response.name == cloud_account.name
+ assert response.account_type == cloud_account.account_type
+
+ rift.auto.mano.close_session(user_session)
+
+ # Check users in fail_users dict not able to delete/read cloud accounts
+ logger.debug('Verifying users which are not authorised to read/delete cloud accounts')
+ for user in fail_users:
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, fail_users[user]))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, fail_users[user][1])
+ cloud_pxy = user_session.proxy(cloud_module)
+
+ with pytest.raises(Exception, message='User {} not authorised to delete cloud account {}'.format(
+ user, cloud_account.name)) as excinfo:
+ logger.debug('User {} trying to delete cloud account {}'.format(user, cloud_account.name))
+ cloud_pxy.delete_config(xpath)
+
+ # logger.debug('User {} trying to access cloud account {}'.format(user, cloud_account.name))
+ # assert cloud_pxy.get(xpath_no_pfx) is None
+ rift.auto.mano.close_session(user_session)
+
+ # admin user deleting the cloud account
+ logger.debug('admin user deleting cloud account {}'.format(cloud_account.name))
+ admin_cloud_proxy.delete_config(xpath)
+ assert admin_cloud_proxy.get(xpath_no_pfx) is None
+
+ # Check users in fail_users dict not able to create cloud accounts
+ logger.debug('Verifying users which are not authorised to create cloud accounts')
+ for user in fail_users:
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, fail_users[user]))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, fail_users[user][1])
+ cloud_pxy = user_session.proxy(cloud_module)
+
+ with pytest.raises(Exception, message='User {} not authorised to create cloud account {}'.format(
+ user, cloud_account.name)) as excinfo:
+ logger.debug('User {} trying to create a cloud account {}'.format(user, cloud_account.name))
+ cloud_pxy.replace_config(xpath, cloud_account)
+
+ rift.auto.mano.close_session(user_session)
+
+ @staticmethod
+ def delete_descriptors(project, vnfd_proxy, nsd_proxy, vnfd_xpath, nsd_xpath, fmt_vnfd_id_xpath, fmt_nsd_id_xpath):
+ nsds = nsd_proxy.get('{}/nsd'.format(nsd_xpath), list_obj=True)
+ for nsd in nsds.nsd:
+ xpath = fmt_nsd_id_xpath.format(project=quoted_key(project), nsd_id=quoted_key(nsd.id))
+ nsd_proxy.delete_config(xpath)
+ nsds = nsd_proxy.get('{}/nsd'.format(nsd_xpath), list_obj=True)
+ assert nsds is None or len(nsds.nsd) == 0
+
+ vnfds = vnfd_proxy.get('{}/vnfd'.format(vnfd_xpath), list_obj=True)
+ for vnfd_record in vnfds.vnfd:
+ xpath = fmt_vnfd_id_xpath.format(project=quoted_key(project), vnfd_id=quoted_key(vnfd_record.id))
+ vnfd_proxy.delete_config(xpath)
+
+ vnfds = vnfd_proxy.get('{}/vnfd'.format(vnfd_xpath), list_obj=True)
+ assert vnfds is None or len(vnfds.vnfd) == 0
+
+ @pytest.mark.skipif(not pytest.config.getoption("--onboarding-test"), reason="need --onboarding-test option to run")
+ def test_onboarding_authorization(self, users_test_data, logger, descriptors, session_class, confd_host,
+ fmt_vnfd_catalog_xpath, fmt_nsd_catalog_xpath, fmt_nsd_id_xpath, fmt_vnfd_id_xpath, project_acessible, mgmt_session):
+ """Verifies only users with certain roles can onboard/update/delete a package"""
+
+ descriptor_vnfds, descriptor_nsd = descriptors[:-1], descriptors[-1]
+ write_users, read_users, fail_users = users_test_data
+ logger.debug('The descriptrs being used: {}'.format(descriptors))
+ nsd_xpath = fmt_nsd_catalog_xpath.format(project=quoted_key(project_acessible))
+ vnfd_xpath = fmt_vnfd_catalog_xpath.format(project=quoted_key(project_acessible))
+
+ def onboard(user_session, project):
+ for descriptor in descriptors:
+ rift.auto.descriptor.onboard(user_session, descriptor, project=project)
+
+ def verify_descriptors(vnfd_pxy, nsd_pxy, vnfd_count, nsd_count):
+ catalog = vnfd_pxy.get_config(vnfd_xpath)
+ actual_vnfds = catalog.vnfd
+ assert len(actual_vnfds) == vnfd_count, 'There should be {} vnfds'.format(vnfd_count)
+ catalog = nsd_pxy.get_config(nsd_xpath)
+ actual_nsds = catalog.nsd
+ assert len(actual_nsds) == nsd_count, 'There should be {} nsd'.format(nsd_count)
+
+ # Check users in write_users dict able to onboard/delete descriptors
+ logger.debug('Verifying users which are authorised to onboard/delete descriptors')
+ for user in write_users:
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, write_users[user]))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, write_users[user][1])
+ vnfd_pxy = user_session.proxy(RwProjectVnfdYang)
+ nsd_pxy = user_session.proxy(RwProjectNsdYang)
+ logger.debug('Trying to onboard ping-pong descriptors')
+ onboard(user_session, project_acessible)
+ logger.debug('Verifying if the descriptors are uploaded')
+ verify_descriptors(vnfd_pxy, nsd_pxy, len(descriptor_vnfds), 1)
+
+ logger.debug('Trying to delete descriptors')
+ TestRbacVerification.delete_descriptors(project_acessible, vnfd_pxy, nsd_pxy, vnfd_xpath, nsd_xpath,
+ fmt_vnfd_id_xpath, fmt_nsd_id_xpath)
+
+ rift.auto.mano.close_session(user_session)
+
+ # onboard the descriptors using mgmt_session which read_users will try to read
+ logger.debug('admin user uploading the descriptors which read_users will try to read')
+ onboard(mgmt_session, project_acessible)
+ admin_vnfd_pxy = mgmt_session.proxy(RwProjectVnfdYang)
+ admin_nsd_pxy = mgmt_session.proxy(RwProjectNsdYang)
+ logger.debug('Verifying if the descriptors are uploaded')
+ verify_descriptors(admin_vnfd_pxy, admin_nsd_pxy, len(descriptor_vnfds), 1)
+
+ # Check users in read_users dict able to read already onboarded descriptors
+ logger.debug('Verifying users which are authorised to read descriptors')
+ for user in read_users:
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, read_users[user]))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, read_users[user][1])
+ vnfd_pxy = user_session.proxy(RwProjectVnfdYang)
+ nsd_pxy = user_session.proxy(RwProjectNsdYang)
+
+ logger.debug('Trying to read ping-pong descriptors')
+ verify_descriptors(vnfd_pxy, nsd_pxy, len(descriptor_vnfds), 1)
+
+ rift.auto.mano.close_session(user_session)
+
+ # Check users in fail_users dict not able to onboard/delete descriptors
+ logger.debug('Verifying users which are not supposed to delete descriptors')
+ for user in fail_users:
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, fail_users[user]))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, fail_users[user][1])
+ vnfd_pxy = user_session.proxy(RwProjectVnfdYang)
+ nsd_pxy = user_session.proxy(RwProjectNsdYang)
+
+ with pytest.raises(Exception, message='User {} not authorised to delete descriptors'.format(user)) as excinfo:
+ logger.debug('User {} trying to delete descriptors'.format(user))
+ TestRbacVerification.delete_descriptors(project_acessible, vnfd_pxy, nsd_pxy, vnfd_xpath, nsd_xpath,
+ fmt_vnfd_id_xpath, fmt_nsd_id_xpath)
+
+ rift.auto.mano.close_session(user_session)
+
+ logger.debug('Deleting the descriptors as fail_users trying to upload the descriptors')
+ TestRbacVerification.delete_descriptors(project_acessible, admin_vnfd_pxy, admin_nsd_pxy, vnfd_xpath, nsd_xpath,
+ fmt_vnfd_id_xpath, fmt_nsd_id_xpath)
+
+ logger.debug('Verifying users which are not supposed to create descriptors')
+ for user in fail_users:
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, fail_users[user]))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, fail_users[user][1])
+ vnfd_pxy = user_session.proxy(RwProjectVnfdYang)
+ nsd_pxy = user_session.proxy(RwProjectNsdYang)
+
+ with pytest.raises(Exception, message='User {} not authorised to onboard descriptors'.format(user)) as excinfo:
+ logger.debug('User {} trying to onboard ping-pong descriptors'.format(user))
+ onboard(user_session)
+
+ rift.auto.mano.close_session(user_session)
+
+ @pytest.mark.skipif(not pytest.config.getoption("--nsr-test"),
+ reason="need --nsr-test option to run")
+ def test_nsr_authorization(self, users_test_data, logger, cloud_account,
+ cloud_module, descriptors, session_class,
+ confd_host, fmt_cloud_xpath,
+ fmt_prefixed_cloud_xpath, mgmt_session, fmt_nsd_id_xpath, fmt_vnfd_id_xpath,
+ project_acessible, fmt_nsd_catalog_xpath, fmt_vnfd_catalog_xpath):
+ """Verifies only users with certain roles can
+ create/read/delete nsr/vlr/vnfr
+ """
+
+ descriptor_vnfds, descriptor_nsd = descriptors[:-1], descriptors[-1]
+ write_users, read_users, fail_users = users_test_data
+
+ # Cloud account creation
+ logger.debug('Creating a cloud account which will be used for NS instantiation')
+ cloud_pxy = mgmt_session.proxy(cloud_module)
+ cloud_pxy.replace_config(fmt_prefixed_cloud_xpath.format(project=quoted_key(project_acessible),
+ account_name=quoted_key(cloud_account.name)),
+ cloud_account)
+ response = cloud_pxy.get(
+ fmt_cloud_xpath.format(project=quoted_key(project_acessible), account_name=quoted_key(cloud_account.name)))
+ assert response.name == cloud_account.name
+
+ cloud_pxy.wait_for(fmt_cloud_xpath.format(project=quoted_key(project_acessible), account_name=quoted_key(
+ cloud_account.name)) + '/connection-status/status', 'success', timeout=30, fail_on=['failure'])
+
+ # Upload the descriptors
+ nsd_xpath = fmt_nsd_catalog_xpath.format(project=quoted_key(project_acessible))
+ vnfd_xpath = fmt_vnfd_catalog_xpath.format(project=quoted_key(project_acessible))
+ logger.debug('Uploading descriptors {} which will be used for NS instantiation'.format(descriptors))
+ for descriptor in descriptors:
+ rift.auto.descriptor.onboard(mgmt_session, descriptor, project=project_acessible)
+ admin_nsd_pxy = mgmt_session.proxy(RwProjectNsdYang)
+ nsd_catalog = admin_nsd_pxy.get_config(nsd_xpath)
+ assert nsd_catalog
+ nsd = nsd_catalog.nsd[0]
+ nsr = rift.auto.descriptor.create_nsr(cloud_account.name, nsd.name, nsd)
+
+ # Check users in write_users dict able to instantiate/delete a NS
+ logger.debug('Verifying users which are authorised to instantiate/delete a NS')
+ for user in write_users:
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, write_users[user]))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, write_users[user][1])
+ rwnsr_pxy = user_session.proxy(RwNsrYang)
+ rwvnfr_pxy = user_session.proxy(RwVnfrYang)
+ rwvlr_pxy = user_session.proxy(RwVlrYang)
+
+ logger.info("Trying to instantiate the Network Service")
+ rift.auto.descriptor.instantiate_nsr(nsr, rwnsr_pxy, logger,
+ project=project_acessible)
+
+ logger.info("Trying to terminate the Network Service")
+ rift.auto.descriptor.terminate_nsr(rwvnfr_pxy, rwnsr_pxy,
+ rwvlr_pxy, logger,
+ project_acessible)
+
+ # Instantiate a NS which the read_users, fail_users will try to
+ # read/delete.
+ admin_rwnsr_pxy = mgmt_session.proxy(RwNsrYang)
+ admin_rwvnfr_pxy = mgmt_session.proxy(RwVnfrYang)
+ admin_rwvlr_pxy = mgmt_session.proxy(RwVlrYang)
+ logger.debug('admin user instantiating NS which the read_users, fail_users will try to read/delete.')
+ rift.auto.descriptor.instantiate_nsr(nsr, admin_rwnsr_pxy, logger, project=project_acessible)
+
+ # Check users in read_users, write_users dict able to read vnfr-console, vnfr-catalog, ns-instance-opdata
+ p_xpath = '/project[name={}]'.format(quoted_key(project_acessible))
+ read_xpaths = ['/ns-instance-opdata', '/vnfr-catalog', '/vnfr-console']
+ logger.debug('Verifying users which are authorised to read vnfr-catalog, ns-instance-opdata, vnfr-console etc')
+ for user, role_passwd_tuple in dict(write_users, **read_users).items():
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, role_passwd_tuple))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, role_passwd_tuple[1])
+ rwnsr_pxy = user_session.proxy(RwNsrYang)
+ rwvnfr_pxy = user_session.proxy(RwVnfrYang)
+ for xpath in read_xpaths:
+ logger.debug('Trying to read xpath: {}'.format(p_xpath+xpath))
+ proxy_ = rwvnfr_pxy if 'vnfr' in xpath else rwnsr_pxy
+ assert proxy_.get(p_xpath+xpath)
+
+ rift.auto.mano.close_session(user_session)
+
+ # Check users in fail_users dict not able to terminate a NS
+ logger.debug('Verifying users which are NOT authorised to terminate a NS')
+ for user in fail_users:
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, fail_users[user]))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, fail_users[user][1])
+ rwnsr_pxy = user_session.proxy(RwNsrYang)
+ rwvnfr_pxy = user_session.proxy(RwVnfrYang)
+
+ with pytest.raises(Exception, message='User {} not authorised to terminate NS'.format(user)) as excinfo:
+ logger.debug('User {} trying to delete NS'.format(user))
+ rift.auto.descriptor.terminate_nsr(rwvnfr_pxy, rwnsr_pxy,
+ logger, admin_rwvlr_pxy,
+ project=project_acessible)
+ rift.auto.mano.close_session(user_session)
+
+ # Terminate the NS instantiated by admin user
+ logger.debug('admin user terminating the NS')
+ rift.auto.descriptor.terminate_nsr(admin_rwvnfr_pxy,
+ admin_rwnsr_pxy,
+ admin_rwvlr_pxy, logger,
+ project=project_acessible)
+
+ # Check users in fail_users dict not able to instantiate a NS
+ nsr.id = str(uuid.uuid4())
+ logger.debug('Verifying users which are NOT authorised to instantiate a NS')
+ for user in fail_users:
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, fail_users[user]))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, fail_users[user][1])
+ rwnsr_pxy = user_session.proxy(RwNsrYang)
+ rwvnfr_pxy = user_session.proxy(RwVnfrYang)
+
+ with pytest.raises(Exception, message='User {} not authorised to instantiate NS'.format(user)) as excinfo:
+ logger.debug('User {} trying to instantiate NS'.format(user))
+ rift.auto.descriptor.instantiate_nsr(nsr, rwnsr_pxy, logger, project=project_acessible)
+ rift.auto.mano.close_session(user_session)
+
+ # delete cloud accounts and descriptors; else deleting project in teardown fails
+ cloud_pxy.delete_config(fmt_prefixed_cloud_xpath.format(project=quoted_key(project_acessible),
+ account_name=quoted_key(cloud_account.name)))
+ admin_vnfd_pxy = mgmt_session.proxy(RwProjectVnfdYang)
+ TestRbacVerification.delete_descriptors(project_acessible, admin_vnfd_pxy, admin_nsd_pxy, vnfd_xpath, nsd_xpath,
+ fmt_vnfd_id_xpath, fmt_nsd_id_xpath)
+
+ @pytest.mark.skipif(not pytest.config.getoption("--syslog-server-test"), reason="need --syslog-server-test option to run")
+ def test_set_syslog_server_authorization(self, mgmt_session, users_test_data, session_class, confd_host, logger):
+ """Verifies only users with certain roles can set syslog server"""
+ write_users, read_users, fail_users = users_test_data
+ admin_log_mgmt_pxy = mgmt_session.proxy(RwlogMgmtYang)
+
+ def update_syslog_server_address(user_log_mgmt_pxy):
+ ip = '127.0.0.{}'.format(random.randint(0,255))
+ sink_obj = RwlogMgmtYang.Logging_Sink.from_dict({'server_address': ip})
+
+ syslog_name = admin_log_mgmt_pxy.get_config('/logging').sink[0].name
+ logger.debug('updating the syslog {} server_address to {}'.format(syslog_name, ip))
+ user_log_mgmt_pxy.merge_config('/logging/sink[name={sink_name}]'.format(sink_name=quoted_key(syslog_name)), sink_obj)
+ assert [sink.server_address for sink in admin_log_mgmt_pxy.get_config('/logging').sink if sink.name == syslog_name][0] == ip
+
+ for user, role_passwd_tuple in dict(write_users, **dict(read_users, **fail_users)).items():
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, role_passwd_tuple))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, role_passwd_tuple[1])
+ user_log_mgmt_pxy = user_session.proxy(RwlogMgmtYang)
+
+ if user in write_users:
+ logger.debug('User {} should be able to update the syslog server address'.format(user))
+ update_syslog_server_address(user_log_mgmt_pxy)
+
+ if user in fail_users:
+ with pytest.raises(Exception, message='User {} not authorised to set syslog server address'.format(user)) as excinfo:
+ logger.debug('User {} trying to update the syslog server address. It should fail'.format(user))
+ update_syslog_server_address(user_log_mgmt_pxy)
+
+ if user in read_users:
+ logger.debug('User {} trying to read the syslog server address'.format(user))
+ logging_obj = user_log_mgmt_pxy.get_config('/logging')
+ assert logging_obj.sink[0]
+ assert logging_obj.sink[0].server_address
+
+ @pytest.mark.skipif(not pytest.config.getoption("--redundancy-role-test"), reason="need --redundancy-role-test option to run")
+ def test_redundancy_config_authorization(self, mgmt_session, users_test_data, session_class, confd_host, logger, redundancy_config_test_roles):
+ """Verifies only users with certain roles can set redundancy-config or read redundancy-state"""
+ write_users, read_users, fail_users = users_test_data
+ admin_redundancy_pxy = mgmt_session.proxy(RwRedundancyYang)
+ site_nm_pfx = 'ha_site_'
+
+ def create_redundancy_site(user_redundancy_pxy, site_nm):
+ site_id = '127.0.0.1'
+ site_obj = RwRedundancyYang.YangData_RwRedundancy_RedundancyConfig_Site.from_dict({'site_name': site_nm, 'site_id': site_id})
+
+ logger.debug('Creating redundancy site {}'.format(site_nm))
+ user_redundancy_pxy.create_config('/rw-redundancy:redundancy-config/rw-redundancy:site', site_obj)
+ assert [site.site_name for site in admin_redundancy_pxy.get_config('/redundancy-config/site', list_obj=True).site if site.site_name == site_nm]
+
+ def delete_redundancy_site(user_redundancy_pxy, site_nm):
+ logger.debug('Deleting redundancy site {}'.format(site_nm))
+ user_redundancy_pxy.delete_config('/rw-redundancy:redundancy-config/rw-redundancy:site[rw-redundancy:site-name={}]'.format(quoted_key(site_nm)))
+ assert not [site.site_name for site in admin_redundancy_pxy.get_config('/redundancy-config/site', list_obj=True).site if site.site_name == site_nm]
+
+ # Create a redundancy site which fail user will try to delete/ read user will try to read
+ create_redundancy_site(admin_redundancy_pxy, 'test_site')
+
+ for user, role_passwd_tuple in dict(write_users, **dict(read_users, **fail_users)).items():
+ logger.debug('Verifying user:(role,password) {}:{}'.format(user, role_passwd_tuple))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, role_passwd_tuple[1])
+ user_redundancy_pxy = user_session.proxy(RwRedundancyYang)
+
+ if user in write_users:
+ site_nm = '{}_{}'.format(site_nm_pfx, user)
+ logger.debug('User {} should be able to create a new redundancy site {}'.format(user, site_nm))
+ create_redundancy_site(user_redundancy_pxy, site_nm)
+
+ logger.debug('User {} should be able to delete a redundancy site {}'.format(user, site_nm))
+ delete_redundancy_site(user_redundancy_pxy, site_nm)
+
+ assert user_redundancy_pxy.get('/redundancy-state')
+
+ if user in fail_users:
+ site_nm = '{}_{}'.format(site_nm_pfx, user)
+ with pytest.raises(Exception, message='User {} not authorised to create redundancy site'.format(user)) as excinfo:
+ logger.debug('User {} trying to create redundancy site {}. It should fail'.format(user, site_nm))
+ create_redundancy_site(user_redundancy_pxy, site_nm)
+
+ with pytest.raises(Exception, message='User {} not authorised to delete redundancy site'.format(user)) as excinfo:
+ logger.debug('User {} trying to delete redundancy site {}. It should fail'.format(user, site_nm))
+ delete_redundancy_site(user_redundancy_pxy, 'test_site')
+
+ if user in read_users:
+ logger.debug('User {} trying to read redundancy-config'.format(user))
+ assert user_redundancy_pxy.get('/redundancy-state')
+ assert user_redundancy_pxy.get('/redundancy-config')
+
+
+@pytest.mark.depends('test_rbac_roles_setup')
+@pytest.mark.teardown('test_rbac_roles_setup')
+@pytest.mark.incremental
+class TestRbacTeardown(object):
+ def test_delete_project(self, rw_project_proxy, logger, project_keyed_xpath, project_acessible):
+ """Deletes projects used for the test"""
+ if rw_project_proxy.get_config(project_keyed_xpath.format(project_name=quoted_key(project_acessible))+'/project-state', list_obj=True):
+ logger.debug('Deleting project {}'.format(project_acessible))
+ rift.auto.mano.delete_project(rw_project_proxy, project_acessible)
+
+ def test_delete_users(self, users_test_data, logger, rw_user_proxy, rbac_platform_proxy, platform_config_keyed_xpath,
+ user_keyed_xpath, user_domain, rw_conman_proxy, project_acessible):
+ """Deletes the users which are part of rbac test-data and verify their deletion"""
+ write_users, read_users, fail_users = users_test_data
+
+ for user, role_passwd_tuple in dict(write_users, **dict(read_users, **fail_users)).items():
+ logger.debug('Deleting user:(role,password) {}:{}'.format(user, role_passwd_tuple))
+ if any('platform' in role for role in role_passwd_tuple[0]):
+ rbac_platform_proxy.delete_config(platform_config_keyed_xpath.format(user=quoted_key(user), domain=quoted_key(user_domain)))
+ rw_user_proxy.delete_config(user_keyed_xpath.format(user=quoted_key(user), domain=quoted_key(user_domain)))
+
+ # Verify if the user is deleted
+ user_config = rw_user_proxy.get_config('/user-config')
+ current_users_list = [user.user_name for user in user_config.user]
+
+ assert user not in current_users_list
+
+ # Verify only two users should be present now: oper & admin
+ user_config = rw_user_proxy.get_config('/user-config')
+ current_users_list = [user.user_name for user in user_config.user]
+
+ logger.debug('Current users list after deleting all test users: {}'.format(current_users_list))
+ expected_empty_user_list = [user for user in users_test_data if user in current_users_list]
+ assert not expected_empty_user_list
--- /dev/null
+#!/usr/bin/env python3
+"""
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+
+import gi
+import pytest
+import time
+import random
+import rift.auto.mano
+import rift.auto.descriptor
+
+gi.require_version('RwConmanYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('RwRbacInternalYang', '1.0')
+gi.require_version('RwRbacPlatformYang', '1.0')
+gi.require_version('RwProjectYang', '1.0')
+gi.require_version('RwUserYang', '1.0')
+gi.require_version('RwOpenidcProviderYang', '1.0')
+from gi.repository import (
+ RwConmanYang,
+ RwProjectVnfdYang,
+ RwProjectNsdYang,
+ RwNsrYang,
+ RwVnfrYang,
+ RwVlrYang,
+ RwRbacInternalYang,
+ RwRbacPlatformYang,
+ RwProjectYang,
+ RwUserYang,
+ RwOpenidcProviderYang,
+)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+@pytest.fixture(scope='session')
+def complex_scaling_factor():
+ return 10
+
+@pytest.mark.incremental
+class TestRbacSetup(object):
+ def test_onboarded_vnfds_project_independent(self, descriptors, logger, rbac_platform_proxy, rw_conman_proxy, rw_user_proxy,
+ rw_project_proxy, rbac_user_passwd, user_domain, fmt_vnfd_catalog_xpath, session_class, confd_host, fmt_vnfd_id_xpath, rw_rbac_int_proxy):
+ """Same VNFDs on boarded in two different projects. VNFD changes in one project shouldn't affect another."""
+ map_project_user_roles = {
+ 'user1': ('project_test_onboarded_vnfds_project_independent_1', 'rw-project-mano:catalog-admin'),
+ 'user2': ('project_test_onboarded_vnfds_project_independent_2', 'rw-project:project-admin'),
+ }
+ user_to_modify_vnfds, user_not_supposed_to_see_vnfd_changes = 'user1', 'user2'
+
+ modified_vnfd_name = 'test_rbac_vnfd'
+ user_sessions = {}
+ logger.debug('descriptors being used: {}'.format(descriptors))
+
+ for user, project_role_tuple in map_project_user_roles.items():
+ project_name, role = project_role_tuple
+ logger.debug('Creating user {} with {}'.format(user, project_role_tuple))
+
+ rift.auto.mano.create_project(rw_conman_proxy, project_name)
+ rift.auto.mano.create_user(rw_user_proxy, user, rbac_user_passwd, user_domain)
+ if 'platform' in role:
+ rift.auto.mano.assign_platform_role_to_user(rbac_platform_proxy, role, user, user_domain, rw_rbac_int_proxy)
+ else:
+ rift.auto.mano.assign_project_role_to_user(rw_project_proxy, role, user,
+ project_name, user_domain, rw_rbac_int_proxy)
+
+ logger.debug('User {} onboarding the packages'.format(user))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, user, rbac_user_passwd)
+ user_sessions[user] = user_session
+ for descriptor in descriptors:
+ rift.auto.descriptor.onboard(user_session, descriptor, project=project_name)
+
+ vnfd_pxy = user_sessions[user_to_modify_vnfds].proxy(RwProjectVnfdYang)
+ vnfd_xpath = '{}/vnfd'.format(fmt_vnfd_catalog_xpath.format(project=quoted_key(map_project_user_roles[user_to_modify_vnfds][0])))
+ for vnfd in vnfd_pxy.get(vnfd_xpath, list_obj=True).vnfd:
+ logger.debug('Changing the vnfd name from {} to {} for user {}'.format(vnfd.name, modified_vnfd_name, user_to_modify_vnfds))
+ vnfd.name = modified_vnfd_name
+ vnfd_pxy.replace_config(fmt_vnfd_id_xpath.format(
+ project=quoted_key(map_project_user_roles[user_to_modify_vnfds][0]), vnfd_id=quoted_key(vnfd.id)), vnfd)
+
+ for vnfd in vnfd_pxy.get(vnfd_xpath, list_obj=True).vnfd:
+ assert vnfd.name == modified_vnfd_name
+
+ vnfd_pxy = user_sessions[user_not_supposed_to_see_vnfd_changes].proxy(RwProjectVnfdYang)
+ vnfd_xpath = '{}/vnfd'.format(fmt_vnfd_catalog_xpath.format(project=quoted_key(map_project_user_roles[user_not_supposed_to_see_vnfd_changes][0])))
+ for vnfd in vnfd_pxy.get(vnfd_xpath, list_obj=True).vnfd:
+ logger.debug('Verifying the vnfd name {} for user {} did not change to {}'.format(
+ vnfd.name, user_not_supposed_to_see_vnfd_changes, modified_vnfd_name))
+ assert vnfd.name != modified_vnfd_name
+
+ def test_multi_projects_multi_vnf(
+ self, rw_project_proxy, rw_conman_proxy, cloud_account,
+ cloud_module, descriptors, session_class,
+ confd_host, user_domain, mgmt_session, fmt_nsd_catalog_xpath,
+ logger, rw_rbac_int_proxy):
+ """Creates multiple projects, cloud accounts and then
+ instantiates them. Then it lets the instantiated NS's run for a minute
+ after which gets terminated. Use the SCALE_FACTOR to adjust the number
+ of instantiations."""
+
+ def instantiate_nsr_not_wait(nsr, rwnsr_proxy, project='default'):
+ ns_instance_opdata_xpath = '/project[name={}]/ns-instance-opdata'.format(quoted_key(project))
+ rwnsr_proxy.create_config('/rw-project:project[rw-project:name={}]/nsr:ns-instance-config/nsr:nsr'.format(quoted_key(project)), nsr)
+ nsr_opdata = rwnsr_proxy.get('{}/nsr[ns-instance-config-ref={}]'.format(ns_instance_opdata_xpath, quoted_key(nsr.id)))
+ assert nsr_opdata is not None
+
+ nsr_opdata = rwnsr_proxy.get(ns_instance_opdata_xpath)
+ nsr_ = [nsr_ for nsr_ in nsr_opdata.nsr if nsr_.ns_instance_config_ref==nsr.id][0]
+
+ #Creating multiple projects according to the scale factor
+ SCALE_FACTOR = 5
+ PROJECT_LIST = {}
+ for idx in range(1,SCALE_FACTOR+1):
+ rift.auto.mano.create_project(rw_conman_proxy, 'cloud_project_{}'.format(idx))
+ PROJECT_LIST['cloud_project_{}'.format(idx)] = None
+ rift.auto.mano.assign_project_role_to_user(rw_project_proxy, 'rw-project:project-admin', 'admin', 'cloud_project_{}'
+ .format(idx), 'system', rw_rbac_int_proxy)
+ #Creating cloud accounts, uploading descriptors, instantiating NS
+ for project_name in PROJECT_LIST:
+ rift.auto.mano.create_cloud_account(mgmt_session, cloud_account, project_name)
+ for descriptor in descriptors:
+ rift.auto.descriptor.onboard(mgmt_session, descriptor, project=project_name)
+ admin_nsd_pxy = mgmt_session.proxy(RwProjectNsdYang)
+ nsd_catalog = admin_nsd_pxy.get_config(fmt_nsd_catalog_xpath.format(project=quoted_key(project_name)))
+ assert nsd_catalog
+ nsd = nsd_catalog.nsd[0]
+ nsr = rift.auto.descriptor.create_nsr(cloud_account.name, nsd.name, nsd)
+ PROJECT_LIST[project_name] = nsr
+
+ for project_name, NSR in PROJECT_LIST.items():
+ admin_rwnsr_pxy = mgmt_session.proxy(RwNsrYang)
+ admin_rwvnfr_pxy = mgmt_session.proxy(RwVnfrYang)
+ admin_rwvlr_pxy = mgmt_session.proxy(RwVlrYang)
+ instantiate_nsr_not_wait(NSR, admin_rwnsr_pxy,
+ project=project_name)
+
+ # Waiting for NS's to get started and configured.
+ for project_name in PROJECT_LIST:
+ admin_rwnsr_pxy = mgmt_session.proxy(RwNsrYang)
+ nsr_opdata = admin_rwnsr_pxy.get('/rw-project:project[rw-project:name={}]/ns-instance-opdata'.format(quoted_key(project_name)))
+ nsrs = nsr_opdata.nsr
+
+ for nsr in nsrs:
+ xpath = "/rw-project:project[rw-project:name={}]/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(
+ quoted_key(project_name), quoted_key(nsr.ns_instance_config_ref))
+ admin_rwnsr_pxy.wait_for(xpath, "running", fail_on=['failed'], timeout=400)
+
+ for nsr in nsrs:
+ xpath = "/rw-project:project[rw-project:name={}]/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(
+ quoted_key(project_name), quoted_key(nsr.ns_instance_config_ref))
+ admin_rwnsr_pxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=400)
+
+ # Letting the started NS's run for a minute after which is terminated
+ start_time = time.time()
+ while (time.time() - start_time) < 60:
+ time.sleep(2)
+ for project_name in PROJECT_LIST:
+ rift.auto.descriptor.terminate_nsr(
+ admin_rwvnfr_pxy, admin_rwnsr_pxy, admin_rwvlr_pxy, logger,
+ project=project_name)
+
+ def test_descriptor_nsr_persistence_check(
+ self, rw_conman_proxy, rw_user_proxy, rw_project_proxy,
+ cloud_account, cloud_module, mgmt_session, descriptors, logger,
+ user_domain, session_class, confd_host, rbac_user_passwd,
+ fmt_nsd_catalog_xpath, rw_rbac_int_proxy):
+ """Creates a project and cloud account for it. Uploads descriptors.
+ Logs in as project-admin and checks if the uploaded descriptors
+ are still there, after which he logs out.
+ Then instantiates nsr. Again logs in as project admin and checks
+ if the instantiated nsr is still there."""
+ # Creating a project, assigning project admin and creating
+ # a cloud account for the project
+ for idx in range(1,6):
+ rift.auto.mano.create_project(rw_conman_proxy, 'xcloud_project_{}'.format(idx))
+ rift.auto.mano.create_user(rw_user_proxy, 'project_admin_{}'.format(idx), rbac_user_passwd, user_domain)
+ rift.auto.mano.assign_project_role_to_user(rw_project_proxy, 'rw-project:project-admin', 'project_admin_{}'
+ .format(idx), 'xcloud_project_{}'.format(idx), user_domain, rw_rbac_int_proxy)
+ rift.auto.mano.create_cloud_account(mgmt_session, cloud_account, 'xcloud_project_{}'.format(idx))
+ #Uploading descriptors and verifying its existence from another user(project admin)
+ for descriptor in descriptors:
+ rift.auto.descriptor.onboard(mgmt_session, descriptor, project='xcloud_project_{}'.format(idx))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, 'project_admin_{}'.format(idx), rbac_user_passwd)
+ project_admin_nsd_pxy = user_session.proxy(RwProjectNsdYang)
+ nsd_catalog = project_admin_nsd_pxy.get_config(fmt_nsd_catalog_xpath.format(project=quoted_key('xcloud_project_{}'.format(idx))))
+ assert nsd_catalog, "Descriptor Not found on try no: {}".format(idx)
+ nsd = nsd_catalog.nsd[0]
+ nsr = rift.auto.descriptor.create_nsr(cloud_account.name, nsd.name, nsd)
+ rift.auto.mano.close_session(user_session)
+ #Instantiating the nsr and verifying its existence from another user(project admin), after which it gets terminated
+ admin_rwnsr_pxy = mgmt_session.proxy(RwNsrYang)
+ admin_rwvnfr_pxy = mgmt_session.proxy(RwVnfrYang)
+ admin_rwvlr_pxy = mgmt_session.proxy(RwVlrYang)
+
+ rift.auto.descriptor.instantiate_nsr(nsr, admin_rwnsr_pxy, logger, project='xcloud_project_{}'.format(idx))
+ user_session = rift.auto.mano.get_session(session_class, confd_host, 'project_admin_{}'.format(idx), rbac_user_passwd)
+ pxy = user_session.proxy(RwNsrYang)
+ nsr_opdata = pxy.get('/rw-project:project[rw-project:name={}]/ns-instance-opdata'.format(quoted_key('xcloud_project_{}'.format(idx))))
+ nsrs = nsr_opdata.nsr
+ for nsr in nsrs:
+ xpath = "/rw-project:project[rw-project:name={}]/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(
+ quoted_key('xcloud_project_{}'.format(idx)), quoted_key(nsr.ns_instance_config_ref))
+ pxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=60)
+ rift.auto.mano.close_session(user_session)
+ rift.auto.descriptor.terminate_nsr(
+ admin_rwvnfr_pxy, admin_rwnsr_pxy, admin_rwvlr_pxy, logger,
+ project='xcloud_project_{}'.format(idx))
+
+ def delete_records(self, nsd_proxy, vnfd_proxy, project_name='default'):
+ """Delete the NSD & VNFD records."""
+ nsds = nsd_proxy.get(
+ "/rw-project:project[rw-project:name={}]/nsd-catalog/nsd".format(
+ quoted_key(project_name)),
+ list_obj=True)
+ for nsd in nsds.nsd:
+ xpath = (
+ "/rw-project:project[rw-project:name={}]".format(
+ quoted_key(project_name)) +
+ "/nsd-catalog/nsd[id={}]".format(quoted_key(nsd.id))
+ )
+ nsd_proxy.delete_config(xpath)
+
+ nsds = nsd_proxy.get(
+ "/rw-project:project[rw-project:name={}]/nsd-catalog/nsd".format(
+ quoted_key(project_name)),
+ list_obj=True)
+ assert nsds is None or len(nsds.nsd) == 0
+
+ vnfds = vnfd_proxy.get(
+ "/rw-project:project[rw-project:name={}]/vnfd-catalog/vnfd".format(
+ quoted_key(project_name)),
+ list_obj=True)
+ for vnfd_record in vnfds.vnfd:
+ xpath = (
+ "/rw-project:project[rw-project:name={}]/".format(
+ quoted_key(project_name)) +
+ "vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_record.id))
+ )
+ vnfd_proxy.delete_config(xpath)
+
+ vnfds = vnfd_proxy.get(
+ "/rw-project:project[rw-project:name={}]/vnfd-catalog/vnfd".format(
+ quoted_key(project_name)),
+ list_obj=True)
+ assert vnfds is None or len(vnfds.vnfd) == 0
+
+ def test_delete_project_and_vim_accounts(
+ self, rw_conman_proxy, rw_user_proxy, logger,
+ rbac_user_passwd, user_domain, rw_project_proxy, rw_rbac_int_proxy,
+ mgmt_session, cloud_module, cloud_account, descriptors,
+ fmt_nsd_catalog_xpath, session_class, confd_host):
+ """Testing vim accounts."""
+ # Create a project and three cloud accounts for it.
+ rift.auto.mano.create_project(rw_conman_proxy, 'vim_project')
+ rift.auto.mano.assign_project_role_to_user(
+ rw_project_proxy, 'rw-project:project-admin', 'admin',
+ 'vim_project', 'system', rw_rbac_int_proxy)
+ for idx in range(1, 4):
+ rift.auto.mano.create_cloud_account(
+ mgmt_session, cloud_account,
+ 'vim_project', 'cloud_account_{}'.format(idx))
+ # Uploading descriptors
+ for descriptor in descriptors:
+ rift.auto.descriptor.onboard(
+ mgmt_session, descriptor, project='vim_project')
+ nsd_pxy = mgmt_session.proxy(RwProjectNsdYang)
+ nsd_catalog = nsd_pxy.get_config(fmt_nsd_catalog_xpath.format(
+ project=quoted_key('vim_project')))
+ assert nsd_catalog
+ nsd = nsd_catalog.nsd[0]
+ nsr = rift.auto.descriptor.create_nsr(
+ 'cloud_account_1', nsd.name, nsd)
+ # Instantiating the nsr
+ rwnsr_pxy = mgmt_session.proxy(RwNsrYang)
+ rift.auto.descriptor.instantiate_nsr(
+ nsr, rwnsr_pxy, logger, project='vim_project')
+ # Trying to delete the project before taking the instance down
+ with pytest.raises(
+ Exception,
+ message="Project deletion should've failed"):
+ rift.auto.mano.delete_project(rw_conman_proxy, 'vim_project')
+ # Trying to delete the vim account before taking the instance down
+ with pytest.raises(
+ Exception,
+ message="Vim account deletion should've failed"):
+ rift.auto.mano.delete_cloud_account(
+ mgmt_session, 'cloud_account_1', 'vim_project')
+ # Terminating the nsr
+ rwvnfr_pxy = mgmt_session.proxy(RwVnfrYang)
+ rwvlr_pxy = mgmt_session.proxy(RwVlrYang)
+ rift.auto.descriptor.terminate_nsr(
+ rwvnfr_pxy, rwnsr_pxy, rwvlr_pxy, logger, project='vim_project')
+ # Delete all cloud accounts for the project
+ for idx in range(1, 4):
+ rift.auto.mano.delete_cloud_account(
+ mgmt_session, 'cloud_account_{}'.format(idx), 'vim_project')
+ # Delete the uploaded descriptors
+ vnfd_proxy = mgmt_session.proxy(RwProjectVnfdYang)
+ self.delete_records(nsd_pxy, vnfd_proxy, 'vim_project')
+ # Delete the project
+ rift.auto.mano.delete_project(rw_conman_proxy, 'vim_project')
+ # Check in rw-rbac-internal if project is removed
+ rwinternal_xpath = '/rw-rbac-internal/role'
+ response = (
+ rw_rbac_int_proxy.get(
+ rwinternal_xpath, list_obj=True)
+ ).as_dict()['role']
+ keys = [role['keys'] for role in response if 'keys' in role]
+ for key in keys:
+ assert 'vim_project' not in key, "Improper project deletion"
+
+ @pytest.mark.skipif(
+ not pytest.config.getoption("--complex-scaling"),
+ reason="need --complex-scaling option to run")
+ def test_complex_scaling(
+ self, rw_conman_proxy, rw_user_proxy, rbac_user_passwd,
+ user_domain, rw_project_proxy, rw_rbac_int_proxy, logger,
+ rbac_platform_proxy, user_roles, platform_roles, mgmt_session,
+ cloud_module, cloud_account, rw_ro_account_proxy,
+ tbac, fmt_nsd_catalog_xpath, descriptors, complex_scaling_factor):
+ """Complex scaling - Default values.
+
+ No. of projects - 25 (Two users & two cloud accounts per project)
+ No. of users - 50 (Two roles per user)
+ No. of cloud accounts - 50
+ No. of RO accounts - 25 (50 if you are considering the default 'rift').
+ """
+ # This test can be controlled using complex_scaling_factor fixture
+ logger.debug('Creating projects')
+ for idx in range(1, complex_scaling_factor + 1):
+ rift.auto.mano.create_project(
+ rw_conman_proxy, 'scaling_project_{}'.format(idx)
+ )
+ logger.debug('Create users, cloud accounts double the no. of projects')
+ for idx in range(1, (2 * complex_scaling_factor) + 1):
+ project_index = int((idx + 1) / 2)
+ rift.auto.mano.create_user(
+ rw_user_proxy, 'scaling_user_{}'.format(idx),
+ rbac_user_passwd, user_domain)
+ # Each user has a project role & platform role
+ pr_role = random.choice(user_roles)
+ pl_role = random.choice(platform_roles)
+ rift.auto.mano.assign_project_role_to_user(
+ rw_project_proxy, pr_role, 'scaling_user_{}'.format(idx),
+ 'scaling_project_{}'.format(project_index), user_domain,
+ rw_rbac_int_proxy)
+ rift.auto.mano.assign_platform_role_to_user(
+ rbac_platform_proxy, pl_role,
+ 'scaling_user_{}'.format(idx), user_domain, rw_rbac_int_proxy)
+ # Creating two cloud accounts for each project
+ rift.auto.mano.create_cloud_account(
+ mgmt_session, cloud_account,
+ 'scaling_project_{}'.format(project_index),
+ 'cloud_account_{}'.format(idx)
+ )
+ logger.debug('Creating RO accounts')
+ for idx in range(1, complex_scaling_factor + 1):
+ rift.auto.mano.create_ro_account(
+ rw_ro_account_proxy, 'ro_account_{}'.format(idx),
+ 'scaling_project_{}'.format(idx)
+ )
+ # Uploading descriptors
+ for descriptor in descriptors:
+ rift.auto.descriptor.onboard(
+ mgmt_session, descriptor,
+ project='scaling_project_{}'.format(idx)
+ )
+ nsd_pxy = mgmt_session.proxy(RwProjectNsdYang)
+ nsd_catalog = nsd_pxy.get_config(
+ fmt_nsd_catalog_xpath.format(
+ project=quoted_key('scaling_project_{}'.format(idx))
+ )
+ )
+ assert nsd_catalog
+
+ @pytest.mark.skipif(
+ not pytest.config.getoption("--complex-scaling"),
+ reason="need --complex-scaling option to run")
+ def test_complex_scaling_verification(
+ self, complex_scaling_factor, rw_project_proxy, rw_ro_account_proxy,
+ mgmt_session, fmt_nsd_catalog_xpath, cloud_module, logger):
+ """Reboot verification script for test_complex_scaling."""
+ for idx in range(1, complex_scaling_factor + 1):
+ # Verifying projects
+ logger.debug('Verification: projects, ro accounts started')
+ project_name = 'scaling_project_{}'.format(idx)
+ project_cm_config_xpath = '/project[name={project_name}]/project-state'
+ project_ = rw_project_proxy.get_config(
+ project_cm_config_xpath.format(
+ project_name=quoted_key(project_name)
+ ),
+ list_obj=True
+ )
+ assert project_
+ # Verifying RO Accounts
+ ro_account_name = 'ro_account_{}'.format(idx)
+ ro_obj = rw_ro_account_proxy.get_config(
+ '/project[name={}]/ro-account/account[name={}]'.format(
+ quoted_key(project_name), quoted_key(ro_account_name))
+ )
+ assert ro_obj.name == ro_account_name
+ assert ro_obj.ro_account_type == 'openmano'
+ logger.debug('Verification: descriptors, cloud accounts started')
+ # Verifying Descriptors
+ nsd_pxy = mgmt_session.proxy(RwProjectNsdYang)
+ nsd_catalog = nsd_pxy.get_config(
+ fmt_nsd_catalog_xpath.format(
+ project=quoted_key(project_name)
+ )
+ )
+ assert nsd_catalog
+ for idx in range(1, (2 * complex_scaling_factor) + 1):
+ # Verifying cloud accounts
+ project_index = int((idx + 1) / 2)
+ project_name = 'scaling_project_{}'.format(project_index)
+ cloud_acc_name = 'cloud_account_{}'.format(idx)
+ fmt_cloud_xpath = (
+ '/project[name={project}]/cloud/account[name={account_name}]'
+ )
+ cloud_pxy = mgmt_session.proxy(cloud_module)
+ response = cloud_pxy.get(fmt_cloud_xpath.format(
+ project=quoted_key(project_name),
+ account_name=quoted_key(cloud_acc_name))
+ )
+ assert response.name == cloud_acc_name
+
+
+ def test_change_visibility_same_session(self, session_class, rw_conman_proxy, confd_host, logger,
+ user_domain, project_keyed_xpath, rw_project_proxy, rw_rbac_int_proxy, rw_user_proxy):
+ """admin make changes which is seen by the operator already logged in for the same project.
+
+ oper is logged in. admin assigns oper to a new project X. oper should be able to see the new project X being \
+ in the same session without re-logging-in.
+ """
+ user = 'oper2' if user_domain != 'default' else 'oper'
+ oper_user, oper_passwd = [user]*2
+
+ if user_domain != 'default':
+ rift.auto.mano.create_user(rw_user_proxy, oper_user, oper_passwd, user_domain)
+ rift.auto.mano.assign_project_role_to_user(rw_project_proxy, 'rw-project:project-oper', oper_user,
+ 'default', user_domain, rw_rbac_int_proxy)
+ oper_session = rift.auto.mano.get_session(session_class, confd_host, oper_user, oper_passwd)
+ oper_conman_pxy = oper_session.proxy(RwProjectYang)
+
+ default_project_cm_config_xpath = project_keyed_xpath.format(project_name=quoted_key('default'))+'/project-state'
+ assert oper_conman_pxy.get_config(default_project_cm_config_xpath, list_obj=True)
+
+ # admin assigns oper 'project-admin' role under a new project
+ new_project = 'project_test_change_visibility_same_session_1'
+ rift.auto.mano.create_project(rw_project_proxy, new_project)
+ rift.auto.mano.assign_project_role_to_user(rw_project_proxy, 'rw-project:project-admin', oper_user, new_project,
+ user_domain, rw_rbac_int_proxy)
+
+ # Check oper user should be able to access the new project
+ new_project_cm_config_xpath = project_keyed_xpath.format(project_name=quoted_key(new_project))+'/project-state'
+ assert oper_conman_pxy.get_config(new_project_cm_config_xpath, list_obj=True)
+
+ def test_super_admin(
+ self, rw_user_proxy, rbac_platform_proxy, rw_project_proxy,
+ session_class, confd_host, rbac_user_passwd, user_domain,
+ rw_rbac_int_proxy):
+ """Variou tests on the super-admin role."""
+ # Creating two super admins and then deleting the first one.
+ rift.auto.mano.create_user(
+ rw_user_proxy, 'super_admin', rbac_user_passwd, user_domain)
+ rift.auto.mano.assign_platform_role_to_user(
+ rbac_platform_proxy, 'rw-rbac-platform:super-admin',
+ 'super_admin', user_domain, rw_rbac_int_proxy)
+ rift.auto.mano.create_user(
+ rw_user_proxy, 'super_admin_2', rbac_user_passwd, user_domain)
+ rift.auto.mano.assign_platform_role_to_user(
+ rbac_platform_proxy, 'rw-rbac-platform:super-admin',
+ 'super_admin_2', user_domain, rw_rbac_int_proxy)
+
+ user_session = rift.auto.mano.get_session(
+ session_class, confd_host, 'super_admin_2', rbac_user_passwd)
+ pxy = user_session.proxy(RwRbacPlatformYang)
+ role_keyed_path = (
+ "/rbac-platform-config/" +
+ "user[user-name={user}][user-domain={domain}]"
+ )
+ pxy.delete_config(role_keyed_path.format(
+ user=quoted_key('super_admin'), domain=quoted_key(user_domain))
+ )
+ pxy = user_session.proxy(RwUserYang)
+ rift.auto.mano.delete_user(pxy, 'super_admin', user_domain)
+ rift.auto.mano.close_session(user_session)
+
+ @pytest.mark.skipif(not pytest.config.getoption("--tbac"), reason="need --tbac option to run")
+ def test_token_expiry_timeout(self, mgmt_session, rw_user_proxy, rw_conman_proxy, rbac_user_passwd, user_domain,
+ confd_host, logger, rw_project_proxy, rw_rbac_int_proxy, session_class):
+ """Set 30 seconds as token-expiry-timeout; then verifies an user session is automatically expired after 30 secs"""
+ test_user, role = 'user-1', 'rw-project:project-oper'
+ test_proj = 'project_test_token_expiry_timeout'
+ token_expiry_timeout = 30
+
+ logger.debug('Creating user {} under project {} and assigning it {}'.format(test_user, test_proj, role))
+ rift.auto.mano.create_project(rw_conman_proxy, test_proj)
+ rift.auto.mano.create_user(rw_user_proxy, test_user, rbac_user_passwd, user_domain)
+ rift.auto.mano.assign_project_role_to_user(rw_project_proxy, role, test_user, test_proj, user_domain, rw_rbac_int_proxy)
+
+ # admin user setting token_expiry_timeout
+ openidc_provider_xpath = '/rw-openidc-provider:openidc-provider-config'
+ openidc_provider = RwOpenidcProviderYang.YangData_RwOpenidcProvider_OpenidcProviderConfig.from_dict(
+ {'token_expiry_timeout': 30})
+ pxy = mgmt_session.proxy(RwOpenidcProviderYang)
+ logger.debug('Settig token_expiry_timeout to {} secs'.format(token_expiry_timeout))
+ pxy.replace_config(openidc_provider_xpath, openidc_provider)
+
+ # Verifying if token_expiry_timeout is set in openidc-provider-config
+ openidc_provider = pxy.get_config(openidc_provider_xpath)
+ assert openidc_provider
+ assert openidc_provider.token_expiry_timeout == token_expiry_timeout
+
+ def project_access(user_session):
+ user_conman_pxy = user_session.proxy(RwProjectYang)
+ assert user_conman_pxy.get_config('/project[name={}]/project-state'.format(quoted_key(test_proj)), list_obj=True)
+
+ # Log-in as test_user and validate operations under that user getting 'Unauthorized' after time-out
+ user_session = rift.auto.mano.get_session(session_class, confd_host, test_user, rbac_user_passwd)
+ project_access(user_session)
+
+ logger.debug('Sleeping for {} secs'.format(token_expiry_timeout))
+ time.sleep(token_expiry_timeout+5)
+
+ with pytest.raises(Exception, message='logged-in user able to access default project even after token expired'):
+ logger.debug('User {} trying to access default project. It should fail')
+ project_access(user_session)
+
+ # log-in as same user and perform the same operation. It should pass now.
+ user_session = rift.auto.mano.get_session(session_class, confd_host, test_user, rbac_user_passwd)
+ project_access(user_session)
--- /dev/null
+#!/usr/bin/env python3
+"""
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+# RIFT_IO_STANDARD_COPYRIGHT_HEADER(BEGIN)
+# Author(s): Balaji Rajappa, Vishnu Narayanan K.A
+# Creation Date: 2017-07-07
+# RIFT_IO_STANDARD_COPYRIGHT_HEADER(END)
+
+import gi
+import json
+import urllib.parse
+
+import rift.auto.mano
+import pytest
+import tornado.httpclient
+import time
+import Cryptodome.PublicKey.RSA as RSA
+
+import oic.utils.jwt as oic_jwt
+import oic.utils.keyio as keyio
+from jwkest.jwk import RSAKey
+from rift.rwlib.util import certs
+gi.require_version('RwOpenidcProviderYang', '1.0')
+gi.require_version('RwRbacInternalYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
+gi.require_version('RwProjectYang', '1.0')
+gi.require_version('RwKeyspec', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+from gi.repository import ( # noqa
+ RwOpenidcProviderYang,
+ RwProjectNsdYang,
+ RwProjectYang,
+ RwRbacInternalYang,
+ RwConmanYang,
+)
+from gi.repository.RwKeyspec import quoted_key # noqa
+
+
+PRIVATE_KEY = """-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAs9bRFjWofNeWq2qtsvH9iDZXXbv5NQI6avK1hSt+0W0g3SXW
+hllNenZAhFpXHzZvJk2qEoNIRXIeonX4N62FBLD7ZoWHQDGahkyfhxML4jYA3KUa
+PWGeUvMlRPkoR4NjHA3zXQvD2FwTtcKCulGYQHRAAyATIcNq0kKZMuMAJxC5A7VD
+vQVb7vOaN01YxJt+L6KF0v4ZiYdse5yBI/X58i2gnLqy102Oqj2qZygazj5LLdTE
+sjgsiC9ln6kesbRayXiqi+RnF+BeKKlwGCuUpH+vFGxXmT6Kr4iEiGIHxAs/HZOS
+9m61z1eHjrce654mpqwbeqhsyQZswyab2IpERwIDAQABAoIBABrnK+gypr3mUnfa
+QZnfcZoK5w7yq9kuOCb/oAAH/bS+qofqvSjj+x8yyXwDN71Hm2EThTm3wfwBkmuj
+UjqiDqAhCbrQhucnhIJKvCKsyr5QbdiUKlS8Yb7u+MhUrZ3lHdJ4k8t7kxSu0ZQD
+QSM2SZx6x4iwJ6yJW1WQ+PIP21n8ejraQ9PzqpuUsNXh05DU8qN/nJHe311D5ZuB
+UnSHdfGaF+EBbNxPLzV028db+L9m3a+h87uZhyqwRlUXP+swlToVNvF74bs+mflz
+r5JN6CwRM3VamnwmcnE77D/zyCsP1Js9LgoxhzhdcUwIOYVWRzUUVRCsrtYOSGF7
+WBzC3WECgYEA0hGtnBw5rryubv0kWDjZoVGvuwDo7BOW1JFXZYJwvweEj7EjWFTY
+bVk+MYs1huG+0NpNuhw6IYmDPIEkoLVNGuTHBMnA+SzQx/xv719b1OmY0Wl8ikYd
+Xlmhxr7mjAJX4eqkVTrBGtsi6TCLdk3HnUdpXJQ0k2aUN6hNFJfsmhUCgYEA2ykP
+hdVzP1ZtXsHEfHSOfRPIzX9gCLETghntAf44MCF+hHZeEVnuTSrfeqELvy5qCarA
+FgjZ77p7q6R7YP2KBQUc/gzZStjGIOCPv9xI8otXrmQRVXOxWNafeDp+TOPa2o9S
+2bBovNmN4Kc+ayktATCVuabMbuGiMIPuRY1pR+sCgYEAmdJSEw7j+hy1ihYZJ/Sw
+/5xmFoQLCtspRgwLOAx07Jzfp6xpGkQ+mouPrA2oq1TgOeSwp8gFlQsxqvtRy9AW
+XswJI2tsv8jeNKKXgGuOPfCzcxxQEpxW4wC1ImglP35zxbzginxUbIrsHF7ssDsy
+IOvqrdzkRs8FV2AI2TyKByUCgYEAuhdDdwCnu0BH3g3qKUNPOiVyfAuMH9U8G1yo
+Quj6DORj6VYYyeLy1dNxr07QCqX+o/a44/zgEQ7ns/cWTGT8rQaKd62xVDx8/62u
+YdtKlah76zhM/6IdFLIo9o20cNWJH8xTLUT9ql2QexGHjraH4FrAx8M6E2zDqy5b
+Q/OvUcECgYAjt8XosvUiRpZ1ugMxwAx316IIEgs2u7k4xdQESnVhIOM3Ex5ikXkK
+I0Hu/2XPH3KO6+6BOhtdZ4qXLf4hikbIisgq3P87Fb2rUElYZjVm3vClYhEzx6ym
+bSWO/cZTpp9L14qMuWzb13pD20GExPOIBh1m0exvoL3M8JhLShutWw==
+-----END RSA PRIVATE KEY-----"""
+
+PUBLIC_KEY = """-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAs9bRFjWofNeWq2qtsvH9
+iDZXXbv5NQI6avK1hSt+0W0g3SXWhllNenZAhFpXHzZvJk2qEoNIRXIeonX4N62F
+BLD7ZoWHQDGahkyfhxML4jYA3KUaPWGeUvMlRPkoR4NjHA3zXQvD2FwTtcKCulGY
+QHRAAyATIcNq0kKZMuMAJxC5A7VDvQVb7vOaN01YxJt+L6KF0v4ZiYdse5yBI/X5
+8i2gnLqy102Oqj2qZygazj5LLdTEsjgsiC9ln6kesbRayXiqi+RnF+BeKKlwGCuU
+pH+vFGxXmT6Kr4iEiGIHxAs/HZOS9m61z1eHjrce654mpqwbeqhsyQZswyab2IpE
+RwIDAQAB
+-----END PUBLIC KEY-----"""
+
+WRONG_PRIVATE_KEY = """-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEA230Ic8gqYGrIYPffrgvS9ezrI94+TMwIX0A3nyi6nRBOAzuV
+OMP0L4OegDLnAkyUC4ZiH6B9uAJ1mbp4WsX0Q2a3FuGzscCfriV0JKRd4256Mj60
+bGq7xLqR/d62IzLrQ2eJCQe2IspwUIeAW301igwoPIGTfZurQ6drXBcbRVo7adry
+V3+TGsfQVge95IyVAPm4A7kcJsdQu9HsD7Hp9LIM35B3oHCOF7hHP/MEEAz84Q6q
+lpWxdTzSnIxDXWxS2BqPInKOIL5egpn69AfJKLj+QPpQymULx3FCeNKeHmSICHtP
+r0uTckEek0kfFT2W6hIU1w1f+Pkddhc1fY45VQIDAQABAoIBABvOsHZywqOqg659
+WPJk/xo3JOdLbdsu8lSW/zUD5PinKysPrm0drl8irr8RM+E/sHXxVZcqLyNT9HBA
+hqUBdVvgtIuKlsiLXe+jQR6vUFHTGlopRZSCxT08YeinAa5d8h59DIh/WJz5xtb9
+A88Tguf1eFeKFxSP11ff6yMkrkjP1KmvNRoTAC0MU3p/N6UT03roR9v6n4qGPF6p
+/fy6uhLWSJVl7IGFL18DEODid64ShK37VytnvLAMQp8OzL87OdoUW6qrA+z4FP59
+XSpXULxn6ayJG3VChT+Y+nb23rC6gzCYYb3qkSwep2xNqfblP8jL2k/NSlbshdiz
+j3BfK8ECgYEA6D7SMCXZ2hBYu8EBoGRmMLdtM+spps61JOAhgy2i9aNQ/YlKfuS9
+kvNFqT1DEpQsjcRmZIEVb5uJQJYUDx6zj4eUSzkISvziz43dg4RKpC/ktprp9RQ1
+8sAQD4n5Xy2chdTQHKfGl4oF5b16wpi0eE97XptDOlLgPhk167woUQUCgYEA8fAt
+8uZxw0aKkQbF+tYItsWQQP87dJGUeLna4F3T6q5L5WJYCtFqILiFfWwfcjEaOKWV
+JzKr0f9pLrRxXYdFUxNolOhA1hZCqZu2ZzpSlfsPWhp2WflGi6DqzSByhgVuwHbV
+pRl0TRE2dQVgpuXxxiURREHoHJPZRc+3sOwU+BECgYAZJXQssmx8J/jzm1pJu5U1
+ASdZz8Sawxbp/zqhsXdLkXtbeFoQk0PTfXO1d2Sjxldsoi9UAoYHp5ec3qMdX/2h
+NNThsDMtq2QDhSDO9KwASw9AllVuq9mLhzA1/oJ5w76G3xwJfkEKd29cCMAaAd7I
+iBKbk8QbtI2DK8ei1qSm4QKBgAPHvPAOqbhjYcbiVDWXIou4ioh5dHRd0fQQ81qO
+HMGN96Gd58JDg2T/fRZ4mgUuvzojXDFAmW6ujvYr25mag3rI0tmAx4KQ1nnP9Qmn
+36J4ScUepLrDKlcELKcH2sI9U32uXag2vZp2qmMpsljpPt3ZtmtanEXWCY8Nr9ET
+30ABAoGAQ63wGwq1LPS6t/zU6CwOlIzGNnHDquO7o1o/h8IPt3BN6yF0NEVItjdi
+fL2ZwmBCUbO6Y/Jb1kh4a0iohWF33nS3J4Q6wSQUfBMG5jDI7GfuKAgTQl+sMkOM
+xjyKrWs/y7HtiP/2vf83QVEL8Bxr3WXdXHj1EBHFEMWA576J6mk=
+-----END RSA PRIVATE KEY-----"""
+
+roles = (
+ 'rw-rbac-platform:super-admin', 'rw-project:project-admin',
+ 'rw-project-mano:catalog-admin', 'rw-project:project-oper'
+)
+
+
+class Jwt:
+ """Jwt."""
+
+ def __init__(
+ self, private_key=None, public_key=None,
+ iss=None, sub=None, aud=None):
+ """__init___."""
+ self.iss = iss
+ self.sub = sub
+ self.aud = aud
+ self.keyjar = keyio.KeyJar()
+ if private_key:
+ self._add_key_to_keyjar(private_key)
+ if public_key:
+ self._add_key_to_keyjar(public_key, owner=self.iss)
+
+ def _add_key_to_keyjar(self, pkey, owner=''):
+ kb = keyio.KeyBundle()
+ priv_key = RSA.importKey(pkey)
+ key = RSAKey().load_key(priv_key)
+ key.use = "sig"
+ kb.append(key)
+ self.keyjar.add_kb(owner, kb)
+
+ def sign_jwt(self):
+ """sign_jwt."""
+ jwt = oic_jwt.JWT(self.keyjar, iss=self.iss)
+ jws = jwt.pack(sub=self.sub, aud=self.aud)
+ return jws
+
+ def verify(self, jws):
+ """verify."""
+ jwt = oic_jwt.JWT(self.keyjar)
+ return jwt.unpack(jws)
+
+TOKEN_URL = "https://{}:8009/token"
+REVOKE_URL = "https://{}:8009/revoke"
+REST_GET_LOG_CONFIG = "https://{}:8008/api/running/logging"
+
+
+class State:
+ """State."""
+
+ def __init__(self):
+ """__init___."""
+ self.access_token = None
+ _, self.cert, _ = certs.get_bootstrap_cert_and_key()
+
+ def teardown(self):
+ """teardown."""
+ print("\n=== Done with Tests ===")
+
+
+@pytest.fixture(scope="session")
+def state():
+ """state."""
+ st = State()
+ yield st
+ st.teardown()
+
+
+@pytest.mark.incremental
+class TestJwtBearer:
+ """TestJwtBearer."""
+
+ def generate_keys(self, key_format='PEM'):
+ """Generate private & public keys."""
+ private = RSA.generate(2048)
+ pri_key = private.exportKey('PEM')
+ private_key = pri_key.decode('utf-8')
+ public = private.publickey()
+ pub_key = public.exportKey(key_format)
+ public_key = pub_key.decode('utf-8')
+ return private_key, public_key
+
+ def test_client_config(
+ self, rw_user_proxy, rbac_user_passwd, user_domain,
+ rbac_platform_proxy, rw_rbac_int_proxy, mgmt_session):
+ """Setting the public key in config."""
+ client_id = '111'
+ rift.auto.mano.create_user(
+ rw_user_proxy, 'test', rbac_user_passwd, user_domain)
+ rift.auto.mano.assign_platform_role_to_user(
+ rbac_platform_proxy, 'rw-rbac-platform:super-admin', 'test',
+ user_domain, rw_rbac_int_proxy)
+ openidc_xpath = (
+ '/rw-openidc-provider:openidc-provider-config/' +
+ 'rw-openidc-provider:openidc-client' +
+ '[rw-openidc-provider:client-id={}]'.format(quoted_key(client_id))
+ )
+ config_object = (
+ RwOpenidcProviderYang.
+ YangData_RwOpenidcProvider_OpenidcProviderConfig_OpenidcClient.
+ from_dict({
+ 'client_id': client_id,
+ 'client_name': 'test',
+ 'user_name': 'test',
+ 'user_domain': user_domain,
+ 'public_key': PUBLIC_KEY}))
+ rw_open_idc_proxy = mgmt_session.proxy(RwOpenidcProviderYang)
+ rw_open_idc_proxy.create_config(openidc_xpath, config_object)
+
+ def test_get_token(self, state, confd_host):
+ """Get the token."""
+ jwt = Jwt(private_key=PRIVATE_KEY, iss="111",
+ sub="test", aud="https://{}:8009".format(confd_host))
+ jws = jwt.sign_jwt()
+ body_tuple = (
+ ("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer"),
+ ("assertion", jws),
+ )
+
+ req = tornado.httpclient.HTTPRequest(
+ url=TOKEN_URL.format(confd_host),
+ method='POST',
+ headers={"Content-Type": "application/x-www-form-urlencoded"},
+ ca_certs=state.cert,
+ body=urllib.parse.urlencode(body_tuple)
+ )
+ client = tornado.httpclient.HTTPClient()
+ resp = client.fetch(req)
+ token_resp = json.loads(resp.body.decode('utf-8'))
+ assert "access_token" in token_resp
+ state.access_token = token_resp["access_token"]
+
+ def test_api_access(self, state, confd_host):
+ """Test api access."""
+ assert state.access_token is not None
+ req = tornado.httpclient.HTTPRequest(
+ url=REST_GET_LOG_CONFIG.format(confd_host),
+ headers={
+ "Authorization": "Bearer " + state.access_token,
+ "Accept": "application/json",
+ },
+ ca_certs=state.cert,
+ )
+ client = tornado.httpclient.HTTPClient()
+ resp = client.fetch(req)
+ assert resp.code == 200 or resp.code == 204
+
+ def test_revoke_token(self, state, confd_host):
+ """Revoke a token."""
+ assert state.access_token is not None
+ body_tuple = (
+ ("token", state.access_token),
+ ("token_type_hint", "access_token"),
+ )
+ req = tornado.httpclient.HTTPRequest(
+ url=REVOKE_URL.format(confd_host),
+ method='POST',
+ headers={
+ "Authorization": "Bearer " + state.access_token,
+ "Content-Type": "application/x-www-form-urlencoded",
+ },
+ ca_certs=state.cert,
+ body=urllib.parse.urlencode(body_tuple)
+ )
+ client = tornado.httpclient.HTTPClient()
+ client.fetch(req)
+
+ def test_api_access_invalid_token(self, state, confd_host):
+ """Test access with invalid token."""
+ assert state.access_token is not None
+ req = tornado.httpclient.HTTPRequest(
+ url=REST_GET_LOG_CONFIG.format(confd_host),
+ headers={
+ "Authorization": "Bearer " + state.access_token,
+ "Accept": "application/json",
+ },
+ ca_certs=state.cert,
+ )
+ client = tornado.httpclient.HTTPClient()
+ with pytest.raises(tornado.httpclient.HTTPError) as excinfo:
+ client.fetch(req)
+ assert excinfo.value.code == 401
+ state.access_token = None
+
+ def test_invalid_client_id(self, state, confd_host):
+ """Test with invalid client-id."""
+ jwt = Jwt(private_key=PRIVATE_KEY, iss="invalid_client",
+ sub="test", aud="https://{}:8009".format(confd_host))
+ jws = jwt.sign_jwt()
+ body_tuple = (
+ ("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer"),
+ ("assertion", jws),
+ )
+
+ req = tornado.httpclient.HTTPRequest(
+ url=TOKEN_URL.format(confd_host),
+ method='POST',
+ headers={"Content-Type": "application/x-www-form-urlencoded"},
+ ca_certs=state.cert,
+ body=urllib.parse.urlencode(body_tuple)
+ )
+ client = tornado.httpclient.HTTPClient()
+ with pytest.raises(tornado.httpclient.HTTPError) as excinfo:
+ client.fetch(req)
+ assert excinfo.value.code == 400
+
+ def test_invalid_key(self, state, confd_host):
+ """Test with invalid key."""
+ jwt = Jwt(private_key=WRONG_PRIVATE_KEY, iss="111",
+ sub="test", aud="https://{}:8009".format(confd_host))
+ jws = jwt.sign_jwt()
+ body_tuple = (
+ ("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer"),
+ ("assertion", jws),
+ )
+
+ req = tornado.httpclient.HTTPRequest(
+ url=TOKEN_URL.format(confd_host),
+ method='POST',
+ headers={"Content-Type": "application/x-www-form-urlencoded"},
+ ca_certs=state.cert,
+ body=urllib.parse.urlencode(body_tuple)
+ )
+ client = tornado.httpclient.HTTPClient()
+ with pytest.raises(tornado.httpclient.HTTPError) as excinfo:
+ client.fetch(req)
+ assert excinfo.value.code == 400
+
+ def test_invalid_user(self, state, confd_host):
+ """Test with invalid user."""
+ jwt = Jwt(private_key=PRIVATE_KEY, iss="111",
+ sub="invalid_user", aud="https://{}:8009".format(confd_host))
+ jws = jwt.sign_jwt()
+ body_tuple = (
+ ("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer"),
+ ("assertion", jws),
+ )
+
+ req = tornado.httpclient.HTTPRequest(
+ url=TOKEN_URL.format(confd_host),
+ method='POST',
+ headers={"Content-Type": "application/x-www-form-urlencoded"},
+ ca_certs=state.cert,
+ body=urllib.parse.urlencode(body_tuple)
+ )
+ client = tornado.httpclient.HTTPClient()
+ with pytest.raises(tornado.httpclient.HTTPError) as excinfo:
+ client.fetch(req)
+ assert excinfo.value.code == 400
+
+ def test_check_basic_functionality(
+ self, rw_user_proxy, rbac_user_passwd, user_domain, state,
+ rbac_platform_proxy, rw_rbac_int_proxy, mgmt_session,
+ session_class, confd_host, rw_project_proxy, cloud_module,
+ cloud_account, descriptors, fmt_nsd_catalog_xpath, logger):
+ """Check basic functionality."""
+ # Add the users to our config with the public key.
+ logger.debug('Create users and add roles for them')
+ for idx in range(1, 5):
+ client_id = '111{}'.format(idx)
+ user_name = 'test_{}'.format(idx)
+ role = roles[idx - 1]
+ rift.auto.mano.create_user(
+ rw_user_proxy, user_name, rbac_user_passwd, user_domain)
+ if 'platform' in role:
+ rift.auto.mano.assign_platform_role_to_user(
+ rbac_platform_proxy, role, user_name,
+ user_domain, rw_rbac_int_proxy)
+ else:
+ rift.auto.mano.assign_project_role_to_user(
+ rw_project_proxy, role, user_name,
+ 'default', user_domain, rw_rbac_int_proxy)
+ openidc_xpath = (
+ '/rw-openidc-provider:openidc-provider-config/' +
+ 'rw-openidc-provider:openidc-client[rw-openidc-provider:' +
+ 'client-id={}]'.format(quoted_key(client_id))
+ )
+ # Generate PEM keys for some, while for others its openssh keys
+ logger.debug('Generate private & public keys for the user')
+ if idx % 2 == 0:
+ key_format = 'OpenSSH'
+ else:
+ key_format = 'PEM'
+ private_key, public_key = self.generate_keys(key_format)
+ config_object = (
+ RwOpenidcProviderYang.
+ YangData_RwOpenidcProvider_OpenidcProviderConfig_OpenidcClient.
+ from_dict({
+ 'client_id': client_id,
+ 'client_name': user_name,
+ 'user_name': user_name,
+ 'user_domain': user_domain,
+ 'public_key': public_key}))
+ rw_open_idc_proxy = mgmt_session.proxy(RwOpenidcProviderYang)
+ rw_open_idc_proxy.create_config(openidc_xpath, config_object)
+ # Create the jason web signature
+ jwt = Jwt(private_key=private_key, iss=client_id,
+ sub=user_name, aud="https://{}:8009".format(confd_host))
+ jws = jwt.sign_jwt()
+ body_tuple = (
+ ("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer"),
+ ("assertion", jws),
+ )
+ # Get the token using the signature
+ req = tornado.httpclient.HTTPRequest(
+ url=TOKEN_URL.format(confd_host),
+ method='POST',
+ headers={"Content-Type": "application/x-www-form-urlencoded"},
+ ca_certs=state.cert,
+ body=urllib.parse.urlencode(body_tuple)
+ )
+ client = tornado.httpclient.HTTPClient()
+ resp = client.fetch(req)
+ token_resp = json.loads(resp.body.decode('utf-8'))
+ assert "access_token" in token_resp
+ access_token = token_resp["access_token"]
+ user_session = rift.auto.mano.get_session(
+ session_class, confd_host, user_name,
+ rbac_user_passwd, access_token=access_token)
+ rw_rbac_internal_proxy = user_session.proxy(RwRbacInternalYang)
+ # According to the role, checking the functionality
+ if role == 'rw-rbac-platform:super-admin':
+ project_pxy = user_session.proxy(RwProjectYang)
+ rift.auto.mano.assign_project_role_to_user(
+ project_pxy, 'rw-project:project-admin', 'oper', 'default',
+ 'system', rw_rbac_internal_proxy)
+ elif role == 'rw-project:project-admin':
+ logger.debug('Creating cloud account.')
+ rift.auto.mano.create_cloud_account(
+ user_session, cloud_account)
+ elif role == 'rw-project-mano:catalog-admin':
+ logger.debug('Uploading descriptors')
+ for descriptor in descriptors:
+ rift.auto.descriptor.onboard(
+ user_session, descriptor, project='default')
+ nsd_pxy = user_session.proxy(RwProjectNsdYang)
+ nsd_catalog = nsd_pxy.get_config(
+ fmt_nsd_catalog_xpath.format(
+ project=quoted_key('default')))
+ assert nsd_catalog
+ else:
+ project_xpath = '/project[name={project_name}]/project-state'
+ rw_project_proxy = user_session.proxy(RwProjectYang)
+ project = rw_project_proxy.get_config(
+ project_xpath.format(project_name=quoted_key('default')), list_obj=True)
+ assert project
+
+ def test_with_expired_token(
+ self, state, rw_user_proxy, rbac_user_passwd, user_domain,
+ rbac_platform_proxy, rw_rbac_int_proxy, mgmt_session,
+ session_class, confd_host, cloud_module, cloud_account,
+ logger):
+ """Test with an expired token."""
+ # Set the expiry time for the token as 'expiry_timeout' seconds.
+ client_id = '222'
+ user_name = 'expired_token_user'
+ expiry_timeout = 1
+ rift.auto.mano.create_user(
+ rw_user_proxy, user_name, rbac_user_passwd, user_domain)
+ rift.auto.mano.assign_platform_role_to_user(
+ rbac_platform_proxy, 'rw-rbac-platform:super-admin', user_name,
+ user_domain, rw_rbac_int_proxy)
+
+ openidc_provider_xpath = '/rw-openidc-provider:openidc-provider-config'
+ openidc_provider = (
+ RwOpenidcProviderYang.
+ YangData_RwOpenidcProvider_OpenidcProviderConfig.from_dict({
+ 'token_expiry_timeout': expiry_timeout}))
+ pxy = mgmt_session.proxy(RwOpenidcProviderYang)
+ pxy.replace_config(openidc_provider_xpath, openidc_provider)
+
+ # Verify if token_expiry_timeout is set in openidc-provider-config
+ openidc_provider = pxy.get_config(openidc_provider_xpath)
+ assert openidc_provider
+ assert openidc_provider.token_expiry_timeout == expiry_timeout
+ # Set the public key in our config
+ openidc_xpath = (
+ '/rw-openidc-provider:openidc-provider-config/' +
+ 'rw-openidc-provider:openidc-client' +
+ '[rw-openidc-provider:client-id={}]'.format(quoted_key(client_id))
+ )
+ config_object = (
+ RwOpenidcProviderYang.
+ YangData_RwOpenidcProvider_OpenidcProviderConfig_OpenidcClient.
+ from_dict({
+ 'client_id': client_id,
+ 'client_name': user_name,
+ 'user_name': user_name,
+ 'user_domain': user_domain,
+ 'public_key': PUBLIC_KEY}))
+ rw_open_idc_proxy = mgmt_session.proxy(RwOpenidcProviderYang)
+ rw_open_idc_proxy.create_config(openidc_xpath, config_object)
+ # Generate the signature
+ jwt = Jwt(private_key=PRIVATE_KEY, iss=client_id,
+ sub=user_name, aud="https://{}:8009".format(confd_host))
+ jws = jwt.sign_jwt()
+ body_tuple = (
+ ("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer"),
+ ("assertion", jws),
+ )
+ logger.debug('Get the token using the signature')
+ req = tornado.httpclient.HTTPRequest(
+ url=TOKEN_URL.format(confd_host),
+ method='POST',
+ headers={"Content-Type": "application/x-www-form-urlencoded"},
+ ca_certs=state.cert,
+ body=urllib.parse.urlencode(body_tuple)
+ )
+ client = tornado.httpclient.HTTPClient()
+ resp = client.fetch(req)
+ token_resp = json.loads(resp.body.decode('utf-8'))
+ assert "access_token" in token_resp
+ access_token = token_resp["access_token"]
+ # Wait out the expiry timout
+ user_session = rift.auto.mano.get_session(
+ session_class, confd_host, user_name,
+ rbac_user_passwd, access_token=access_token)
+ time.sleep(expiry_timeout + 5)
+ with pytest.raises(
+ Exception,
+ message='Task done with expired token'):
+ user_conman_pxy = user_session.proxy(RwProjectYang)
+ assert user_conman_pxy.get_config(
+ '/project[name={}]/project-state'.format(quoted_key('default')), list_obj=True)
--- /dev/null
+{
+ "uint64": [0, 1, -1, "riftio", "riftio@riftio.com",
+ 922337203685477580717263457123647172364, "", null,
+ "rif~t¶*¤500"],
+ "uint32": [0, 1, -1, "riftio", "riftio@riftio.com",
+ 922337203685477580717263457123647172364, "", null,
+ "rif~t¶*¤500"],
+ "uint16": [0, 1, -1, "riftio", "riftio@riftio.com",
+ 922337203685477580717263457123647172364, "", null,
+ "rif~t¶*¤500"],
+ "uint8": [0, 1, -1, "riftio", "riftio@riftio.com",
+ 922337203685477580717263457123647172364, "", null,
+ "rif~t¶*¤500"],
+ "decimal64": [0, 1, -1, "riftio", "riftio@riftio.com",
+ 922337203685477580.717263457123647172364, "", null,
+ "rif~t¶*¤500"],
+ "int64": [0, 1, -1, "riftio", "riftio@riftio.com",
+ 922337203685477580717263457123647172364, "", null,
+ "rif~t¶*¤500"],
+ "int32": [0, 1, -1, "riftio", "riftio@riftio.com",
+ 922337203685477580717263457123647172364, "", null,
+ "rif~t¶*¤500"],
+ "int16": [0, 1, -1, "riftio", "riftio@riftio.com",
+ 922337203685477580717263457123647172364, "", null,
+ "rif~t¶*¤500"],
+ "int8": [0, 1, -1, "riftio", "riftio@riftio.com",
+ 922337203685477580717263457123647172364, "", null,
+ "rif~t¶*¤500"],
+ "string": [0, 1, -1, "riftio", "riftio@riftio.com",
+ 922337203685477580717263457123647172364,
+ 1313213.1321313, "~~&^%*()", "", null,
+ "rif~t¶*¤500"],
+ "union": ["1.1.1.1", null, 0, 1, -1,
+ 22337203685477580717263457123647172364,
+ 1313213.1321313, "~~&^%*()", "", null,
+ "rif~t¶*¤500", "256.256.256.256",
+ "0.0.0.0"]
+}
--- /dev/null
+# !/usr/bin/env python
+"""
+#
+# Copyright 2017 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+@author Anoop Valluthadam (anoop.valluthadam@riftio.com), Vishnu Narayanan K.A
+@brief Create/Delete/Other operations of Projects and User
+"""
+
+import os
+
+from utils.imports import * # noqa
+from utils.traversal_engine import traverse_it
+from utils.utils import parse_input_data
+from utils.tbac_token_utils import * # noqa
+
+headers = {'content-type': 'application/json'}
+
+
+class TestRestAPI(object):
+ """TestRestAPI."""
+
+ def traverse_and_find_all_keys(self, it, key_dict):
+ """Find all keys and their data types present in the json schema.
+
+ Args:
+ it (dict): the json
+ key_dict (dict): will be populated with the keys & their datatypes
+ Returns:
+ key_dict (dict): will be populated with the keys & their datatypes
+ """
+ if (isinstance(it, list)):
+ for item in it:
+ self.traverse_and_find_all_keys(item, key_dict)
+ return key_dict
+
+ elif (isinstance(it, dict)):
+ for key in it.keys():
+ if key == 'name' and 'data-type' in it:
+ if isinstance(it['data-type'], dict):
+ dtype = next(iter(it['data-type']))
+ if ((it[key] in key_dict) and
+ (dtype not in key_dict[it[key]])):
+
+ key_dict[it[key]].append(dtype)
+
+ elif it[key] not in key_dict:
+ key_dict[it[key]] = [dtype]
+ else:
+ pass
+ else:
+ if ((it[key] in key_dict) and
+ (it['data-type'] not in key_dict[it[key]])):
+
+ key_dict[it[key]].append(it['data-type'])
+
+ elif it[key] not in key_dict:
+ key_dict[it[key]] = [it['data-type']]
+ else:
+ pass
+ self.traverse_and_find_all_keys(it[key], key_dict)
+ return key_dict
+ else:
+ return None
+
+ def create_post_call(
+ self, data, confd_host, url, logger, state, number_of_tests):
+ """Create the POST.
+
+ Args:
+ data (dict): JSON data
+ confd_host (string): IP addr of the Launchpad
+ url (string): the url for the post call
+ logger (logger Object): log object
+ state: for the tbac token
+ number_of_tests (list): test & error cases count
+ Returns:
+ number_of_tests (list): test & error cases count
+ Raises:
+ requests.exceptions.ConnectionError: in case we loose connection
+ from the Launchpad, mostly when Launchpad crashes
+
+ """
+ number_of_tests[0] += 1
+
+ key = next(iter(data))
+ if 'project' in url:
+ name = str(data[key][0]["name"])
+ new_url = url + name
+ elif 'user-config' in url:
+ name = str(data[key]['user'][0]['user-name'])
+ domain = str(data[key]['user'][0]['user-domain'])
+ data = data['rw-user:user-config']
+ new_url = url + '/user/' + name + ',' + domain
+ else:
+ raise Exception('Something wrong with the URL')
+
+ logger.debug(data)
+ headers['Authorization'] = 'Bearer ' + state.access_token
+ try:
+ create_result = state.session.post(
+ url, data=json.dumps(data),
+ headers=headers, verify=False)
+ get_result = state.session.get(
+ new_url,
+ headers=headers, verify=False)
+ delete_result = state.session.delete(
+ new_url,
+ headers=headers, verify=False)
+ except requests.exceptions.ConnectionError:
+ logger.error('Crashed for the data: \n{}'.format(data))
+ number_of_tests[1] += 1
+ exit(1)
+
+ logger.debug(
+ 'create result:\n{}\n{}\n'.format(
+ create_result.status_code, create_result.text))
+ logger.debug(
+ 'get result:\n{}\n{}\n'.format(
+ get_result.status_code, get_result.text))
+ logger.debug(
+ 'delete result:\n{}\n{}\n'.format(
+ delete_result.status_code, delete_result.text))
+
+ return number_of_tests
+
+ def get_schema(self, confd_host, url, property_=None):
+ """Get schema.
+
+ Args:
+ confd_host (string): Launchpad IP
+ property_ (string): vnfd/nsd/user etc
+ Returns:
+ schema (JSON): Schema in JSON format
+ """
+ headers = {'content-type': 'application/json'}
+
+ result = requests.get(url, auth=HTTPBasicAuth('admin', 'admin'),
+ headers=headers, verify=False)
+
+ schema = json.loads(result.text)
+
+ return schema
+
+ def traverse_call(
+ self, test_input, data, k_dict, confd_host, logger,
+ number_of_tests, depth, url, state):
+ """Traversing through the values from the test IP JSON.
+
+ Args:
+ test_input (string): the data from the test IP JSON
+ data (json): schema data
+ k_dict (dict): dictionary of the JSON IP
+ confd_host (string): Launchpad IP
+ logger (logger obj): log object
+ number_of_tests (list): test & error cases count
+ depth (int): depth of the json
+ url (string): the url for the post call
+ state: for the tbac token
+ Returns:
+ number_of_tests (list): test & error cases count
+ """
+ for key, kdata_types in k_dict.items():
+ for kdata_type in kdata_types:
+ if kdata_type in test_input:
+ test_values = test_input[kdata_type]
+ for test_value in test_values:
+ test_data = {kdata_type: test_value}
+ # Actual traversal call which will generate data
+ json_data = traverse_it(
+ data, original=False,
+ test_value=test_data, test_key=key,
+ max_depth=depth)
+
+ number_of_tests = self.create_post_call(
+ json_data, confd_host, url,
+ logger, state, number_of_tests)
+
+ return number_of_tests
+
+ def test_get_token(
+ self, rw_user_proxy, rbac_user_passwd, user_domain,
+ rbac_platform_proxy, rw_rbac_int_proxy, mgmt_session, state):
+ """Setting the public key in config and get token."""
+ client_id = '1234'
+ rift.auto.mano.create_user(
+ rw_user_proxy, 'test', rbac_user_passwd, user_domain)
+ rift.auto.mano.assign_platform_role_to_user(
+ rbac_platform_proxy, 'rw-rbac-platform:super-admin', 'test',
+ user_domain, rw_rbac_int_proxy)
+ openidc_xpath = (
+ '/rw-openidc-provider:openidc-provider-config/' +
+ 'rw-openidc-provider:openidc-client' +
+ '[rw-openidc-provider:client-id={}]'.format(quoted_key(client_id))
+ )
+ config_object = (
+ RwOpenidcProviderYang.
+ YangData_RwOpenidcProvider_OpenidcProviderConfig_OpenidcClient.
+ from_dict({
+ 'client_id': client_id,
+ 'client_name': 'test',
+ 'user_name': 'test',
+ 'user_domain': 'tbacdomain',
+ 'public_key': PUBLIC_KEY}))
+ rw_open_idc_proxy = mgmt_session.proxy(RwOpenidcProviderYang)
+ rw_open_idc_proxy.create_config(openidc_xpath, config_object)
+
+ # Get the token
+ jwt = Jwt(private_key=PRIVATE_KEY, iss=client_id,
+ sub="test", aud="https://locahost:8009")
+ jws = jwt.sign_jwt()
+ body_tuple = (
+ ("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer"),
+ ("assertion", jws),
+ )
+
+ req = tornado.httpclient.HTTPRequest(
+ url=TOKEN_URL,
+ method='POST',
+ headers={"Content-Type": "application/x-www-form-urlencoded"},
+ ca_certs=state.cert,
+ body=urllib.parse.urlencode(body_tuple)
+ )
+ client = tornado.httpclient.HTTPClient()
+ resp = client.fetch(req)
+ token_resp = json.loads(resp.body.decode('utf-8'))
+ assert "access_token" in token_resp
+ state.access_token = token_resp["access_token"]
+
+ auth_value = 'Bearer ' + state.access_token
+ state.session = requests.Session()
+ state.session.headers.update({
+ 'content-type': 'application/json',
+ 'Authorization': auth_value
+ })
+
+ def test_user_restapi(self, confd_host, logger, state):
+ """Test user creation restapi."""
+ rift_install = os.getenv('RIFT_INSTALL')
+ file_path = (
+ '{}/usr/rift/systemtest/pytest/'.format(rift_install) +
+ 'system/ns/restapitest/test_inputs/test_inputs.json')
+ test_input = parse_input_data(file_path)
+ schema_url_for_user = (
+ "https://{}:8008/v2/api/schema/user-config/".format(confd_host)
+ )
+ url_for_user = (
+ "https://{}:8008/v2/api/config/user-config".format(confd_host)
+ )
+ data = self.get_schema(confd_host, schema_url_for_user)
+
+ key_dict = {}
+ k_dict = self.traverse_and_find_all_keys(data, key_dict)
+
+ number_of_tests = [0, 0] # [total no. of tests, no. of erros]
+ # Traverse with depth but with out any specific key
+ for depth in range(14, 15):
+ number_of_tests = self.traverse_call(
+ test_input, data["user-config"], k_dict, confd_host,
+ logger, number_of_tests, depth, url_for_user, state)
+ logger.debug(
+ 'No of tests ran for userapi: {}'.format(number_of_tests[0]))
+ logger.debug(
+ 'No of crashed tests for userapi:{}'.format(number_of_tests[1]))
+
+ def test_project_restapi(self, confd_host, logger, state):
+ """Test project creation restapi."""
+ rift_install = os.getenv('RIFT_INSTALL')
+ file_path = (
+ '{}/usr/rift/systemtest/pytest/'.format(rift_install) +
+ 'system/ns/restapitest/test_inputs/test_inputs.json')
+ test_input = parse_input_data(file_path)
+
+ schema_url_for_project = (
+ "https://{}:8008/v2/api/schema/project/".format(confd_host)
+ )
+ url_for_project = (
+ "https://{}:8008/v2/api/config/project/".format(confd_host)
+ )
+ data = self.get_schema(confd_host, schema_url_for_project)
+
+ key_dict = {}
+ k_dict = self.traverse_and_find_all_keys(data, key_dict)
+
+ number_of_tests = [0, 0] # [total no. of tests, no. of erros]
+
+ # Traverse with depth but with out any specific key
+ for depth in range(5, 6):
+ number_of_tests = self.traverse_call(
+ test_input, data["project"], k_dict, confd_host,
+ logger, number_of_tests, depth, url_for_project, state)
+ logger.debug(
+ 'No of tests ran for projectapi: {}'.format(number_of_tests[0]))
+ logger.debug(
+ 'No of crashed tests for projectapi:{}'.format(number_of_tests[1]))
--- /dev/null
+# !/usr/bin/env python
+"""
+#
+# Copyright 2017 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+@file imports.py
+@author Anoop Valluthadam (anoop.valluthadam@riftio.com)
+"""
+
+import json # noqa
+import socket # noqa
+import struct # noqa
+import requests # noqa
+import random # noqa
+import logging # noqa
+import uuid # noqa
+import decimal # noqa
+import argparse # noqa
+import datetime # noqa
+import time # noqa
+
+from logging import handlers # noqa
+from signal import SIGTERM # noqa
+from requests.auth import HTTPBasicAuth # noqa
+from random import choice # noqa
+from string import ascii_lowercase # noqa
--- /dev/null
+#!/usr/bin/env python3
+"""
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+# RIFT_IO_STANDARD_COPYRIGHT_HEADER(BEGIN)
+# Author(s): Balaji Rajappa, Vishnu Narayanan K.A
+# Creation Date: 2017-07-07
+# RIFT_IO_STANDARD_COPYRIGHT_HEADER(END)
+
+import gi
+import json
+import urllib.parse
+
+import rift.auto.mano
+import pytest
+import tornado.httpclient
+import time
+import Cryptodome.PublicKey.RSA as RSA
+
+import oic.utils.jwt as oic_jwt
+import oic.utils.keyio as keyio
+from jwkest.jwk import RSAKey
+from rift.rwlib.util import certs
+gi.require_version('RwOpenidcProviderYang', '1.0')
+gi.require_version('RwRbacInternalYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
+gi.require_version('RwProjectYang', '1.0')
+gi.require_version('RwKeyspec', '1.0')
+gi.require_version('RwConmanYang', '1.0')
+from gi.repository import ( # noqa
+ RwOpenidcProviderYang,
+ RwProjectNsdYang,
+ RwProjectYang,
+ RwRbacInternalYang,
+ RwConmanYang,
+)
+from gi.repository.RwKeyspec import quoted_key # noqa
+
+
+@pytest.fixture(scope='session')
+def rbac_user_passwd():
+ """A common password being used for all rbac users."""
+ return 'mypasswd'
+
+
+@pytest.fixture(scope='session')
+def user_domain(tbac):
+ """user-domain being used in this rbac test."""
+ if tbac:
+ return 'tbacdomain'
+ return 'system'
+
+
+PRIVATE_KEY = """-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAs9bRFjWofNeWq2qtsvH9iDZXXbv5NQI6avK1hSt+0W0g3SXW
+hllNenZAhFpXHzZvJk2qEoNIRXIeonX4N62FBLD7ZoWHQDGahkyfhxML4jYA3KUa
+PWGeUvMlRPkoR4NjHA3zXQvD2FwTtcKCulGYQHRAAyATIcNq0kKZMuMAJxC5A7VD
+vQVb7vOaN01YxJt+L6KF0v4ZiYdse5yBI/X58i2gnLqy102Oqj2qZygazj5LLdTE
+sjgsiC9ln6kesbRayXiqi+RnF+BeKKlwGCuUpH+vFGxXmT6Kr4iEiGIHxAs/HZOS
+9m61z1eHjrce654mpqwbeqhsyQZswyab2IpERwIDAQABAoIBABrnK+gypr3mUnfa
+QZnfcZoK5w7yq9kuOCb/oAAH/bS+qofqvSjj+x8yyXwDN71Hm2EThTm3wfwBkmuj
+UjqiDqAhCbrQhucnhIJKvCKsyr5QbdiUKlS8Yb7u+MhUrZ3lHdJ4k8t7kxSu0ZQD
+QSM2SZx6x4iwJ6yJW1WQ+PIP21n8ejraQ9PzqpuUsNXh05DU8qN/nJHe311D5ZuB
+UnSHdfGaF+EBbNxPLzV028db+L9m3a+h87uZhyqwRlUXP+swlToVNvF74bs+mflz
+r5JN6CwRM3VamnwmcnE77D/zyCsP1Js9LgoxhzhdcUwIOYVWRzUUVRCsrtYOSGF7
+WBzC3WECgYEA0hGtnBw5rryubv0kWDjZoVGvuwDo7BOW1JFXZYJwvweEj7EjWFTY
+bVk+MYs1huG+0NpNuhw6IYmDPIEkoLVNGuTHBMnA+SzQx/xv719b1OmY0Wl8ikYd
+Xlmhxr7mjAJX4eqkVTrBGtsi6TCLdk3HnUdpXJQ0k2aUN6hNFJfsmhUCgYEA2ykP
+hdVzP1ZtXsHEfHSOfRPIzX9gCLETghntAf44MCF+hHZeEVnuTSrfeqELvy5qCarA
+FgjZ77p7q6R7YP2KBQUc/gzZStjGIOCPv9xI8otXrmQRVXOxWNafeDp+TOPa2o9S
+2bBovNmN4Kc+ayktATCVuabMbuGiMIPuRY1pR+sCgYEAmdJSEw7j+hy1ihYZJ/Sw
+/5xmFoQLCtspRgwLOAx07Jzfp6xpGkQ+mouPrA2oq1TgOeSwp8gFlQsxqvtRy9AW
+XswJI2tsv8jeNKKXgGuOPfCzcxxQEpxW4wC1ImglP35zxbzginxUbIrsHF7ssDsy
+IOvqrdzkRs8FV2AI2TyKByUCgYEAuhdDdwCnu0BH3g3qKUNPOiVyfAuMH9U8G1yo
+Quj6DORj6VYYyeLy1dNxr07QCqX+o/a44/zgEQ7ns/cWTGT8rQaKd62xVDx8/62u
+YdtKlah76zhM/6IdFLIo9o20cNWJH8xTLUT9ql2QexGHjraH4FrAx8M6E2zDqy5b
+Q/OvUcECgYAjt8XosvUiRpZ1ugMxwAx316IIEgs2u7k4xdQESnVhIOM3Ex5ikXkK
+I0Hu/2XPH3KO6+6BOhtdZ4qXLf4hikbIisgq3P87Fb2rUElYZjVm3vClYhEzx6ym
+bSWO/cZTpp9L14qMuWzb13pD20GExPOIBh1m0exvoL3M8JhLShutWw==
+-----END RSA PRIVATE KEY-----"""
+
+PUBLIC_KEY = """-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAs9bRFjWofNeWq2qtsvH9
+iDZXXbv5NQI6avK1hSt+0W0g3SXWhllNenZAhFpXHzZvJk2qEoNIRXIeonX4N62F
+BLD7ZoWHQDGahkyfhxML4jYA3KUaPWGeUvMlRPkoR4NjHA3zXQvD2FwTtcKCulGY
+QHRAAyATIcNq0kKZMuMAJxC5A7VDvQVb7vOaN01YxJt+L6KF0v4ZiYdse5yBI/X5
+8i2gnLqy102Oqj2qZygazj5LLdTEsjgsiC9ln6kesbRayXiqi+RnF+BeKKlwGCuU
+pH+vFGxXmT6Kr4iEiGIHxAs/HZOS9m61z1eHjrce654mpqwbeqhsyQZswyab2IpE
+RwIDAQAB
+-----END PUBLIC KEY-----"""
+
+WRONG_PRIVATE_KEY = """-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEA230Ic8gqYGrIYPffrgvS9ezrI94+TMwIX0A3nyi6nRBOAzuV
+OMP0L4OegDLnAkyUC4ZiH6B9uAJ1mbp4WsX0Q2a3FuGzscCfriV0JKRd4256Mj60
+bGq7xLqR/d62IzLrQ2eJCQe2IspwUIeAW301igwoPIGTfZurQ6drXBcbRVo7adry
+V3+TGsfQVge95IyVAPm4A7kcJsdQu9HsD7Hp9LIM35B3oHCOF7hHP/MEEAz84Q6q
+lpWxdTzSnIxDXWxS2BqPInKOIL5egpn69AfJKLj+QPpQymULx3FCeNKeHmSICHtP
+r0uTckEek0kfFT2W6hIU1w1f+Pkddhc1fY45VQIDAQABAoIBABvOsHZywqOqg659
+WPJk/xo3JOdLbdsu8lSW/zUD5PinKysPrm0drl8irr8RM+E/sHXxVZcqLyNT9HBA
+hqUBdVvgtIuKlsiLXe+jQR6vUFHTGlopRZSCxT08YeinAa5d8h59DIh/WJz5xtb9
+A88Tguf1eFeKFxSP11ff6yMkrkjP1KmvNRoTAC0MU3p/N6UT03roR9v6n4qGPF6p
+/fy6uhLWSJVl7IGFL18DEODid64ShK37VytnvLAMQp8OzL87OdoUW6qrA+z4FP59
+XSpXULxn6ayJG3VChT+Y+nb23rC6gzCYYb3qkSwep2xNqfblP8jL2k/NSlbshdiz
+j3BfK8ECgYEA6D7SMCXZ2hBYu8EBoGRmMLdtM+spps61JOAhgy2i9aNQ/YlKfuS9
+kvNFqT1DEpQsjcRmZIEVb5uJQJYUDx6zj4eUSzkISvziz43dg4RKpC/ktprp9RQ1
+8sAQD4n5Xy2chdTQHKfGl4oF5b16wpi0eE97XptDOlLgPhk167woUQUCgYEA8fAt
+8uZxw0aKkQbF+tYItsWQQP87dJGUeLna4F3T6q5L5WJYCtFqILiFfWwfcjEaOKWV
+JzKr0f9pLrRxXYdFUxNolOhA1hZCqZu2ZzpSlfsPWhp2WflGi6DqzSByhgVuwHbV
+pRl0TRE2dQVgpuXxxiURREHoHJPZRc+3sOwU+BECgYAZJXQssmx8J/jzm1pJu5U1
+ASdZz8Sawxbp/zqhsXdLkXtbeFoQk0PTfXO1d2Sjxldsoi9UAoYHp5ec3qMdX/2h
+NNThsDMtq2QDhSDO9KwASw9AllVuq9mLhzA1/oJ5w76G3xwJfkEKd29cCMAaAd7I
+iBKbk8QbtI2DK8ei1qSm4QKBgAPHvPAOqbhjYcbiVDWXIou4ioh5dHRd0fQQ81qO
+HMGN96Gd58JDg2T/fRZ4mgUuvzojXDFAmW6ujvYr25mag3rI0tmAx4KQ1nnP9Qmn
+36J4ScUepLrDKlcELKcH2sI9U32uXag2vZp2qmMpsljpPt3ZtmtanEXWCY8Nr9ET
+30ABAoGAQ63wGwq1LPS6t/zU6CwOlIzGNnHDquO7o1o/h8IPt3BN6yF0NEVItjdi
+fL2ZwmBCUbO6Y/Jb1kh4a0iohWF33nS3J4Q6wSQUfBMG5jDI7GfuKAgTQl+sMkOM
+xjyKrWs/y7HtiP/2vf83QVEL8Bxr3WXdXHj1EBHFEMWA576J6mk=
+-----END RSA PRIVATE KEY-----"""
+
+roles = (
+ 'rw-rbac-platform:super-admin', 'rw-project:project-admin',
+ 'rw-project-mano:catalog-admin', 'rw-project:project-oper'
+)
+
+
+class Jwt:
+ """Jwt."""
+
+ def __init__(
+ self, private_key=None, public_key=None,
+ iss=None, sub=None, aud=None):
+ """__init___."""
+ self.iss = iss
+ self.sub = sub
+ self.aud = aud
+ self.keyjar = keyio.KeyJar()
+ if private_key:
+ self._add_key_to_keyjar(private_key)
+ if public_key:
+ self._add_key_to_keyjar(public_key, owner=self.iss)
+
+ def _add_key_to_keyjar(self, pkey, owner=''):
+ kb = keyio.KeyBundle()
+ priv_key = RSA.importKey(pkey)
+ key = RSAKey().load_key(priv_key)
+ key.use = "sig"
+ kb.append(key)
+ self.keyjar.add_kb(owner, kb)
+
+ def sign_jwt(self):
+ """sign_jwt."""
+ jwt = oic_jwt.JWT(self.keyjar, iss=self.iss)
+ jws = jwt.pack(sub=self.sub, aud=self.aud)
+ return jws
+
+ def verify(self, jws):
+ """verify."""
+ jwt = oic_jwt.JWT(self.keyjar)
+ return jwt.unpack(jws)
+
+TOKEN_URL = "https://localhost:8009/token"
+REVOKE_URL = "https://localhost:8009/revoke"
+REST_GET_LOG_CONFIG = "https://localhost:8008/api/running/logging"
+
+
+class State:
+ """State."""
+
+ def __init__(self):
+ """__init___."""
+ self.access_token = None
+ _, self.cert, _ = certs.get_bootstrap_cert_and_key()
+
+ def teardown(self):
+ """teardown."""
+ print("\n=== Done with Tests ===")
+
+
+@pytest.fixture(scope="session")
+def state():
+ """state."""
+ st = State()
+ yield st
+ st.teardown()
\ No newline at end of file
--- /dev/null
+
+from .imports import * # noqa
+
+
+def populate_data(data_type, original=True, test_value={}, keys={}):
+ """Generate data from schema depends its Data-type
+ Args:
+ data_type (string): data_type from the test IP json
+ original (boolean): if it is True,
+ will generate normal JSON with randon
+ values
+ test_value (dict): will be like this {'string': '-1'}, means, if
+ string data typr comes, data will be -1
+ keys (dict): if this is present, value testing for this particular
+ key
+ Returns:
+ string_ (string): string value
+ """
+
+ if original:
+ if (isinstance(data_type, dict)):
+ if 'enumeration' in data_type:
+ string_ = list(data_type['enumeration']['enum'])[0]
+ return string_
+ if 'leafref' in data_type:
+ data_type = 'leafref'
+ if 'union' in data_type:
+ data_type = 'union'
+
+ if data_type == 'string':
+ string_ = ''.join(choice(ascii_lowercase) for i in range(12))
+ elif data_type == 'uint64':
+ string_ = uuid.uuid4().int & (1 << 64) - 1
+ elif data_type == 'uint8':
+ string_ = uuid.uuid4().int & (1 << 8) - 1
+ elif data_type == 'uint32':
+ string_ = uuid.uuid4().int & (1 << 32) - 1
+ elif data_type == 'uint16':
+ string_ = uuid.uuid4().int & (1 << 16) - 1
+ elif data_type == 'decimal64':
+ string_ = float(decimal.Decimal('%d.%d'
+ % (random.randint(0, 2134342),
+ random.randint(0, 999))))
+ elif data_type == 'int64':
+ string_ = random.randint(0, 1000000000000)
+ elif data_type == 'int32':
+ string_ = random.randint(0, 1000000000)
+ elif data_type == 'int16':
+ string_ = random.randint(0, 10000)
+ elif data_type == 'leafref':
+ string_ = 'leafref_data-type'
+ elif data_type == 'union':
+ string_ = socket.inet_ntoa(
+ struct.pack('>I', random.randint(1, 0xffffffff)))
+ elif data_type == 'boolean':
+ string_ = True
+ else:
+ string_ = data_type
+
+ return string_
+ else:
+ if (isinstance(data_type, dict)):
+ if 'enumeration' in data_type:
+ string_ = list(data_type['enumeration']['enum'])[0]
+ return string_
+ if 'leafref' in data_type:
+ data_type = 'leafref'
+ if 'union' in data_type:
+ data_type = 'union'
+
+ # print(data_type, test_value)
+ if not (isinstance(data_type, dict)):
+ if keys and keys[list(keys)[0]]:
+ if list(keys.values())[0] in keys:
+ if data_type in test_value:
+ return test_value[data_type]
+ else:
+ if data_type in test_value:
+ return test_value[data_type]
+
+ if data_type == 'string':
+ string_ = ''.join(choice(ascii_lowercase) for i in range(12))
+ elif data_type == 'uint64':
+ string_ = uuid.uuid4().int & (1 << 64) - 1
+ elif data_type == 'uint8':
+ string_ = uuid.uuid4().int & (1 << 8) - 1
+ elif data_type == 'uint32':
+ string_ = uuid.uuid4().int & (1 << 32) - 1
+ elif data_type == 'uint16':
+ string_ = uuid.uuid4().int & (1 << 16) - 1
+ elif data_type == 'decimal64':
+ string_ = float(decimal.Decimal('%d.%d'
+ % (random.randint(0, 99999999),
+ random.randint(0, 999))))
+ elif data_type == 'int64':
+ string_ = random.randint(0, 99999999)
+ elif data_type == 'int32':
+ string_ = random.randint(0, 999999)
+ elif data_type == 'int16':
+ string_ = random.randint(0, 999999)
+ elif data_type == 'leafref':
+ string_ = 'leafref_data-type'
+ elif data_type == 'union':
+ string_ = socket.inet_ntoa(
+ struct.pack('>I', random.randint(1, 0xffffffff)))
+ elif data_type == 'boolean':
+ string_ = True
+ else:
+ string_ = data_type
+
+ return string_
+
+
+def traverse_it(it, path='', data_json={}, original=True, test_value={},
+ test_key=None, avoid=[], depth=0, max_depth=0):
+ """Main recursicve traversel method, which will go through the schema
+ and generate data JSON
+
+ Args:
+ it (json): schema
+ data_json (dict): used to generate the data for particular key which is
+ present in this dict
+ original (boolean): used to generate original(complete) data JSON
+ test_value (dict): data type and the corresponding value which is
+ getting replaced generated
+ test_key (string): the key which is gonna get tested
+ avoid (list): these keys will get avoided while JSON is getting
+ created
+ depth (int): depth of the JSON
+ max_depth (int: will be the max depth of the JSON)
+
+ Returns:
+ Json data
+ """
+
+ if (isinstance(it, list)):
+ temp = {}
+ depth += 1
+ if depth == max_depth:
+ return []
+ for item in it:
+ # print(path)
+
+ x = traverse_it(item, path=path, data_json=data_json,
+ original=original,
+ test_value=test_value, test_key=test_key,
+ avoid=avoid,
+ depth=depth,
+ max_depth=max_depth)
+ temp.update(x)
+ return temp
+ elif (isinstance(it, dict)):
+ if 'name' in it.keys():
+ if it['name'] == 'disabled':
+ temp = [{it['name']: ''}, {}]
+ return random.choice(temp)
+ path = path + '/' + it['name']
+ if 'type' in it.keys():
+
+ if it['type'] == 'container':
+ depth += 1
+ if depth == max_depth:
+ return {}
+ data_json = {
+ it['name']: traverse_it(it['properties'],
+ path=path, data_json=data_json,
+ original=original,
+ test_value=test_value,
+ test_key=test_key,
+ avoid=avoid,
+ depth=depth,
+ max_depth=max_depth)
+ }
+ return data_json
+ elif it['type'] == 'list':
+ for item_check in it['properties']:
+
+ if 'data-type' in item_check:
+ if (isinstance(item_check['data-type'], dict)):
+ if 'leafref' in item_check['data-type']:
+ temp = {it['name']: []}
+ return temp
+ depth += 1
+
+ if depth == max_depth:
+ return {}
+
+ temp = {
+ it['name']:
+ [traverse_it(it['properties'], path=path,
+ data_json=data_json,
+ original=original,
+ test_value=test_value, test_key=test_key,
+ avoid=avoid,
+ depth=depth,
+ max_depth=max_depth)]
+ }
+ return temp
+ elif it['type'] == 'case':
+ for item_check in it['properties']:
+ if 'data-type' in item_check:
+ if (isinstance(item_check['data-type'], dict)):
+ if 'leafref' in item_check['data-type']:
+ return {}
+ depth += 1
+ if depth == max_depth:
+ return {}
+
+ return traverse_it(it['properties'][0], path=path,
+ data_json=data_json,
+ original=original,
+ test_value=test_value, test_key=test_key,
+ avoid=avoid,
+ depth=depth,
+ max_depth=max_depth)
+ elif it['type'] == 'choice':
+ depth += 1
+
+ if depth == max_depth:
+ return {}
+
+ return traverse_it(it['properties'][0], path=path,
+ data_json=data_json,
+ original=original,
+ test_value=test_value, test_key=test_key,
+ avoid=avoid,
+ depth=depth,
+ max_depth=max_depth)
+ elif it['type'] == 'leaf':
+ # print(data_json)
+ if it['name'] in avoid:
+ return {}
+ if 'data-type' in it:
+ if 'subnet-address' == it['name']:
+ data = '255.255.255.0/24'
+ elif 'numa-unaware' == it['name']:
+ data = ''
+ elif 'ephemeral' == it['name']:
+ data = ''
+ else:
+ data = populate_data(it['data-type'],
+ original=original,
+ test_value=test_value,
+ keys={it['name']: test_key})
+ return {it['name']: data}
+ else:
+ if 'subnet-address' == it['name']:
+ data = '255.255.255.0/24'
+ elif 'numa-unaware' == it['name']:
+ data = ''
+ elif 'ephemeral' == it['name']:
+ data = ''
+ else:
+ data = populate_data(it['data-type'],
+ original=original,
+ test_value=test_value,
+ keys={it['name']: test_key})
+ return {it['name']: data}
+
+ else:
+ print('Error in the JSON!')
+ exit(1)
--- /dev/null
+
+from .imports import * # noqa
+
+
+def parse_cli():
+ """Parse command line options
+ """
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--confd-host", help="confd IP",
+ dest='confd_host',
+ default='127.0.0.1')
+ args = parser.parse_args()
+
+ return args
+
+
+def parse_input_data(file_name):
+ """
+ open the input file and make into a python Dict Obj
+ """
+
+ data = ''
+
+ with open(file_name, 'r') as ipf:
+ data = json.load(ipf)
+
+ return data
--- /dev/null
+#!/usr/bin/env python
+#
+# Copyright 2016-2017 RIFT.io Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import gi
+import numpy as np
+import os
+import pytest
+import random
+import time
+
+import rift.auto.descriptor
+from rift.auto.os_utils import get_mem_usage, print_mem_usage
+gi.require_version('RwNsrYang', '1.0')
+gi.require_version('RwProjectNsdYang', '1.0')
+gi.require_version('RwVnfrYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
+from gi.repository import (
+ RwNsrYang,
+ RwVnfrYang,
+ RwVlrYang,
+ RwProjectNsdYang,
+ RwProjectVnfdYang,
+ )
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+
+@pytest.fixture(scope='module')
+def rwvnfr_proxy(request, mgmt_session):
+ return mgmt_session.proxy(RwVnfrYang)
+
+
+@pytest.fixture(scope='module')
+def rwvlr_proxy(request, mgmt_session):
+ return mgmt_session.proxy(RwVlrYang)
+
+
+@pytest.fixture(scope='module')
+def rwnsr_proxy(request, mgmt_session):
+ return mgmt_session.proxy(RwNsrYang)
+
+
+@pytest.fixture(scope='module')
+def nsd_proxy(request, mgmt_session):
+ return mgmt_session.proxy(RwProjectNsdYang)
+
+
+@pytest.fixture(scope='module')
+def vnfd_proxy(request, mgmt_session):
+ return mgmt_session.proxy(RwProjectVnfdYang)
+
+
+@pytest.mark.setup('multiple_ns_setup')
+@pytest.mark.depends('launchpad')
+@pytest.mark.incremental
+class TestMultipleNsSetup(object):
+ def test_onboard_descriptors(self, logger, mgmt_session, descriptors, nsd_proxy, vnfd_proxy):
+ """Onboards the VNF, NS packages required for the test"""
+ vnfds, nsds = [], []
+ for descriptor in descriptors:
+ pkg_type = rift.auto.descriptor.get_package_type(descriptor)
+ if pkg_type == 'NSD':
+ nsds.append(descriptor)
+ elif pkg_type == 'VNFD':
+ vnfds.append(descriptor)
+
+ pkgs_in_upload_seq = vnfds + nsds
+ logger.debug('Packages in sequence of upload: {}'.format([os.path.basename(pkg) for pkg in pkgs_in_upload_seq]))
+
+ for pkg in pkgs_in_upload_seq:
+ logger.debug('Uploading package {}'.format(pkg))
+ rift.auto.descriptor.onboard(mgmt_session, pkg) # Raise exception if the upload is not successful
+
+ # Verify if the packages are uploaded
+ assert len(vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog').vnfd) == len(vnfds)
+ assert len(nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog').nsd) == len(nsds)
+
+
+@pytest.mark.depends('multiple_ns_setup')
+@pytest.mark.incremental
+class TestMultipleNsInstantiate(object):
+ def test_instantiate_ns_mem_check(self, logger, rwvnfr_proxy, nsd_proxy,
+ rwnsr_proxy, rwvlr_proxy,
+ cloud_account_name, descriptors):
+ """It runs over a loop. In each loop, it instantiates a NS,
+ terminates the NS, checks memory usage of the system.
+ During memory check, it verifies whether current system
+ mem usage exceeds base memory-usage by a defined threshold.
+ """
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
+
+ # Random NSD sequence generation for NS instantiation
+ iteration, no_of_hours = map(float, pytest.config.getoption('--multiple-ns-instantiate').split(','))
+ nsd_count = len([pkg for pkg in descriptors if 'nsd.' in pkg])
+ nsd_instantiate_seq = np.random.choice(list(range(nsd_count)), int(iteration))
+ random.shuffle(nsd_instantiate_seq)
+
+ logger.debug('nsd instantiaion sequence: {}'.format([catalog.nsd[seq].name for seq in nsd_instantiate_seq]))
+
+ # Collect mem-usage of the system
+ base_system_rss = get_mem_usage()
+ print_mem_usage()
+
+ start_time = time.time()
+ total_duration_in_secs = no_of_hours * 60 * 60
+ # Loop through NSD instantiation sequence and instantiate the NS
+ for idx, seq in enumerate(nsd_instantiate_seq, 1):
+ # Instantiating NS
+ nsd = catalog.nsd[seq]
+ logger.debug('Iteration {}: Instantiating NS {}'.format(idx, nsd.name))
+
+ nsr = rift.auto.descriptor.create_nsr(cloud_account_name, nsd.name, nsd)
+ rwnsr_proxy.create_config('/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr', nsr)
+
+ # Verify if NS reaches active state
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata/nsr[ns-instance-config-ref={}]'.format(quoted_key(nsr.id)))
+ assert nsr_opdata is not None
+
+ # Verify NSR instances enter 'running' operational-status
+ for nsr in rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata').nsr:
+ xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(
+ quoted_key(nsr.ns_instance_config_ref))
+ rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=400)
+
+ # Verify NSR instances enter 'configured' config-status
+ for nsr in rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata').nsr:
+ xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(quoted_key(nsr.ns_instance_config_ref))
+ rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=400)
+
+ time.sleep(30) # Let it run for few secs before terminating it
+
+ # Terminates the NSR
+ rift.auto.descriptor.terminate_nsr(rwvnfr_proxy, rwnsr_proxy,
+ rwvlr_proxy, logger)
+
+ time.sleep(30) # After NS termination, wait for few secs before collecting mem-usage
+
+ # Get the mem-usage and compare it with base mem-usage
+ print_mem_usage()
+ curr_system_rss = get_mem_usage()
+ threshold = 5
+ mem_usage_inc = 100 * (curr_system_rss - base_system_rss) / base_system_rss
+ if mem_usage_inc > threshold:
+ assert False, 'There is an increase of {}%% during sequence {}. Base system-rss- {}; Current system-rss- {}'.format(
+ mem_usage_inc, idx, base_system_rss, curr_system_rss)
+
+ if (time.time() - start_time) > total_duration_in_secs:
+ logger.debug('NS instantiation has been happening for last {} hours (provided limit). Exiting.'.format(
+ no_of_hours))
+ break
+
+
+@pytest.mark.depends('multiple_ns_setup')
+@pytest.mark.teardown('multiple_ns_setup')
+@pytest.mark.incremental
+class TestMultipleNsTeardown(object):
+ def test_delete_descritors(self, nsd_proxy, vnfd_proxy):
+ """Deletes VNF, NS descriptors"""
+ nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
+ for nsd in nsds.nsd:
+ xpath = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd[id={}]".format(quoted_key(nsd.id))
+ nsd_proxy.delete_config(xpath)
+
+ nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
+ assert nsds is None or len(nsds.nsd) == 0
+
+ vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
+ for vnfd_record in vnfds.vnfd:
+ xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_record.id))
+ vnfd_proxy.delete_config(xpath)
+
+ vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
+ assert vnfds is None or len(vnfds.vnfd) == 0
#!/usr/bin/env python
"""
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.io Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@brief Onboard descriptors
"""
+import gi
import json
import logging
+import numpy as np
import os
import pytest
-import shlex
+import random
import requests
+import requests_toolbelt
+import shlex
import shutil
import subprocess
import time
import rift.auto.mano
import rift.auto.session
+import rift.auto.descriptor
-import gi
gi.require_version('RwNsrYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
gi.require_version('RwLaunchpadYang', '1.0')
gi.require_version('RwBaseYang', '1.0')
+gi.require_version('RwStagingMgmtYang', '1.0')
+gi.require_version('RwPkgMgmtYang', '1.0')
+gi.require_version('RwVlrYang', '1.0')
from gi.repository import (
RwcalYang,
- NsdYang,
+ RwProjectNsdYang,
RwNsrYang,
RwVnfrYang,
NsrYang,
VnfrYang,
VldYang,
- RwVnfdYang,
+ RwProjectVnfdYang,
RwLaunchpadYang,
- RwBaseYang
+ RwBaseYang,
+ RwStagingMgmtYang,
+ RwPkgMgmtYang,
+ RwImageMgmtYang,
+ RwTypes,
+ RwVlrYang
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
logging.basicConfig(level=logging.DEBUG)
@pytest.fixture(scope='module')
def vnfd_proxy(request, mgmt_session):
- return mgmt_session.proxy(RwVnfdYang)
+ return mgmt_session.proxy(RwProjectVnfdYang)
@pytest.fixture(scope='module')
def rwvnfr_proxy(request, mgmt_session):
return mgmt_session.proxy(VldYang)
+@pytest.fixture(scope='module')
+def rwvlr_proxy(request, mgmt_session):
+ return mgmt_session.proxy(RwVlrYang)
+
+
@pytest.fixture(scope='module')
def nsd_proxy(request, mgmt_session):
- return mgmt_session.proxy(NsdYang)
+ return mgmt_session.proxy(RwProjectNsdYang)
@pytest.fixture(scope='module')
def endpoint():
return "upload"
-def create_nsr(nsd, input_param_list, cloud_account_name):
- """
- Create the NSR record object
-
- Arguments:
- nsd - NSD
- input_param_list - list of input-parameter objects
-
- Return:
- NSR object
- """
- nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
-
- nsr.id = str(uuid.uuid4())
- nsr.name = rift.auto.mano.resource_name(nsr.id)
- nsr.short_name = "nsr_short_name"
- nsr.description = "This is a description"
- nsr.nsd.from_dict(nsd.as_dict())
- nsr.admin_status = "ENABLED"
- nsr.input_parameter.extend(input_param_list)
- nsr.cloud_account = cloud_account_name
-
- return nsr
-
def upload_descriptor(
logger,
host=host,
endpoint=endpoint)
-def terminate_nsr(rwvnfr_proxy, rwnsr_proxy, logger, wait_after_kill=True):
- """
- Terminate the instance and check if the record is deleted.
-
- Asserts:
- 1. NSR record is deleted from instance-config.
-
- """
- logger.debug("Terminating NSRs")
-
- nsr_path = "/ns-instance-config"
- nsr = rwnsr_proxy.get_config(nsr_path)
- nsrs = nsr.nsr
-
- xpaths = []
- for nsr in nsrs:
- xpath = "/ns-instance-config/nsr[id='{}']".format(nsr.id)
- rwnsr_proxy.delete_config(xpath)
- xpaths.append(xpath)
-
- if wait_after_kill:
- time.sleep(30)
- else:
- time.sleep(5)
- for xpath in xpaths:
- nsr = rwnsr_proxy.get_config(xpath)
- assert nsr is None
+def get_ns_cloud_resources(rwvnfr_proxy, rwvlr_proxy):
+ """Returns a collection of ports, networks, VMs used by this NS"""
+ ns_cloud_resources = {'ports':[], 'vms':[], 'networks':[]}
- # Get the ns-instance-config
- ns_instance_config = rwnsr_proxy.get_config("/ns-instance-config")
+ # Get ports and VMs associated with each VNF
+ vnfrs = rwvnfr_proxy.get('/rw-project:project[rw-project:name="default"]/vnfr-catalog/vnfr', list_obj=True)
+ for vnfr in vnfrs.vnfr:
+ for cp in vnfr.connection_point:
+ ns_cloud_resources['ports'].append(cp.connection_point_id)
+ for vdur in vnfr.vdur:
+ ns_cloud_resources['vms'].append(vdur.vim_id)
- # Termination tests
- vnfr = "/vnfr-catalog/vnfr"
- vnfrs = rwvnfr_proxy.get(vnfr, list_obj=True)
- assert vnfrs is None or len(vnfrs.vnfr) == 0
-
- # nsr = "/ns-instance-opdata/nsr"
- # nsrs = rwnsr_proxy.get(nsr, list_obj=True)
- # assert len(nsrs.nsr) == 0
+ # Get the network associated with each NS
+ vlrs = rwvlr_proxy.get('/rw-project:project[rw-project:name="default"]/vlr-catalog/vlr', list_obj=True)
+ for vlr in vlrs.vlr:
+ ns_cloud_resources['networks'].append(vlr.network_id)
+ return ns_cloud_resources
@pytest.mark.setup('nsr')
class TestNsrStart(object):
"""A brief overview of the steps performed.
1. Generate & on-board new descriptors
- 2. Start the NSR
+ 2. Start the NSR
"""
def test_upload_descriptors(
mgmt_session,
scheme,
cert,
- descriptors
+ descriptors,
+ iteration,
):
"""Generates & On-boards the descriptors.
+
+ 1. Request a staging area: RPC returns an endpoint and port
+ 1. Upload the file to the endpoint, return the endpoint to download
+ 2. Reconstruct the URL and trigger an RPC upload for the package.
"""
+ # We are instantiating the NS twice in port-sequencing test. Seconds NS instantiation will be using already uploaded
+ # descriptors with updated interface positional values.
+ if iteration==1 and pytest.config.getoption("--port-sequencing"):
+ pytest.skip()
endpoint = "upload"
for file_name in descriptors:
- onboard_descriptor(
- mgmt_session.host,
- file_name,
- logger,
- endpoint,
- scheme,
- cert)
+
+ ip = RwStagingMgmtYang.YangInput_RwStagingMgmt_CreateStagingArea.from_dict({
+ "package_type": "VNFD"})
+
+ if "nsd" in file_name:
+ ip.package_type = "NSD"
+
+ data = mgmt_session.proxy(RwStagingMgmtYang).rpc(ip)
+ form = requests_toolbelt.MultipartEncoder(fields={
+ 'file': (os.path.basename(file_name),
+ open(file_name, 'rb'),
+ 'application/octet-stream')
+ })
+
+ response = requests.post(
+ "{}://{}:{}/{}".format(
+ scheme,
+ mgmt_session.host,
+ data.port,
+ data.endpoint),
+ data=form.to_string(),
+ cert=cert, # cert is a tuple
+ verify=False,
+ headers={"Content-Type": "multipart/form-data"})
+
+ resp = json.loads(response.text)
+ url = "https://{}:{}{}".format(mgmt_session.host, data.port, resp['path'])
+
+ ip = RwPkgMgmtYang.YangInput_RwPkgMgmt_PackageCreate.from_dict({
+ "package_type": "VNFD",
+ "external_url": url
+ })
+
+ if "nsd" in file_name:
+ ip.package_type = "NSD"
+
+ # trigger the upload.
+ resp = mgmt_session.proxy(RwPkgMgmtYang).rpc(ip)
+
+ wait_onboard_transaction_finished(
+ logger,
+ resp.transaction_id,
+ scheme,
+ cert,
+ host=mgmt_session.host,
+ endpoint=endpoint)
descriptor_vnfds, descriptor_nsd = descriptors[:-1], descriptors[-1]
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
actual_vnfds = catalog.vnfd
assert len(actual_vnfds) == len(descriptor_vnfds), \
"There should {} vnfds".format(len(descriptor_vnfds))
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
actual_nsds = catalog.nsd
assert len(actual_nsds) == 1, "There should only be a single nsd"
+ @pytest.mark.skipif(not pytest.config.getoption('--upload-images-multiple-accounts'),
+ reason="need --upload-images-multiple-accounts option to run")
+ def test_images_uploaded_multiple_accounts(self, logger, mgmt_session, random_image_name, cloud_accounts, cal):
+ image_mgmt_proxy = mgmt_session.proxy(RwImageMgmtYang)
+ upload_jobs = image_mgmt_proxy.get('/rw-project:project[rw-project:name="default"]/upload-jobs')
+ logger.info('Embedded image name(apart from ping pong Fedora images): {}'.format(random_image_name))
+ for job in upload_jobs.job:
+ assert image_mgmt_proxy.wait_for('/rw-project:project[rw-project:name="default"]/upload-jobs/job[id={}]/status'.format(quoted_key(job.id)), 'COMPLETED', timeout=240)
+ assert len(job.upload_tasks) == len(cloud_accounts)
+ for upload_task in job.upload_tasks:
+ assert upload_task.status == 'COMPLETED'
+
+ assert len(upload_jobs.job) == 3
+
+ # Check whether images are present in VIMs
+ for account in cloud_accounts:
+ rc, res = cal.get_image_list(RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList.from_dict(account.as_dict()))
+ assert rc == RwTypes.RwStatus.SUCCESS
+ assert [image for image in res.imageinfo_list if image.name == random_image_name]
+
+ @pytest.mark.skipif(not pytest.config.getoption("--vnf-onboard-delete"), reason="need --vnf-onboard-delete option to run")
+ def test_upload_delete_descriptors(self, logger, mgmt_session, vnfd_proxy, descriptors, vnf_onboard_delete):
+ """Randomly upload and delete VNFs. With each upload/delete, verify if the VNF
+ gets uploaded/deleted successfully.
+ """
+ xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]"
+ iteration, vnf_count = map(int, vnf_onboard_delete.split(','))
+
+ # Get the VNF paths to be used for onboarding
+ all_vnfs = [pkg_path for pkg_path in descriptors if '_nsd' not in os.path.basename(pkg_path)]
+ if vnf_count > len(all_vnfs):
+ vnf_count = len(all_vnfs)
+ available_vnfs = random.sample(all_vnfs, vnf_count)
+
+ # Get the add, delete iterations
+ add_del_seq = list(np.random.choice(['add', 'del'], iteration))
+ random.shuffle(add_del_seq)
+ logger.info('Vnf add-delete iteration sequence: {}'.format(add_del_seq))
+
+ uploaded_vnfs = {}
+
+ def get_vnfd_list():
+ """Returns list of VNFDs"""
+ vnfd_obj = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
+ return vnfd_obj.vnfd if vnfd_obj else []
+
+ def delete_vnfd():
+ """Deletes a VNFD"""
+ vnf_path, vnfd_id = random.choice(list(uploaded_vnfs.items()))
+ logger.info('Deleting VNF {} having id {}'.format(os.path.basename(vnf_path), vnfd_id))
+ vnfd_proxy.delete_config(xpath.format(quoted_key(vnfd_id)))
+ uploaded_vnfs.pop(vnf_path)
+ available_vnfs.append(vnf_path)
+ assert not [vnfd for vnfd in get_vnfd_list() if vnfd.id == vnfd_id]
+
+ for op_type in add_del_seq:
+ if op_type =='del':
+ if uploaded_vnfs:
+ delete_vnfd()
+ continue
+ op_type = 'add'
+
+ if op_type == 'add':
+ if not available_vnfs:
+ delete_vnfd()
+ continue
+ vnf_path = random.choice(available_vnfs)
+ logger.info('Adding VNF {}'.format(os.path.basename(vnf_path)))
+ rift.auto.descriptor.onboard(mgmt_session, vnf_path)
+ vnfs = get_vnfd_list()
+ assert len(vnfs) == len(uploaded_vnfs) + 1
+ vnfd = [vnfd for vnfd in vnfs if vnfd.id not in list(uploaded_vnfs.values())]
+ assert len(vnfd) == 1
+ vnfd = vnfd[0]
+ assert vnfd.name
+ assert vnfd.connection_point
+ assert vnfd.vdu
+ uploaded_vnfs[vnf_path] = vnfd.id
+ available_vnfs.remove(vnf_path)
+
+ assert len(get_vnfd_list()) == len(uploaded_vnfs)
+ logger.info('Onboarded VNFs : {}'.format(uploaded_vnfs))
+
+ assert len(available_vnfs) + len(uploaded_vnfs) == vnf_count
+ # cleanup - Delete VNFs(if any)
+ for vnfd_id in uploaded_vnfs.values():
+ vnfd_proxy.delete_config(xpath.format(quoted_key(vnfd_id)))
+
@pytest.mark.feature("upload-image")
def test_upload_images(self, descriptor_images, cloud_host, cloud_user, cloud_tenants):
[(tenant, "private") for tenant in cloud_tenants])
for image_location in descriptor_images:
- image = RwcalYang.ImageInfoItem.from_dict({
+ image = RwcalYang.YangData_RwProject_Project_VimResources_ImageinfoList.from_dict({
'name': os.path.basename(image_location),
'location': image_location,
'disk_format': 'qcow2',
def test_set_scaling_params(self, nsd_proxy):
- nsds = nsd_proxy.get('/nsd-catalog')
+ nsds = nsd_proxy.get('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsd = nsds.nsd[0]
for scaling_group in nsd.scaling_group_descriptor:
scaling_group.max_instance_count = 2
- nsd_proxy.replace_config('/nsd-catalog/nsd[id="{}"]'.format(
- nsd.id), nsd)
+ nsd_proxy.replace_config('/rw-project:project[rw-project:name="default"]/nsd-catalog/nsd[id={}]'.format(
+ quoted_key(nsd.id)), nsd)
+ @pytest.mark.skipif(not (pytest.config.getoption("--update-vnfd-instantiate") or pytest.config.getoption("--port-sequencing")),
+ reason="need --update-vnfd-instantiate or --port-sequencing option to run")
+ def test_update_vnfd(self, vnfd_proxy, iteration, port_sequencing_intf_positions):
+ """Updates few fields of ping pong VNFDs and verify those changes
+ """
+ xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]"
+ vnfd_catalog = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd"
+
+ if iteration==0 and pytest.config.getoption("--port-sequencing"):
+ pytest.skip()
+
+ def get_vnfd():
+ vnfds = vnfd_proxy.get(vnfd_catalog, list_obj=True)
+ dict_ = {}
+
+ # Get ping pong VNFDs
+ for vnfd in vnfds.vnfd:
+ if 'ping' in vnfd.name:
+ dict_['ping'] = vnfd
+ if 'pong' in vnfd.name:
+ dict_['pong'] = vnfd
+ return dict_
+
+ vnfds_dict = get_vnfd()
+ update_data = {'ping':{'static_ip_address':'31.31.31.60'}, 'pong':{'static_ip_address':'31.31.31.90'}}
+ port_sequencing_intf_positions_tmp = port_sequencing_intf_positions[:]
+
+ # Modify/add fields in VNFDs
+ for name_, vnfd in vnfds_dict.items():
+ if pytest.config.getoption('--update-vnfd-instantiate'):
+ vnfd.vdu[0].interface[1].static_ip_address = update_data[name_]['static_ip_address']
+ if pytest.config.getoption('--port-sequencing'):
+ vnfd_intf_list = vnfd.vdu[0].interface
+ # for ping vnfd, remove positional values from all interfaces
+ # for pong vnfd, modify the positional values as per fixture port_sequencing_intf_positions
+ if 'ping' in vnfd.name:
+ tmp_intf_list = []
+ for i in range(len(vnfd_intf_list)):
+ tmp_intf_dict = vnfd_intf_list[-1].as_dict()
+ del tmp_intf_dict['position']
+ vnfd_intf_list.pop()
+ tmp_intf_list.append(tmp_intf_dict)
+ for intf_dict_without_positional_values in tmp_intf_list:
+ new_intf = vnfd.vdu[0].interface.add()
+ new_intf.from_dict(intf_dict_without_positional_values)
+
+ if 'pong' in vnfd.name:
+ for intf in vnfd_intf_list:
+ if 'position' in intf:
+ intf.position = port_sequencing_intf_positions_tmp.pop()
+
+ # Update/save the VNFDs
+ for vnfd in vnfds_dict.values():
+ vnfd_proxy.replace_config(xpath.format(quoted_key(vnfd.id)), vnfd)
+
+ # Match whether data is updated
+ vnfds_dict = get_vnfd()
+ assert vnfds_dict
+ for name_, vnfd in vnfds_dict.items():
+ if pytest.config.getoption('--update-vnfd-instantiate'):
+ assert vnfd.vdu[0].interface[1].static_ip_address == update_data[name_]['static_ip_address']
+ if pytest.config.getoption('--port-sequencing'):
+ if 'ping' in vnfd.name:
+ for intf in vnfd.vdu[0].interface:
+ assert 'position' not in intf.as_dict()
+ if 'pong' in vnfd.name:
+ tmp_positional_values_list = []
+ for intf in vnfd.vdu[0].interface:
+ if 'position' in intf.as_dict():
+ tmp_positional_values_list.append(intf.position)
+ assert set(tmp_positional_values_list) == set(port_sequencing_intf_positions)
def test_instantiate_nsr(self, logger, nsd_proxy, rwnsr_proxy, base_proxy, cloud_account_name):
config_param.value,
running_config.input_parameter))
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsd = catalog.nsd[0]
input_parameters = []
- descr_xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id='%s']/nsd:description" % nsd.id
- descr_value = "New NSD Description"
+ descr_xpath = "/nsd:nsd-catalog/nsd:nsd/nsd:vendor"
+ descr_value = "New Vendor"
in_param_id = str(uuid.uuid4())
- input_param_1 = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ input_param_1 = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath=descr_xpath,
value=descr_value)
input_parameters.append(input_param_1)
- nsr = create_nsr(nsd, input_parameters, cloud_account_name)
+ nsr = rift.auto.descriptor.create_nsr(cloud_account_name, nsd.name, nsd, input_param_list=input_parameters)
logger.info("Instantiating the Network Service")
- rwnsr_proxy.create_config('/ns-instance-config/nsr', nsr)
+ rwnsr_proxy.create_config('/rw-project:project[rw-project:name="default"]/ns-instance-config/nsr', nsr)
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata/nsr[ns-instance-config-ref="{}"]'.format(nsr.id))
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata/nsr[ns-instance-config-ref={}]'.format(quoted_key(nsr.id)))
assert nsr_opdata is not None
# Verify the input parameter configuration
- running_config = rwnsr_proxy.get_config("/ns-instance-config/nsr[id='%s']" % nsr.id)
+ running_config = rwnsr_proxy.get_config("/rw-project:project[rw-project:name='default']/ns-instance-config/nsr[id=%s]" % quoted_key(nsr.id))
for input_param in input_parameters:
verify_input_parameters(running_config, input_param)
def test_wait_for_nsr_started(self, rwnsr_proxy):
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ """Verify NSR instances enter 'running' operational-status
+ """
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
for nsr in nsrs:
- xpath = "/ns-instance-opdata/nsr[ns-instance-config-ref='{}']/operational-status".format(nsr.ns_instance_config_ref)
- rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=240)
+ xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/operational-status".format(quoted_key(nsr.ns_instance_config_ref))
+ rwnsr_proxy.wait_for(xpath, "running", fail_on=['failed'], timeout=400)
+
+ def test_wait_for_nsr_configured(self, rwnsr_proxy):
+ """Verify NSR instances enter 'configured' config-status
+ """
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
+ nsrs = nsr_opdata.nsr
+
+ for nsr in nsrs:
+ xpath = "/rw-project:project[rw-project:name='default']/ns-instance-opdata/nsr[ns-instance-config-ref={}]/config-status".format(quoted_key(nsr.ns_instance_config_ref))
+ rwnsr_proxy.wait_for(xpath, "configured", fail_on=['failed'], timeout=400)
@pytest.mark.teardown('nsr')
@pytest.mark.depends('launchpad')
@pytest.mark.incremental
class TestNsrTeardown(object):
- def test_terminate_nsr(self, rwvnfr_proxy, rwnsr_proxy, logger, cloud_type):
+
+ def test_delete_embedded_images(self, random_image_name, cloud_accounts, cal):
+ """Deletes images embedded in VNF from VIM. It only deletes additional images, not
+ the Fedora ping pong images"""
+ for account in cloud_accounts:
+ rc, rsp = cal.get_image_list(RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList.from_dict(account.as_dict()))
+ assert rc == RwTypes.RwStatus.SUCCESS
+ if rsp is not None:
+ for image in rsp.imageinfo_list:
+ if random_image_name in image.name:
+ cal.delete_image(RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList.from_dict(account.as_dict()), image.id)
+
+ def test_terminate_nsr(self, rwvnfr_proxy, rwnsr_proxy, logger, cloud_type,
+ rwvlr_proxy, vim_clients, cloud_account_name):
"""
Terminate the instance and check if the record is deleted.
1. NSR record is deleted from instance-config.
"""
- logger.debug("Terminating NSR")
+ # Collects the Cloud resources like ports, networks, VMs used by the current NS
+ ns_cloud_resources = get_ns_cloud_resources(rwvnfr_proxy, rwvlr_proxy)
+ logger.info('Cloud resources used by NS: {}'.format(ns_cloud_resources))
+ logger.debug("Terminating NSR")
wait_after_kill = True
if cloud_type == "mock":
wait_after_kill = False
- terminate_nsr(rwvnfr_proxy, rwnsr_proxy, logger, wait_after_kill=wait_after_kill)
-
- def test_delete_records(self, nsd_proxy, vnfd_proxy):
+ rift.auto.descriptor.terminate_nsr(rwvnfr_proxy, rwnsr_proxy,
+ rwvlr_proxy, logger,
+ wait_after_kill=wait_after_kill)
+ # Collect all the ports, networks VMs from openstack and
+ # check if previously collected resources (i.e ns_cloud_resources) are still present in this collection
+ start_time = time.time()
+ while time.time()-start_time < 240:
+ try:
+ vim_client = vim_clients[cloud_account_name]
+ vim_resources = dict()
+ vim_resources['networks'] = vim_client.neutron_network_list()
+ vim_resources['vms'] = vim_client.nova_server_list()
+ vim_resources['ports'] = vim_client.neutron_port_list()
+
+ for resource_type in ns_cloud_resources.keys():
+ logger.debug("Verifying all %s resources have been removed from vim", resource_type)
+ vim_resource_ids = [
+ vim_resource['id'] for vim_resource in vim_resources[resource_type]
+ if 'shared' not in vim_resource.keys()
+ or not vim_resource['shared']
+ ]
+ for ns_resource_id in ns_cloud_resources[resource_type]:
+ logger.debug('Verifying %s resource %s removed', resource_type, ns_resource_id)
+ assert ns_resource_id not in vim_resource_ids
+ return
+ except AssertionError:
+ time.sleep(10)
+ raise AssertionError("Resources not cleared from openstack")
+
+ def test_delete_records(self, nsd_proxy, vnfd_proxy, iteration):
"""Delete the NSD & VNFD records
Asserts:
The records are deleted.
"""
- nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+ # We are instantiating the NS twice in port-sequencing test. Seconds NS instantiation will be using already uploaded
+ # descriptors with updated interface positional values.
+ if iteration==0 and pytest.config.getoption("--port-sequencing"):
+ pytest.skip()
+ nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
for nsd in nsds.nsd:
- xpath = "/nsd-catalog/nsd[id='{}']".format(nsd.id)
+ xpath = "/rw-project:project[rw-project:name='default']/nsd-catalog/nsd[id={}]".format(quoted_key(nsd.id))
nsd_proxy.delete_config(xpath)
- nsds = nsd_proxy.get("/nsd-catalog/nsd", list_obj=True)
+ nsds = nsd_proxy.get("/rw-project:project[rw-project:name='default']/nsd-catalog/nsd", list_obj=True)
assert nsds is None or len(nsds.nsd) == 0
- vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+ vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
for vnfd_record in vnfds.vnfd:
- xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_record.id)
+ xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_record.id))
vnfd_proxy.delete_config(xpath)
- vnfds = vnfd_proxy.get("/vnfd-catalog/vnfd", list_obj=True)
+ vnfds = vnfd_proxy.get("/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd", list_obj=True)
assert vnfds is None or len(vnfds.vnfd) == 0
@brief System test of stopping launchpad on master and
validating configuration on standby
"""
+import argparse
+import gi
import os
+import subprocess
import sys
import time
-import argparse
-import subprocess
-import gi
-from gi.repository import RwVnfdYang
+from gi.repository import RwProjectVnfdYang
from gi.repository import RwVnfrYang
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
import rift.auto.proxy
from rift.auto.session import NetconfSession
Tuple: VNFD and its corresponding VNFR entry
"""
def get_vnfd(vnfd_id):
- xpath = "/vnfd-catalog/vnfd[id='{}']".format(vnfd_id)
- return proxy(RwVnfdYang).get(xpath)
+ xpath = "/rw-project:project[rw-project:name='default']/vnfd-catalog/vnfd[id={}]".format(quoted_key(vnfd_id))
+ return proxy(RwProjectVnfdYang).get(xpath)
- vnfr = "/vnfr-catalog/vnfr"
+ vnfr = "/rw-project:project[rw-project:name='default']/vnfr-catalog/vnfr"
print ("START")
vnfrs = proxy(RwVnfrYang).get(vnfr, list_obj=True)
print ("STOP")
#!/usr/bin/env python3
"""
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
@brief System test of basic launchpad functionality
"""
+import gi
import pytest
-import gi
gi.require_version('RwsdnalYang', '1.0')
from gi.repository import RwsdnalYang
+from gi.repository import RwSdnYang
+from gi.repository import RwRoAccountYang
+
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
@pytest.mark.setup('sdn')
@pytest.mark.feature('sdn')
SDN name and accout type.
'''
proxy = mgmt_session.proxy(RwsdnalYang)
- sdn_account = RwsdnalYang.SDNAccount(
+ sdn_account = RwsdnalYang.YangData_RwProject_Project_SdnAccounts_SdnAccountList(
name=sdn_account_name,
account_type=sdn_account_type)
- xpath = "/sdn-accounts/sdn-account-list[name='%s']" % sdn_account_name
- proxy.create_config(xpath, sdn_account)
+ xpath = "/rw-project:project[rw-project:name='default']/sdn-accounts/sdn-account-list[name=%s]" % quoted_key(sdn_account_name)
+ proxy.replace_config(xpath, sdn_account)
+ sdn_account = proxy.get(xpath)
+
+ def test_create_openstack_sdn_account(self, mgmt_session, openstack_sdn_account_name, cloud_account):
+ '''Configure sdn account
+
+ Asserts:
+ SDN name and account type.
+ '''
+ proxy = mgmt_session.proxy(RwSdnYang)
+ sdn_account = RwSdnYang.YangData_RwProject_Project_Sdn_Account.from_dict({
+ 'name': openstack_sdn_account_name,
+ 'account_type': 'openstack',
+ 'openstack': {
+ 'admin': cloud_account.openstack.admin,
+ 'key': cloud_account.openstack.key,
+ 'secret': cloud_account.openstack.secret,
+ 'auth_url': cloud_account.openstack.auth_url,
+ 'tenant': cloud_account.openstack.tenant,
+ 'project_domain': cloud_account.openstack.project_domain,
+ 'user_domain': cloud_account.openstack.user_domain,
+ 'region': cloud_account.openstack.region,
+ }})
+
+ xpath = "/rw-project:project[rw-project:name='default']/sdn/account[name={}]".format(quoted_key(openstack_sdn_account_name))
+ proxy.replace_config(xpath, sdn_account)
sdn_account = proxy.get(xpath)
@pytest.mark.depends('sdn')
sdn_account.account_type is what was configured
'''
proxy = mgmt_session.proxy(RwsdnalYang)
- xpath = "/sdn-accounts/sdn-account-list[name='%s']" % sdn_account_name
+ xpath = "/rw-project:project[rw-project:name='default']/sdn-accounts/sdn-account-list[name=%s]" % quoted_key(sdn_account_name)
sdn_account = proxy.get_config(xpath)
assert sdn_account.account_type == sdn_account_type
+ def test_openstack_sdn_account_connection_status(self, mgmt_session, openstack_sdn_account_name):
+ '''Verify connection status on openstack sdn account
+
+ Asserts:
+ openstack sdn account is successfully connected
+ '''
+ proxy = mgmt_session.proxy(RwSdnYang)
+ proxy.wait_for(
+ '/rw-project:project[rw-project:name="default"]/sdn/account[name={}]/connection-status/status'.format(quoted_key(openstack_sdn_account_name)),
+ 'success',
+ timeout=30,
+ fail_on=['failure'])
+
@pytest.mark.teardown('sdn')
@pytest.mark.feature('sdn')
@pytest.mark.incremental
def test_delete_odl_sdn_account(self, mgmt_session, sdn_account_name):
'''Unconfigure sdn account'''
proxy = mgmt_session.proxy(RwsdnalYang)
- xpath = "/sdn-accounts/sdn-account-list[name='%s']" % sdn_account_name
+ xpath = "/rw-project:project[rw-project:name='default']/sdn-accounts/sdn-account-list[name=%s]" % quoted_key(sdn_account_name)
+ proxy.delete_config(xpath)
+
+ def test_delete_openstack_sdn_account(self, mgmt_session, openstack_sdn_account_name):
+ '''Unconfigure sdn account'''
+ proxy = mgmt_session.proxy(RwSdnYang)
+ xpath = '/rw-project:project[rw-project:name="default"]/sdn/account[name={}]'.format(quoted_key(openstack_sdn_account_name))
proxy.delete_config(xpath)
@pytest.mark.setup('launchpad')
+@pytest.mark.depends('sdn')
@pytest.mark.usefixtures('cloud_account')
@pytest.mark.incremental
class TestLaunchpadSetup:
- def test_create_cloud_accounts(self, mgmt_session, cloud_module, cloud_xpath, cloud_accounts):
+ def test_create_cloud_accounts(self, mgmt_session, cloud_module, cloud_xpath, cloud_accounts, l2_port_chaining, openstack_sdn_account_name):
'''Configure cloud accounts
Asserts:
'''
proxy = mgmt_session.proxy(cloud_module)
for cloud_account in cloud_accounts:
- xpath = '{}[name="{}"]'.format(cloud_xpath, cloud_account.name)
+ if l2_port_chaining:
+ cloud_account.sdn_account = openstack_sdn_account_name
+ xpath = '{}[name={}]'.format(cloud_xpath, quoted_key(cloud_account.name))
proxy.replace_config(xpath, cloud_account)
response = proxy.get(xpath)
assert response.name == cloud_account.name
assert response.account_type == cloud_account.account_type
-@pytest.mark.depends('launchpad')
-@pytest.mark.usefixtures('cloud_account')
-@pytest.mark.incremental
-class TestLaunchpad:
def test_account_connection_status(self, mgmt_session, cloud_module, cloud_xpath, cloud_accounts):
'''Verify connection status on each cloud account
proxy = mgmt_session.proxy(cloud_module)
for cloud_account in cloud_accounts:
proxy.wait_for(
- '{}[name="{}"]/connection-status/status'.format(cloud_xpath, cloud_account.name),
+ '{}[name={}]/connection-status/status'.format(cloud_xpath, quoted_key(cloud_account.name)),
'success',
timeout=30,
fail_on=['failure'])
+ @pytest.mark.feature('openmano')
+ def test_create_ro_accounts(self, mgmt_session, ro_accounts):
+ for name, ro_account in ro_accounts.items():
+ mgmt_session.proxy(RwRoAccountYang).create_config('/rw-project:project[rw-project:name="default"]/ro-account/account', ro_account)
+
+ @pytest.mark.feature('openmano')
+ def test_ro_account_connection_status(self, mgmt_session, ro_accounts):
+ for name, ro_account in ro_accounts.items():
+ mgmt_session.proxy(RwRoAccountYang).wait_for((
+ '/rw-project:project[rw-project:name="default"]'
+ '/ro-account-state/account[name={account_name}]/connection-status/status'
+ ).format(account_name=quoted_key(ro_account.name)),
+ 'success',
+ timeout=30,
+ fail_on=['failure'])
@pytest.mark.teardown('launchpad')
@pytest.mark.usefixtures('cloud_account')
'''Unconfigure cloud_account'''
proxy = mgmt_session.proxy(cloud_module)
for cloud_account in cloud_accounts:
- xpath = "{}[name='{}']".format(cloud_xpath, cloud_account.name)
+ xpath = "{}[name={}]".format(cloud_xpath, quoted_key(cloud_account.name))
proxy.delete_config(xpath)
+
+ @pytest.mark.feature('openmano')
+ def test_delete_ro_accounts(self, mgmt_session, ro_accounts):
+ for name, ro_account in ro_accounts.items():
+ xpath = "/rw-project:project[rw-project:name='default']/ro-account/account[name={}]"
+ mgmt_session.proxy(RwRoAccountYang).delete_config(xpath.format(quoted_key(name)))
cmd_template = ("ssh_root {remote_ip} -q -o BatchMode=yes -o "
" UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -- "
- " \"rm -rf /tmp/corosync; cd {rift_install}; {rift_root}/rift-shell -e -- {rift_install}/usr/bin/rwmain -m /tmp/manifest.xml\"").format(
+ " \"rm -rf /tmp/corosync; cd {rift_install}; {rift_root}/rift-shell -- {rift_install}/usr/bin/rwmain -m /tmp/manifest.xml\"").format(
remote_ip=remote_ip,
rift_root=rift_root,
rift_install=rift_install)
--- /dev/null
+{
+ "test_name":"TC_COMPLEX_SCALING",
+ "commandline":"./complex_scaling --test-name 'TC_COMPLEX_SCALING' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --complex-scaling --multiple-ns-instantiate 0,0",
+ "test_description":"System test to perform a multi event test",
+ "allow_production_launchpad": true,
+ "run_as_root": true,
+ "status":"broken",
+ "keywords":["nightly","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_EMBEDDED_IMAGES_VNF_MULTIPLE_VIM_ACCOUNTS",
+ "commandline":"./accounts_creation_onboard_systest --test-name 'TC_EMBEDDED_IMAGES_VNF_MULTIPLE_VIM_ACCOUNTS' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --upload-images-multiple-accounts",
+ "test_description":"System test to check whether images embedded in VNF package get uploaded to all VIM accounts(Openstack)",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"broken",
+ "keywords":["ci","nightly","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_GUI_TEST_LAUNCHPAD",
+ "commandline":"./gui_test_launchpad_ui --test-name 'TC_GUI_TEST_LAUNCHPAD' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf",
+ "test_description":"System test to cehck the basic functionality of the ui",
+ "run_as_root": true,
+ "allow_production_launchpad": true,
+ "status":"broken",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_HA_BASICS_TEST",
+ "commandline":"./ha_basics_systest --test-name 'TC_HA_BASICS_SYSTEMTEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf",
+ "test_description":"System test to validate HA failover between active, standby systems",
+ "run_as_root": true,
+ "allow_production_launchpad": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "target_vm":"lp_active",
+ "vms":[
+ {
+ "name": "lp_active",
+ "type": "container",
+ "modes":[]
+ },
+ {
+ "name": "lp_standby",
+ "type": "container",
+ "modes":[]
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_HA_DELETION_OPERATIONS",
+ "commandline":"./ha_deletion_operations --test-name 'TC_HA_DELETION_OPERATIONS' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf",
+ "test_description":"System test to validate some deletion operations on the ha system",
+ "run_as_root": true,
+ "allow_production_launchpad": true,
+ "status":"broken",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "target_vm":"lp_active",
+ "vms":[
+ {
+ "name": "lp_active",
+ "type": "container",
+ "modes":[]
+ },
+ {
+ "name": "lp_standby",
+ "type": "container",
+ "modes":[]
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_HA_MULTIPLE_FAILOVERS_TEST",
+ "commandline":"./ha_multiple_failovers_systest --test-name 'TC_HA_MULTIPLE_FAILOVERS_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --ha-multiple-failovers",
+ "test_description":"System test to validate multiple HA failover between active, standby systems",
+ "run_as_root": true,
+ "allow_production_launchpad": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 3300,
+ "networks":[],
+ "target_vm":"lp_active",
+ "vms":[
+ {
+ "name": "lp_active",
+ "type": "container",
+ "modes":[]
+ },
+ {
+ "name": "lp_standby",
+ "type": "container",
+ "modes":[]
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_HA_NSR_TEST",
+ "commandline":"./ha_basics_systest --test-name 'TC_HA_NSR_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --nsr-test",
+ "test_description":"System test to validate HA failover between active, standby systems when NSRs are instantiated across different projects",
+ "run_as_root": true,
+ "allow_production_launchpad": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 3000,
+ "networks":[],
+ "target_vm":"lp_active",
+ "vms":[
+ {
+ "name": "lp_active",
+ "type": "container",
+ "modes":[]
+ },
+ {
+ "name": "lp_standby",
+ "type": "container",
+ "modes":[]
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_L2PORT_CHAINING",
+ "commandline":"./l2port_chaining_systest --test-name 'TC_L2PORT_CHAINING' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --l2-port-chaining",
+ "test_description":"System test to check L2 port chaining feature (Openstack)",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"broken",
+ "keywords":["nightly","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_METADATA_VDUD_CFGFILE",
+ "commandline":"./metadata_vdud_systest --test-name 'TC_METADATA_VDUD_CFGFILE' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --metadata-vdud-cfgfile",
+ "test_description":"System test to check metadata for vdud feature (Openstack)",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["ci","nightly","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
"commandline":"./launchpad_systest --test-name 'TC_MULTI_TENANT_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host} --user={user} {tenants} --sysinfo",
"test_description":"System test for multiple tenants(Openstack)",
"required_tenants":2,
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
"run_as_root": false,
"status":"working",
- "keywords":["nightly","smoke","MANO","openstack"],
+ "keywords":["ci","nightly","MANO","openstack"],
"timelimit": 1800,
"networks":[],
"vms":[
"test_description":"System test for scriptable load balancer with Multi-VMs VNFs",
"run_as_root": false,
"status":"broken",
- "keywords":["nightly","smoke","MANO","openstack"],
+ "keywords":["nightly","MANO","openstack"],
"timelimit": 2200,
"networks":[],
"vms":[
"test_description":"System test for trafgen application with Multi-VMs VNFs",
"run_as_root": false,
"status":"broken",
- "keywords":["nightly","smoke","MANO","openstack"],
+ "keywords":["nightly","MANO","openstack"],
"timelimit": 2200,
"networks":[],
"vms":[
--- /dev/null
+{
+ "test_name":"TC_NS_INSTANTIATE_MEMORY_CHECK",
+ "commandline":"./ns_instantiate_memory_check_systest --test-name 'TC_NS_INSTANTIATE_MEMORY_CHECK' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --multiple-ns-instantiate 50,4",
+ "test_description":"instantiates and deletes VNFs while tracking memory utilization",
+ "run_as_root": true,
+ "status":"broken",
+ "keywords":["nightly","smoke","MANO","openstack"],
+ "timelimit": 21000,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
--- /dev/null
+{
+ "test_name":"TC_ONBOARD_DELETE_VNFS_RANDOMLY",
+ "commandline":"./onboard_delete_vnfs_systest --test-name 'TC_ONBOARD_DELETE_VNFS_RANDOMLY' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --vnf-onboard-delete 60,10",
+ "test_description":"System test to onboard and delete m VNFs randomly for n iterations (params passed as n,m)",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"broken",
+ "keywords":["ci","nightly","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_PINGPONG_ACCOUNTS_OPENSTACK",
+ "license": "Apache 2.0",
+ "commandline":"./pingpong_accounts_systest --test-name 'TC_PINGPONG_ACCOUNTS_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --restconf",
+ "test_description":"System test testing vim/ro instantiation (Openstack)",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"broken",
+ "keywords":["ci","nightly","smoke","MANO","openstack","docker"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ },
+ {
+ "name": "openmano-1",
+ "type": "container",
+ "image":"{registry}/ub16:openmano-v2.0",
+ "modes":[]
+ },
+ {
+ "name": "openmano-2",
+ "type": "container",
+ "image":"{registry}/ub16:openmano-v2.0",
+ "modes":[]
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_PINGPONG_FLOATING_IP",
+ "commandline":"./pingpong_floating_ip --test-name 'TC_PINGPONG_FLOATING_IP' --cloud-type 'openstack' --cloud-host={cloud_host} --user={user} {tenants}",
+ "test_description":"TC for testing the pingpong floating ip pools",
+ "allow_production_launchpad": true,
+ "status":"broken",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 4000,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_PINGPONG_HA_OPENSTACK",
+ "commandline":"./pingpong_ha_systest --cloud-type 'openstack' --cloud-host={cloud_host} --user={user} {tenants}",
+ "test_description":"HA System Test that kills system components while running ping pong testcases",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": false,
+ "status":"broken",
+ "keywords":["nightly","MANO","openstack"],
+ "timelimit": 4000,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_PINGPONG_INPUT_PARAMS_OPENSTACK",
+ "license": "Apache 2.0",
+ "commandline":"./pingpong_input_params_systest --test-name 'TC_PINGPONG_INPUT_PARAMS_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf",
+ "test_description":"System test to test vnf input parameters (Openstack)",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["ci","nightly","smoke","MANO","openstack","docker"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
"test_description":"System test for standalone Launchpad (Openstack) with High availability",
"run_as_root": false,
"status":"broken",
- "keywords":["nightly","smoke","MANO","openstack"],
+ "keywords":["nightly","MANO","openstack"],
"timelimit": 2600,
"networks":[],
"vms":[
--- /dev/null
+{
+ "test_name":"TC_METADATA_VDUD_CUSTOM_METADATA",
+ "commandline":"./accounts_creation_onboard_instatiate_systest --test-name 'TC_METADATA_VDUD_CUSTOM_METADATA' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --metadata-vdud",
+ "test_description":"System test to check metadata for vdud feature (Openstack). It doesn't cover config file copy check",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["ci","nightly","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_PINGPONG_MRO_OPENSTACK",
+ "license": "Apache 2.0",
+ "commandline":"./pingpong_mro_systest --test-name 'TC_PINGPONG_MRO_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --restconf",
+ "test_description":"System test for ping and pong vnf (Openstack)",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"broken",
+ "keywords":["ci","nightly","smoke","MANO","openstack","docker"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ },
+ {
+ "name": "openmano_0",
+ "type": "container",
+ "image":"{registry}/ub16:openmano-v2.0",
+ "modes":[]
+ },
+ {
+ "name": "openmano_1",
+ "type": "container",
+ "image":"{registry}/ub16:openmano-v2.0",
+ "modes":[]
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_PINGPONG_MULTIDISK",
+ "commandline":"./accounts_creation_onboard_instatiate_systest --test-name 'TC_PINGPONG_MULTIDISK' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --multidisk",
+ "test_description":"System test to check multidisk for ping and pong vnf (Openstack)",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["ci","nightly","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_PINGPONG_MULTIDISK_OPENSTACK_XML",
+ "commandline":"./accounts_creation_onboard_instatiate_systest --test-name 'TC_PINGPONG_MULTIDISK_OPENSTACK_XML' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --multidisk --use-xml-mode",
+ "test_description":"System test to check multidisk for ping and pong vnf (Openstack) using xml-agent",
+ "run_as_root": true,
+ "allow_production_launchpad": true,
+ "xml_mode": true,
+ "status":"working",
+ "keywords":["ci","nightly","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
+
--- /dev/null
+{
+ "test_name":"TC_PINGPONG_EXPLICIT_PORT_SEQUENCING",
+ "commandline":"./accounts_creation_onboard_instatiate_systest_repeat_option --test-name 'TC_PINGPONG_EXPLICIT_PORT_SEQUENCING' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --port-sequencing",
+ "test_description":"System test to verify explicit port sequencings feature for ping and pong vnf (Openstack)",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["ci","nightly","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_PINGPONG_XML_EXPLICIT_PORT_SEQUENCING",
+ "commandline":"./accounts_creation_onboard_instatiate_systest_repeat_option --test-name 'TC_PINGPONG_EXPLICIT_PORT_SEQUENCING' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --port-sequencing --use-xml-mode",
+ "test_description":"System test to verify explicit port sequencings feature for ping and pong vnf (Openstack) using xml-agent",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "xml_mode": true,
+ "status":"working",
+ "keywords":["ci","nightly","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_PINGPONG_UNFILTERED_VIRTUAL_INTERFACE",
+ "commandline":"./accounts_creation_onboard_instatiate_systest --test-name 'TC_PINGPONG_UNFILTERED_VIRTUAL_INTERFACE' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --port-security",
+ "test_description":"System test to check unfiltered virtual interface for ping and pong vnf (Openstack)",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["ci","nightly","MANO","openstack"],
+ "vim_host_override": "10.66.4.32",
+ "vim_ssl_enabled": false,
+ "vim_user_domain_override": "default",
+ "vim_project_domain_override": "default",
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
"test_name":"TC_PINGPONG_RECORDS_CLOUDSIM",
"commandline":"./pingpong_records_systest --test-name 'TC_PINGPONG_RECORDS_CLOUDSIM' --sysinfo --netconf --restconf",
"test_description":"System test for ping and pong vnf (Cloudsim)",
+ "allow_rpm_install": true,
"run_as_root": true,
"status":"broken",
- "keywords":["nightly","smoke","MANO","openstack"],
+ "keywords":["nightly","MANO","openstack"],
"timelimit": 2600,
"networks":[],
"target_vm":"rift_auto_launchpad",
{
- "license": "Apache 2.0",
"test_name":"TC_PINGPONG_RECORDS_OPENSTACK",
+ "license": "Apache 2.0",
"commandline":"./pingpong_records_systest --test-name 'TC_PINGPONG_RECORDS_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --restconf",
"test_description":"System test for ping and pong vnf (Openstack)",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
"run_as_root": true,
"status":"working",
- "keywords":["nightly","smoke","MANO","openstack"],
+ "keywords":["ci","nightly","smoke","MANO","openstack"],
"timelimit": 2600,
"networks":[],
"vms":[
{
- "license": "Apache 2.0",
"test_name":"TC_PINGPONG_RECORDS_OPENSTACK_XML",
+ "license": "Apache 2.0",
"commandline":"./pingpong_records_systest --test-name 'TC_PINGPONG_RECORDS_OPENSTACK_XML' --cloud-type 'openstack' --sysinfo --use-xml-mode --cloud-host={cloud_host} --user={user} {tenants} --restconf",
"test_description":"System test for ping and pong vnf (Openstack)",
"run_as_root": true,
+ "allow_production_launchpad": true,
+ "xml_mode": true,
"status":"working",
- "keywords":["nightly","smoke","MANO","openstack"],
+ "keywords":["ci","nightly","MANO","openstack"],
"timelimit": 2600,
"networks":[],
"vms":[
"test_name":"TC_PINGPONG_SCALING_OPENSTACK",
"commandline":"./pingpong_scaling_systest --cloud-type 'openstack' --cloud-host={cloud_host} --user={user} {tenants}",
"test_description":"Scaling system test for ping and pong vnf (Openstack)",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
"run_as_root": false,
"status":"broken",
- "keywords":["nightly","smoke","MANO","openstack"],
- "timelimit": 2200,
+ "keywords":["nightly","MANO","openstack"],
+ "timelimit": 4000,
"networks":[],
"vms":[
{
--- /dev/null
+{
+ "test_name":"TC_PINGPONG_STATICIP",
+ "commandline":"./accounts_creation_onboard_instatiate_systest --test-name 'TC_PINGPONG_STATICIP' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --static-ip",
+ "test_description":"System test to check static-ip for ping and pong vnf (Openstack)",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["ci","nightly","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_PINGPONG_STATICIP_IPV6",
+ "commandline":"./accounts_creation_onboard_instatiate_systest --test-name 'TC_PINGPONG_STATICIP_IPV6' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --static-ip --ipv6",
+ "test_description":"System test to check static-ip(ipv6) for ping and pong vnf (Openstack)",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"broken",
+ "keywords":["ci","nightly","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_PINGPONG_UPDATE_DESCRIPTORS_INSTANTIATE",
+ "commandline":"./accounts_creation_onboard_instatiate_systest --test-name 'TC_PINGPONG_UPDATE_DESCRIPTORS_INSTANTIATE' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --update-vnfd-instantiate",
+ "test_description":"System test to update VNF descriptors and then instantiate NS for ping and pong vnf (Openstack)",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["ci","nightly","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_PINGPONG_VNF_DEPENDENCIES",
+ "commandline":"./accounts_creation_onboard_instatiate_systest --test-name 'TC_PINGPONG_VNF_DEPENDENCIES' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --vnf-dependencies",
+ "test_description":"System test to check vnf dependencies for ping and pong vnf (Openstack)",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["ci","nightly","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_PINGPONG_VNF_DEPENDENCIES_XML",
+ "commandline":"./accounts_creation_onboard_instatiate_systest --test-name 'TC_PINGPONG_VNF_DEPENDENCIES_XML' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --use-xml-mode --user={user} {tenants} --restconf --vnf-dependencies",
+ "test_description":"System test to check vnf dependencies for ping and pong vnf (Openstack)",
+ "run_as_root": true,
+ "allow_production_launchpad": true,
+ "xml_mode": true,
+ "status":"working",
+ "keywords":["ci","nightly","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
"test_name":"TC_PINGPONG_VNF_RELOAD_OPENSTACK",
"commandline":"./pingpong_vnf_reload_systest --test-name 'TC_PINGPONG_VNF_RELOAD_OPENSTACK' --cloud-type 'openstack' --sysinfo --cloud-host={cloud_host} --user={user} {tenants} --restconf",
"test_description":"System test for ping pong vnf reload(Openstack)",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
"run_as_root": false,
- "status":"broken",
- "keywords":["nightly","smoke","MANO","openstack"],
+ "status":"working",
+ "keywords":["ci","nightly","MANO","openstack"],
"timelimit": 2200,
"networks":[],
"vms":[
{
- "license": "Apache 2.0",
"test_name":"TC_PINGPONG_VNF_RELOAD_OPENSTACK_XML",
+ "license": "Apache 2.0",
"commandline":"./pingpong_vnf_reload_systest --test-name 'TC_PINGPONG_VNF_RELOAD_OPENSTACK_XML' --cloud-type 'openstack' --sysinfo --use-xml-mode --cloud-host={cloud_host} --user={user} {tenants} --restconf",
"test_description":"System test for ping pong vnf reload(Openstack)",
"run_as_root": false,
+ "allow_production_launchpad": true,
+ "xml_mode": true,
"status":"working",
- "keywords":["nightly","smoke","MANO","openstack"],
+ "keywords":["ci","nightly","MANO","openstack"],
"timelimit": 2200,
"networks":[],
"vms":[
"commandline":"./pingpong_vnf_systest --test-name 'TC_PINGPONG_VNF_CLOUDSIM'",
"target_vm":"VM",
"test_description":"System test for ping and pong vnf",
+ "allow_rpm_install": true,
"run_as_root": true,
"status":"broken",
- "keywords":["nightly","smoke","smoke_stable","MANO","cloudsim"],
+ "keywords":["nightly","MANO","cloudsim"],
"timelimit": 1800,
"networks":[],
"vms":[
"test_name":"TC_PINGPONG_VNF_OPENSTACK",
"commandline":"./pingpong_vnf_systest --test-name 'TC_PINGPONG_VNF_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host} --user={user} {tenants} --sysinfo",
"test_description":"System test for ping and pong vnf (Openstack)",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
"run_as_root": false,
"status":"broken",
- "keywords":["nightly","smoke","MANO","openstack"],
+ "keywords":["nightly","MANO","openstack"],
"timelimit": 2200,
"networks":[],
"vms":[
--- /dev/null
+{
+ "test_name":"TC_PRIMITIVES",
+ "commandline":"./primitives_systest --test-name 'TC_PRIMITIVES' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --vnf-dependencies --service-primitive",
+ "test_description":"System test to check service primitives & config primitives",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["ci","nightly","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_RBAC_ACCOUNT_ROLES_TEST",
+ "commandline":"./rbac_roles_systest --test-name 'TC_RBAC_ACCOUNT_ROLES_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --account-test",
+ "test_description":"System test to perform role based authorization check for cloud-account creation/deletion etc",
+ "allow_production_launchpad": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_RBAC_ACCOUNT_ROLES_TEST_RESTCONF",
+ "commandline":"./rbac_roles_systest --test-name 'TC_RBAC_ACCOUNT_ROLES_TEST_RESTCONF' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --account-test",
+ "test_description":"RBAC-RESTCONF: System test to perform role based authorization check for cloud-account creation/deletion etc",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_RBAC_BASICS_TEST",
+ "commandline":"./rbac_basics_systest --test-name 'TC_RBAC_BASICS_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf",
+ "test_description":"System test to perform rbac basics test",
+ "allow_production_launchpad": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_RBAC_BASICS_TEST_RESTCONF",
+ "commandline":"./rbac_basics_systest --test-name 'TC_RBAC_BASICS_TEST_RESTCONF' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf",
+ "test_description":"RBAC-RESTCONF: System test to perform rbac basics test",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_RBAC_TEST_IDENTITY",
+ "commandline":"./rbac_identity --test-name 'TC_RBAC_TEST_IDENTITY' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf",
+ "test_description":"System test to perform rbac identity tests",
+ "run_as_root": true,
+ "allow_production_launchpad": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_RBAC_MANO_XPATHS_ACCESS_TEST",
+ "commandline":"./rbac_mano_xpaths_access --test-name 'TC_RBAC_MANO_XPATHS_ACCESS_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf",
+ "test_description":"System test to check whether Mano roles/Permission mapping works (Verifies only read access for all Xpaths)",
+ "run_as_root": true,
+ "allow_production_launchpad": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_RBAC_MANO_XPATHS_ACCESS_TEST_RESTCONF",
+ "commandline":"./rbac_mano_xpaths_access --test-name 'TC_RBAC_MANO_XPATHS_ACCESS_TEST_RESTCONF' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf",
+ "test_description":"RBAC-RESTCONF: System test to check whether Mano roles/Permission mapping works (Verifies only read access for all Xpaths)",
+ "run_as_root": true,
+ "allow_rpm_install":true,
+ "allow_production_launchpad":true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_RBAC_NSR_ROLES_TEST",
+ "commandline":"./rbac_roles_systest --test-name 'TC_RBAC_NSR_ROLES_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --nsr-test",
+ "test_description":"System test to perform role based authorization check for NSR creation/termination etc",
+ "run_as_root": true,
+ "allow_production_launchpad": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_RBAC_ONBOARDING_ROLES_TEST",
+ "commandline":"./rbac_roles_systest --test-name 'TC_RBAC_ONBOARDING_ROLES_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --onboarding-test",
+ "test_description":"System test to perform role based authorization check for onboarding/deleting descriptors etc",
+ "allow_production_launchpad": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_RBAC_PROJECT_ROLES_TEST",
+ "commandline":"./rbac_roles_systest --test-name 'TC_RBAC_PROJECT_ROLES_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --project-creation-test",
+ "test_description":"System test to perform role based authorization check for project creation/deletion etc",
+ "run_as_root": true,
+ "allow_production_launchpad": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_RBAC_REDUNDANCY_CONFIG_ROLES_TEST",
+ "commandline":"./rbac_roles_systest --test-name 'TC_RBAC_REDUNDANCY_CONFIG_ROLES_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --redundancy-role-test",
+ "test_description":"System test to perform role based authorization check for redundancy config creation/deletion etc",
+ "run_as_root": true,
+ "allow_production_launchpad": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_RBAC_SYSLOG_SERVER_ROLES_TEST",
+ "commandline":"./rbac_roles_systest --test-name 'TC_RBAC_SYSLOG_SERVER_ROLES_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --syslog-server-test",
+ "test_description":"System test to perform role based authorization check for setting/reading syslog server address",
+ "run_as_root": true,
+ "allow_production_launchpad": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_RBAC_USAGE_SCENARIOS_TEST",
+ "commandline":"./rbac_usage_scenarios_systest --test-name 'TC_RBAC_USAGE_SCENARIOS_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf",
+ "test_description":"System test to perform rbac usage scenarios",
+ "run_as_root": true,
+ "allow_production_launchpad": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_RBAC_USAGE_SCENARIOS_TEST_RESTCONF",
+ "commandline":"./rbac_usage_scenarios_systest --test-name 'TC_RBAC_USAGE_SCENARIOS_TEST_RESTCONF' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf",
+ "test_description":"RBAC-RESTCONF: System test to perform rbac usage scenarios",
+ "run_as_root": true,
+ "allow_rpm_install":true,
+ "allow_production_launchpad":true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_RBAC_USER_ROLES_TEST",
+ "commandline":"./rbac_roles_systest --test-name 'TC_RBAC_USER_ROLES_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf --user-creation-test",
+ "test_description":"System test to perform role based authorization check for user creation/deletion etc",
+ "allow_production_launchpad": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
"test_name":"TC_TASKLET_RECOVERY_OPENSTACK",
"commandline":"./pingpong_recovery_systest --test-name 'TC_TASKLET_RECOVERY_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --netconf",
"test_description":"System test for testing the DTS recovery feature of tasklets (Openstack)",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
"run_as_root": false,
- "status":"working",
- "keywords":["nightly","smoke","MANO","openstack"],
+ "status":"broken",
+ "keywords":["nightly","MANO","openstack"],
"timelimit": 2200,
"networks":[],
"vms":[
{
"test_name":"TC_SCALING_OPENSTACK",
- "commandline":"./scaling_systest --test-name 'TC_SCALING_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host} --tenant={tenant} --sysinfo",
+ "commandline":"./scaling_systest --test-name 'TC_SCALING_OPENSTACK' --cloud-type 'openstack' --cloud-host={cloud_host} --user={user} {tenants} --sysinfo",
"test_description":"System test for scaling HAProxy vnf (Openstack)",
+ "allow_rpm_install": true,
"run_as_root": false,
"status":"broken",
- "keywords":["nightly","smoke","MANO","openstack"],
+ "keywords":["nightly","MANO","openstack"],
"timelimit": 2200,
"networks":[],
"vms":[
--- /dev/null
+{
+ "test_name":"TC_TBAC_ACCOUNT_ROLES_TEST",
+ "commandline":"./rbac_roles_systest --test-name 'TC_TBAC_ACCOUNT_ROLES_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --account-test --tbac",
+ "test_description":"TBAC: System test to perform role based authorization check for cloud-account creation/deletion etc",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_ACCOUNT_ROLES_TEST_XML",
+ "commandline":"./rbac_roles_systest --test-name 'TC_TBAC_ACCOUNT_ROLES_TEST_XML' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --account-test --tbac",
+ "test_description":"TBAC: System test to perform role based authorization check for cloud-account creation/deletion etc",
+ "allow_production_launchpad": true,
+ "xml_mode": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_BASICS_TEST",
+ "commandline":"./rbac_basics_systest --test-name 'TC_TBAC_BASICS_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --tbac",
+ "test_description":"TBAC: System test to perform rbac basics test",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_BASICS_TEST_XML",
+ "commandline":"./rbac_basics_systest --test-name 'TC_TBAC_BASICS_TEST_XML' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --tbac",
+ "test_description":"TBAC: System test to perform rbac basics test",
+ "allow_production_launchpad": true,
+ "xml_mode": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_TEST_IDENTITY",
+ "commandline":"./rbac_identity --test-name 'TC_TBAC_TEST_IDENTITY' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --tbac",
+ "test_description":"TBAC: System test to perform rbac identity tests",
+ "run_as_root": true,
+ "allow_rpm_install":true,
+ "allow_production_launchpad":true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_TEST_IDENTITY_XML",
+ "commandline":"./rbac_identity --test-name 'TC_TBAC_TEST_IDENTITY_XML' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --tbac",
+ "test_description":"TBAC: System test to perform rbac identity tests",
+ "run_as_root": true,
+ "allow_rpm_install":true,
+ "allow_production_launchpad":true,
+ "xml_mode": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_MANO_XPATHS_ACCESS_TEST",
+ "commandline":"./rbac_mano_xpaths_access --test-name 'TC_TBAC_MANO_XPATHS_ACCESS_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --tbac",
+ "test_description":"TBAC: System test to check whether Mano roles/Permission mapping works (Verifies only read access for all Xpaths)",
+ "allow_rpm_install":true,
+ "allow_production_launchpad":true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_MANO_XPATHS_ACCESS_TEST_XML",
+ "commandline":"./rbac_mano_xpaths_access --test-name 'TC_TBAC_MANO_XPATHS_ACCESS_TEST_XML' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --tbac",
+ "test_description":"TBAC: System test to check whether Mano roles/Permission mapping works (Verifies only read access for all Xpaths)",
+ "allow_rpm_install":true,
+ "allow_production_launchpad":true,
+ "xml_mode": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_NSR_ROLES_TEST",
+ "commandline":"./rbac_roles_systest --test-name 'TC_TBAC_NSR_ROLES_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --nsr-test --tbac",
+ "test_description":"TBAC: System test to perform role based authorization check for NSR creation/termination etc",
+ "run_as_root": true,
+ "allow_rpm_install":true,
+ "allow_production_launchpad":true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_NSR_ROLES_TEST_XML",
+ "commandline":"./rbac_roles_systest --test-name 'TC_TBAC_NSR_ROLES_TEST_XML' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --nsr-test --tbac",
+ "test_description":"TBAC: System test to perform role based authorization check for NSR creation/termination etc",
+ "run_as_root": true,
+ "allow_rpm_install":true,
+ "allow_production_launchpad":true,
+ "xml_mode": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_ONBOARDING_ROLES_TEST",
+ "commandline":"./rbac_roles_systest --test-name 'TC_TBAC_ONBOARDING_ROLES_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --onboarding-test --tbac",
+ "test_description":"TBAC: System test to perform role based authorization check for onboarding/deleting descriptors etc",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_ONBOARDING_ROLES_TEST_XML",
+ "commandline":"./rbac_roles_systest --test-name 'TC_TBAC_ONBOARDING_ROLES_TEST_XML' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --onboarding-test --tbac",
+ "test_description":"TBAC: System test to perform role based authorization check for onboarding/deleting descriptors etc",
+ "allow_production_launchpad": true,
+ "xml_mode": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_PROJECT_ROLES_TEST",
+ "commandline":"./rbac_roles_systest --test-name 'TC_TBAC_PROJECT_ROLES_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --project-creation-test --tbac",
+ "test_description":"TBAC: System test to perform role based authorization check for project creation/deletion etc",
+ "run_as_root": true,
+ "allow_rpm_install":true,
+ "allow_production_launchpad":true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_PROJECT_ROLES_TEST_XML",
+ "commandline":"./rbac_roles_systest --test-name 'TC_TBAC_PROJECT_ROLES_TEST_XML' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --project-creation-test --tbac",
+ "test_description":"TBAC: System test to perform role based authorization check for project creation/deletion etc",
+ "run_as_root": true,
+ "allow_rpm_install":true,
+ "allow_production_launchpad":true,
+ "xml_mode": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_SYSLOG_SERVER_ROLES_TEST",
+ "commandline":"./rbac_roles_systest --test-name 'TC_TBAC_SYSLOG_SERVER_ROLES_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --syslog-server-test --tbac",
+ "test_description":"TBAC: System test to perform role based authorization check for setting/reading syslog server address",
+ "run_as_root": true,
+ "allow_rpm_install":true,
+ "allow_production_launchpad":true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_SYSLOG_SERVER_ROLES_TEST_XML",
+ "commandline":"./rbac_roles_systest --test-name 'TC_TBAC_SYSLOG_SERVER_ROLES_TEST_XML' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --syslog-server-test --tbac",
+ "test_description":"TBAC: System test to perform role based authorization check for setting/reading syslog server address",
+ "run_as_root": true,
+ "allow_rpm_install":true,
+ "allow_production_launchpad":true,
+ "xml_mode": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_TOKEN",
+ "commandline":"./tbac_token --test-name 'TC_TBAC_TOKEN' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --tbac",
+ "test_description":"System test to perform tbac token tests",
+ "run_as_root": true,
+ "allow_production_launchpad": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_TOKEN_XML",
+ "commandline":"./tbac_token --test-name 'TC_TBAC_TOKEN_XML' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --tbac",
+ "test_description":"System test to perform tbac token tests",
+ "run_as_root": true,
+ "allow_production_launchpad": true,
+ "xml_mode": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_USAGE_SCENARIOS_TEST",
+ "commandline":"./rbac_usage_scenarios_systest --test-name 'TC_TBAC_USAGE_SCENARIOS_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --tbac",
+ "test_description":"TBAC: System test to perform rbac usage scenarios",
+ "run_as_root": true,
+ "allow_rpm_install":true,
+ "allow_production_launchpad":true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_USAGE_SCENARIOS_TEST_XML",
+ "commandline":"./rbac_usage_scenarios_systest --test-name 'TC_TBAC_USAGE_SCENARIOS_TEST_XML' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --tbac",
+ "test_description":"TBAC: System test to perform rbac usage scenarios",
+ "run_as_root": true,
+ "allow_rpm_install":true,
+ "allow_production_launchpad":true,
+ "xml_mode": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_USER_ROLES_TEST",
+ "commandline":"./rbac_roles_systest --test-name 'TC_TBAC_USER_ROLES_TEST' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --user-creation-test --tbac",
+ "test_description":"TBAC: System test to perform role based authorization check for user creation/deletion etc",
+ "allow_production_launchpad": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+{
+ "test_name":"TC_TBAC_USER_ROLES_TEST_XML",
+ "commandline":"./rbac_roles_systest --test-name 'TC_TBAC_USER_ROLES_TEST_XML' --cloud-type 'openstack' --cloud-host={cloud_host} --sysinfo --user={user} {tenants} --restconf --user-creation-test --tbac",
+ "test_description":"TBAC: System test to perform role based authorization check for user creation/deletion etc",
+ "allow_production_launchpad": true,
+ "xml_mode": true,
+ "allow_rpm_install": true,
+ "run_as_root": true,
+ "status":"working",
+ "keywords":["nightly","ci","MANO","openstack"],
+ "timelimit": 2600,
+ "networks":[],
+ "vms":[
+ {
+ "name": "rift_auto_launchpad",
+ "memory": 4096,
+ "cpus": 2
+ }
+ ]
+}
+
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider -k 'not (test_delete_projects or test_delete_users)' \
+ ${PYTEST_DIR}/system/ns/rbac/test_rbac.py"
+
+REBOOT_SCRIPT_TEST="py.test -x -v -p no:cacheprovider -k 'TestRbacVerification or (Teardown and not test_delete_default_project)' \
+ ${PYTEST_DIR}/system/ns/rbac/test_rbac.py --default-project-deleted"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+echo "############### test_cmd - ", $test_cmd
+eval ${test_cmd}
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+ ${PYTEST_DIR}/system/ns/rbac/test_rbac_identity.py"
+
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider ${PYTEST_DIR}/system/test_launchpad.py ${PYTEST_DIR}/system/ns/test_onboard.py ${PYTEST_DIR}/system/ns/rbac/test_rbac_mano_xpath_access.py"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+echo "############### test_cmd - ", $test_cmd
+eval ${test_cmd}
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider ${PYTEST_DIR}/system/ns/rbac/test_rbac_roles.py"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider ${PYTEST_DIR}/system/ns/rbac/test_rbac_usages.py"
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
--- /dev/null
+#!/bin/bash
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $RIFT_INSTALL/usr/rift/systemtest/util/mano/mano_common.sh
+
+# Helper script for invoking the mission control system test using the systest_wrapper
+SCRIPT_TEST="py.test -x -v -p no:cacheprovider \
+ ${PYTEST_DIR}/system/ns/rbac/test_tbac_token.py"
+
+
+test_cmd=""
+
+# Parse commonline argument and set test variables
+parse_args "${@}"
+
+# Construct the test command based on the test variables
+construct_test_command
+
+# Execute from pytest root directory to pick up conftest.py
+cd "${PYTEST_DIR}"
+
+eval ${test_cmd}
PROGRAMS
launchpad.py
DESTINATION demos
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
)
install(
pytest/lp_test.py
DESTINATION
usr/rift/systemtest/pytest/launchpad
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
)
install(
launchpad_recovery
DESTINATION
usr/rift/systemtest/launchpad
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
)
install(
PROGRAMS
launchpad
DESTINATION usr/bin
- COMPONENT rwcal-1.0
+ COMPONENT ${INSTALL_COMPONENT}
)
rift_py3test(utest_rwmonitor
from rift.vcs.ext import ClassProperty
+
logger = logging.getLogger(__name__)
+IDP_PORT_NUMBER = "8009"
+
+def get_launchpad_address():
+ # Search for externally accessible IP address with netifaces
+ gateways = netifaces.gateways()
+ # Check for default route facing interface and then get its ip address
+ if 'default' in gateways:
+ interface = gateways['default'][netifaces.AF_INET][1]
+ launchpad_ip_address = netifaces.ifaddresses(interface)[netifaces.AF_INET][0]['addr']
+ else:
+ # no default gateway. Revert to 127.0.0.1
+ launchpad_ip_address = "127.0.0.1"
+
+ return launchpad_ip_address
class NsmTasklet(rift.vcs.core.Tasklet):
"""
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a NsmTasklet object.
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwnsmtasklet')
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a VnsTasklet object.
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwvnstasklet')
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a VnfmTasklet object.
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwvnfmtasklet')
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a ResMgrTasklet object.
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwresmgrtasklet')
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a Image Manager Tasklet object.
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwimagemgrtasklet')
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a MonitorTasklet object.
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwmonitor')
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ANY_VM.value,
):
super(RedisServer, self).__init__(
name=name,
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
@property
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a MonitoringParameterTasklet object.
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwmonparam')
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a MonitoringParameterTasklet object.
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwautoscaler')
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a StagingMangerTasklet object.
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwstagingmgr')
plugin_name = ClassProperty('rwstagingmgr')
-def get_ui_ssl_args():
- """Returns the SSL parameter string for launchpad UI processes"""
-
- try:
- use_ssl, certfile_path, keyfile_path = certs.get_bootstrap_cert_and_key()
- except certs.BootstrapSslMissingException:
- logger.error('No bootstrap certificates found. Disabling UI SSL')
- use_ssl = False
-
- # If we're not using SSL, no SSL arguments are necessary
- if not use_ssl:
- return ""
-
- return "--enable-https --keyfile-path=%s --certfile-path=%s" % (keyfile_path, certfile_path)
-
class UIServer(rift.vcs.NativeProcess):
def __init__(self, name="RW.MC.UI",
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
+ external_address=None,
):
super(UIServer, self).__init__(
name=name,
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
+ self._external_address = external_address
@property
def args(self):
- return get_ui_ssl_args()
+ return self._get_ui_args()
+
+ def _get_ui_args(self):
+ """Returns the SSL parameter string for launchpad UI processes"""
+
+ try:
+ use_ssl, certfile_path, keyfile_path = certs.get_bootstrap_cert_and_key()
+ except certs.BootstrapSslMissingException:
+ logger.error('No bootstrap certificates found. Disabling UI SSL')
+ use_ssl = False
+
+ # If we're not using SSL, no SSL arguments are necessary
+ if not use_ssl:
+ return ""
+
+ # If an external address is set, take that value for launchpad IP
+ # address, else use the internal IP address used for default route
+ launchpad_ip_address = self._external_address
+ if not launchpad_ip_address:
+ launchpad_ip_address = get_launchpad_address()
+
+ return "--enable-https" +\
+ " --keyfile-path={}".format(keyfile_path) +\
+ " --certfile-path={}".format(certfile_path) +\
+ " --launchpad-address={}".format(launchpad_ip_address) +\
+ " --idp-port-number={}".format(IDP_PORT_NUMBER) +\
+ " --callback-address={}".format(launchpad_ip_address)
+
class ConfigManagerTasklet(rift.vcs.core.Tasklet):
"""
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a ConfigManagerTasklet object.
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwconmantasklet')
plugin_name = ClassProperty('rwconmantasklet')
+
+class ProjectMgrManoTasklet(rift.vcs.core.Tasklet):
+ """
+ This class represents a Resource Manager tasklet.
+ """
+
+ def __init__(self, name='Project-Manager-Mano', uid=None,
+ config_ready=True,
+ recovery_action=core.RecoveryType.FAILCRITICAL.value,
+ data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
+ ):
+ """
+ Creates a ProjectMgrManoTasklet object.
+
+ Arguments:
+ name - the name of the tasklet
+ uid - a unique identifier
+ """
+ super(ProjectMgrManoTasklet, self).__init__(name=name, uid=uid,
+ config_ready=config_ready,
+ recovery_action=recovery_action,
+ data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
+ )
+
+ plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwprojectmano')
+ plugin_name = ClassProperty('rwprojectmano')
+
+
class PackageManagerTasklet(rift.vcs.core.Tasklet):
"""
This class represents a Resource Manager tasklet.
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
"""
Creates a PackageManager object.
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
plugin_directory = ClassProperty('./usr/lib/rift/plugins/rwpkgmgr')
config_ready=True,
recovery_action=core.RecoveryType.FAILCRITICAL.value,
data_storetype=core.DataStore.NOSTORE.value,
+ ha_startup_mode=core.HaStartup.ONLY_ACTIVE.value,
):
super(GlanceServer, self).__init__(
name=name,
config_ready=config_ready,
recovery_action=recovery_action,
data_storetype=data_storetype,
+ ha_startup_mode=ha_startup_mode,
)
@property
class Demo(rift.vcs.demo.Demo):
- def __init__(self, no_ui=False, ha_mode=None, mgmt_ip_list=[], test_name=None):
- procs = [
- ConfigManagerTasklet(),
- GlanceServer(),
- rift.vcs.DtsRouterTasklet(),
- rift.vcs.MsgBrokerTasklet(),
- rift.vcs.RestPortForwardTasklet(),
- rift.vcs.RestconfTasklet(),
- rift.vcs.RiftCli(),
- rift.vcs.uAgentTasklet(),
- rift.vcs.Launchpad(),
- ]
-
- standby_procs = [
- RedisServer(),
- rift.vcs.DtsRouterTasklet(),
- rift.vcs.MsgBrokerTasklet(),
- ]
+ def __init__(self, no_ui=False,
+ data_store=None,
+ mgmt_ip_list=[],
+ test_name=None,
+ start_auth_svc=None,
+ start_pam_svc=None,
+ external_address=None):
datastore = core.DataStore.BDB.value
- if ha_mode:
- procs.append(RedisServer())
+ if data_store == "Redis":
datastore = core.DataStore.REDIS.value
+ elif data_store == "None":
+ datastore = core.DataStore.NOSTORE.value
+
+ restart_db_active = {"recovery_action" : core.RecoveryType.RESTART.value, \
+ "data_storetype" : datastore, \
+ "ha_startup_mode" : core.HaStartup.ONLY_ACTIVE.value}
+
+ failcrit_db_active = {"recovery_action" : core.RecoveryType.FAILCRITICAL.value, \
+ "data_storetype" : datastore, \
+ "ha_startup_mode" : core.HaStartup.ONLY_ACTIVE.value}
+
+ failcrit_db_any = {"recovery_action" : core.RecoveryType.FAILCRITICAL.value, \
+ "data_storetype" : datastore, \
+ "ha_startup_mode" : core.HaStartup.ANY_VM.value}
+
+ procs = [
+ ConfigManagerTasklet(**failcrit_db_active),
+ GlanceServer(**failcrit_db_active),
+ rift.vcs.DtsRouterTasklet(**failcrit_db_any),
+ rift.vcs.MsgBrokerTasklet(**failcrit_db_any),
+ rift.vcs.RestconfTasklet(**failcrit_db_active),
+ rift.vcs.RiftCli(**failcrit_db_active, as_console=True),
+ rift.vcs.uAgentTasklet(**failcrit_db_any),
+ rift.vcs.Launchpad(**failcrit_db_active),
+ rift.vcs.IdentityManagerTasklet(**failcrit_db_active),
+ rift.vcs.ProjectManagerTasklet(**failcrit_db_active),
+ rift.vcs.HAManager(**failcrit_db_any),
+ rift.vcs.OpenIDCProviderTasklet(**failcrit_db_active),
+ rift.vcs.AuthExtUserTasklet(**failcrit_db_active),
+ rift.vcs.OTTAuthTasklet(**failcrit_db_active),
+ NsmTasklet(**failcrit_db_active),
+ VnfmTasklet(**failcrit_db_active),
+ VnsTasklet(**failcrit_db_active),
+ ResMgrTasklet(**failcrit_db_active),
+ ImageMgrTasklet(**failcrit_db_active),
+ AutoscalerTasklet(**failcrit_db_active),
+ StagingManagerTasklet(**failcrit_db_active),
+ PackageManagerTasklet(**failcrit_db_active),
+ MonitoringParameterTasklet(**failcrit_db_active),
+ ProjectMgrManoTasklet(**failcrit_db_active)
+ ]
+
+ if datastore == core.DataStore.REDIS.value:
+ procs.append(RedisServer(**failcrit_db_any))
if not no_ui:
- procs.append(UIServer())
-
- restart_procs = [
- VnfmTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- VnsTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- # MonitorTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- MonitoringParameterTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- NsmTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- ResMgrTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- ImageMgrTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- AutoscalerTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- PackageManagerTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- StagingManagerTasklet(recovery_action=core.RecoveryType.RESTART.value, data_storetype=datastore),
- ]
+ procs.append(UIServer(external_address=external_address))
- if not mgmt_ip_list or len(mgmt_ip_list) == 0:
- mgmt_ip_list.append("127.0.0.1")
+ if start_auth_svc:
+ procs.append(rift.vcs.WebAuthSvcTasklet(**failcrit_db_active))
- colony = rift.vcs.core.Colony(name='top', uid=1)
+ if start_pam_svc:
+ procs.append(rift.vcs.PAMAuthTasklet())
- lead_lp_vm = rift.vcs.VirtualMachine(
- name='vm-launchpad-1',
- ip=mgmt_ip_list[0],
- procs=procs,
- restart_procs=restart_procs,
- )
- lead_lp_vm.leader = True
- colony.append(lead_lp_vm)
-
- if ha_mode:
- stby_lp_vm = rift.vcs.VirtualMachine(
- name='launchpad-vm-2',
- ip=mgmt_ip_list[1],
- procs=standby_procs,
- start=False,
- )
- # WA to Agent mode_active flag reset
- stby_lp_vm.add_tasklet(rift.vcs.uAgentTasklet(), mode_active=False)
- colony.append(stby_lp_vm)
+ restart_procs = []
+
+ if not mgmt_ip_list or len(mgmt_ip_list) == 0:
+ mgmt_ip_list.append(get_launchpad_address())
+ colony = rift.vcs.core.Colony(name='top', uid=1)
+ leader = 0
+ for mgmt_ip in mgmt_ip_list:
+ vm = rift.vcs.VirtualMachine(name='mgmt-vm-lp',
+ ip=mgmt_ip,
+ procs=procs,
+ restart_procs=restart_procs,start=False,)
+ if (leader == 0):
+ vm.leader = True
+ leader = 1
+ colony.append(vm)
+
sysinfo = rift.vcs.SystemInfo(
mode='ethsim',
zookeeper=rift.vcs.manifest.RaZookeeper(master_ip=mgmt_ip_list[0]),
# Create a parser which includes all generic demo arguments
parser = rift.vcs.demo.DemoArgParser()
parser.add_argument("--no-ui", action='store_true')
+ parser.add_argument("--start-auth-svc",
+ action='store_true',
+ help="Start the Web Based Authentication service simualtor.")
+ parser.add_argument("--start-pam-svc",
+ action='store_true',
+ help="Start the PAM Authentication service.")
+ parser.add_argument("--external-address",
+ type=str,
+ help="External IP address or hostname using which the host can "+
+ "be reached.")
+ if rift.vcs.mgmt.default_agent_mode() == 'CONFD':
+ parser.add_argument("--use-osm-model",
+ action='store_true',
+ help="Load only OSM specific models and hide the Rift Specific Augments")
+
args = parser.parse_args(argv)
# Disable loading any kernel modules for the launchpad VM
# since it doesn't need it and it will fail within containers
os.environ["NO_KERNEL_MODS"] = "1"
+ # Get external_address from env if args not set
+ if args.external_address is None:
+ args.external_address = os.getenv("RIFT_EXTERNAL_ADDRESS")
+
+ os.environ["RIFT_EXTERNAL_ADDRESS"] = \
+ args.external_address if args.external_address else get_launchpad_address()
+
cleanup_dir_name = None
- if os.environ["INSTALLDIR"] in ["/", "/home/rift", "/home/rift/.install",
- "/usr/rift/build/fc20_debug/install/usr/rift", "/usr/rift"]:
+ if os.environ["INSTALLDIR"] in ["/usr/rift",
+ "/usr/rift/build/ub16_debug/install/usr/rift",
+ "/usr/rift/build/fc20_debug/install/usr/rift"]:
cleanup_dir_name = os.environ["INSTALLDIR"] + "/var/rift/"
if args.test_name and not cleanup_dir_name:
for f in os.listdir(cleanup_dir_name):
if f.endswith(".aof") or f.endswith(".rdb"):
os.remove(os.path.join(cleanup_dir_name, f))
-
- # Remove the persistant DTS recovery files
+
+ # Remove the persistant DTS recovery files
for f in os.listdir(cleanup_dir_name):
if f.endswith(".db"):
os.remove(os.path.join(cleanup_dir_name, f))
except Exception as e:
print ("Error while cleanup: {}".format(str(e)))
- ha_mode = args.ha_mode
+ datastore = args.datastore
mgmt_ip_list = [] if not args.mgmt_ip_list else args.mgmt_ip_list
#load demo info and create Demo object
- demo = Demo(args.no_ui, ha_mode, mgmt_ip_list, args.test_name)
+ demo = Demo(args.no_ui,
+ datastore,
+ mgmt_ip_list,
+ args.test_name,
+ args.start_auth_svc,
+ args.start_pam_svc,
+ args.external_address)
+
+ if 'use_osm_model' in args and args.use_osm_model:
+ northbound_listing = ["platform_schema_listing.txt",
+ "platform_mgmt_schema_listing.txt",
+ "cli_launchpad_schema_listing.txt"]
+ args.use_xml_mode = True
+
+ else:
+ northbound_listing = ["platform_schema_listing.txt",
+ "platform_mgmt_schema_listing.txt",
+ "cli_launchpad_schema_listing.txt",
+ "cli_launchpad_rift_specific_schema_listing.txt"]
# Create the prepared system from the demo
- system = rift.vcs.demo.prepared_system_from_demo_and_args(demo, args,
- northbound_listing="cli_launchpad_schema_listing.txt",
- netconf_trace_override=True)
+ system = rift.vcs.demo.prepared_system_from_demo_and_args(
+ demo, args,
+ northbound_listing=northbound_listing,
+ netconf_trace_override=True)
- # Search for externally accessible IP address with netifaces
- gateways = netifaces.gateways()
- # Check for default route facing interface and then get its ip address
- if 'default' in gateways:
- interface = gateways['default'][netifaces.AF_INET][1]
- confd_ip = netifaces.ifaddresses(interface)[netifaces.AF_INET][0]['addr']
- else:
- # no default gateway. Revert to 127.0.0.1
- confd_ip = "127.0.0.1"
+ confd_ip = get_launchpad_address()
# TODO: This need to be changed when launchpad starts running on multiple VMs
rift.vcs.logger.configure_sink(config_file=None, confd_ip=confd_ip)
# Start the prepared system
system.start()
-
if __name__ == "__main__":
resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY) )
+ os.system('/usr/rift/bin/UpdateHostsFile')
try:
main()
except rift.vcs.demo.ReservationError:
@classmethod
def configure_schema(cls):
schema = RwYang.Model.load_and_merge_schema(rwvcs.get_schema(), 'librwcal_yang_gen.so', 'Rwcal')
- cls.model = RwYang.Model.create_libncx()
+ cls.model = RwYang.Model.create_libyang()
cls.model.load_schema_ypbc(schema)
xml = cls.manifest.to_xml_v2(cls.model, 1)
xml = re.sub('rw-manifest:', '', xml)
manifest = rwmanifest.Manifest()
manifest.bootstrap_phase = rwmanifest.BootstrapPhase.from_dict({
"rwmgmt": {
- "northbound_listing": [ "cli_launchpad_schema_listing.txt" ]
+ "northbound_listing": [ "platform_schema_listing.txt", "platform_mgmt_schema_listing.txt", "cli_launchpad_schema_listing.txt" ]
},
"rwtasklet": {
"plugin_name": "rwinit-c"
"recovery_action": "RESTART",
"config_ready": True
}
- },
-# {
-# "name": "Start the RW.CLI",
-# "start": {
-# "component_name": "RW.CLI",
-# "recovery_action": "RESTART",
-# "config_ready": True
-# }
-# },
+ },
{
"name": "Start the RW.Proc_1.Restconf",
"start": {
"config_ready": True
}
},
-# {
-# "name": "Start the RW.Proc_2.RestPortForward",
-# "start": {
-# "component_name": "RW.Proc_2.RestPortForward",
-# "recovery_action": "RESTART",
-# "config_ready": True
-# }
-# },
{
"name": "Start the RW.Proc_3.CalProxy",
"start": {
"plugin_name": "restconf"
}
},
-# {
-# "component_name": "RW.Proc_2.RestPortForward",
-# "component_type": "RWPROC",
-# "rwproc": {
-# "tasklet": [{
-# "name": "Start RW.RestPortForward for RW.Proc_2.RestPortForward",
-# "component_name": "RW.RestPortForward",
-# "recovery_action": "RESTART",
-# "config_ready": True
-# }]
-# }
-# },
-# {
-# "component_name": "RW.RestPortForward",
-# "component_type": "RWTASKLET",
-# "rwtasklet": {
-# "plugin_directory": "./usr/lib/rift/plugins/restportforward",
-# "plugin_name": "restportforward"
-# }
-# },
{
"component_name": "RW.Proc_3.CalProxy",
"component_type": "RWPROC",
)
resource_info.update(self._vdu_info)
- response = RwResourceMgrYang.VDUEventData.from_dict(dict(
+ response = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData.from_dict(dict(
event_id=self._event_id,
request_info=self._request_info.as_dict(),
resource_info=resource_info,
)
resource_info.update(self._link_info)
- response = RwResourceMgrYang.VirtualLinkEventData.from_dict(dict(
+ response = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData.from_dict(dict(
event_id=self._event_id,
request_info=self._request_info.as_dict(),
resource_info=resource_info,
class ResourceMgrMock(object):
- VDU_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data"
- VLINK_REQUEST_XPATH = "D,/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data"
+ VDU_REQUEST_XPATH = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vdu-event/vdu-event-data"
+ VLINK_REQUEST_XPATH = "D,/rw-project:project/rw-resource-mgr:resource-mgmt/vlink-event/vlink-event-data"
def __init__(self, dts, log, loop):
self._log = log
response_info = None
response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
- schema = RwResourceMgrYang.VirtualLinkEventData().schema()
+ schema = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VlinkEvent_VlinkEventData().schema()
pathentry = schema.keyspec_to_entry(ks_path)
if action == rwdts.QueryAction.CREATE:
return
@asyncio.coroutine
- def monitor_vdu_state(response_xpath, pathentry):
+ def monitor_vdu_state(response_xpath, event_id):
self._log.info("Initiating VDU state monitoring for xpath: %s ", response_xpath)
loop_cnt = 120
while loop_cnt > 0:
self._log.debug("VDU state monitoring: Sleeping for 1 second ")
yield from asyncio.sleep(1, loop = self._loop)
try:
- response_info = self._read_virtual_compute(
- pathentry.key00.event_id
- )
+ response_info = self._read_virtual_compute(event_id)
except Exception as e:
self._log.error(
"VDU state monitoring: Received exception %s "
### End of while loop. This is only possible if VDU did not reach active state
self._log.info("VDU state monitoring: VDU at xpath :%s did not reached active state in 120 seconds. Aborting monitoring",
response_xpath)
- response_info = RwResourceMgrYang.VDUEventData_ResourceInfo()
+ response_info = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData_ResourceInfo()
response_info.resource_state = 'failed'
yield from self._dts.query_update(response_xpath,
rwdts.XactFlag.ADVISE,
response_info = None
response_xpath = ks_path.to_xpath(RwResourceMgrYang.get_schema()) + "/resource-info"
- schema = RwResourceMgrYang.VDUEventData().schema()
+ schema = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgmt_VduEvent_VduEventData().schema()
pathentry = schema.keyspec_to_entry(ks_path)
if action == rwdts.QueryAction.CREATE:
request_msg.request_info,
)
if response_info.resource_state == 'pending':
- asyncio.ensure_future(monitor_vdu_state(response_xpath, pathentry),
+ asyncio.ensure_future(monitor_vdu_state(response_xpath, pathentry.key00.event_id),
loop = self._loop)
elif action == rwdts.QueryAction.DELETE:
#!/usr/bin/env python3
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# limitations under the License.
#
-
+import argparse
import asyncio
+import gi
+import logging
import os
import sys
+import time
+import types
import unittest
import uuid
import xmlrunner
-import argparse
-import logging
-import time
-import types
-import gi
gi.require_version('RwCloudYang', '1.0')
gi.require_version('RwDts', '1.0')
gi.require_version('RwNsmYang', '1.0')
RwConfigAgentYang as rwcfg_agent,
RwlogMgmtYang
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
from gi.repository.RwTypes import RwStatus
import rift.mano.examples.ping_pong_nsd as ping_pong_nsd
import rift.tasklets
import rift.test.dts
import rw_peas
+from rift.mano.utils.project import (
+ ManoProject,
+ DEFAULT_PROJECT,
+)
+
+PROJECT = 'default'
openstack_info = {
'username': 'pluto',
class XPaths(object):
@staticmethod
def nsd(k=None):
- return ("C,/nsd:nsd-catalog/nsd:nsd" +
- ("[nsd:id='{}']".format(k) if k is not None else ""))
+ return ("C,/project-nsd:nsd-catalog/project-nsd:nsd" +
+ ("[project-nsd:id={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def vld(k=None):
return ("C,/vld:vld-catalog/vld:vld" +
- ("[vld:id='{}']".format(k) if k is not None else ""))
+ ("[vld:id={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def vnfd(k=None):
- return ("C,/vnfd:vnfd-catalog/vnfd:vnfd" +
- ("[vnfd:id='{}']".format(k) if k is not None else ""))
+ return ("C,/project-vnfd:vnfd-catalog/project-vnfd:vnfd" +
+ ("[project-vnfd:id={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def vnfr(k=None):
return ("D,/vnfr:vnfr-catalog/vnfr:vnfr" +
- ("[vnfr:id='{}']".format(k) if k is not None else ""))
+ ("[vnfr:id={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def vlr(k=None):
return ("D,/vlr:vlr-catalog/vlr:vlr" +
- ("[vlr:id='{}']".format(k) if k is not None else ""))
-
- @staticmethod
- def nsd_ref_count(k=None):
- return ("D,/nsr:ns-instance-opdata/rw-nsr:nsd-ref-count" +
- ("[rw-nsr:nsd-id-ref='{}']".format(k) if k is not None else ""))
+ ("[vlr:id={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def vnfd_ref_count(k=None):
return ("D,/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count" +
- ("[rw-nsr:nsd-id-ref='{}']".format(k) if k is not None else ""))
+ ("[rw-nsr:nsd-id-ref={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def nsr_config(k=None):
return ("C,/nsr:ns-instance-config/nsr:nsr" +
- ("[nsr:id='{}']".format(k) if k is not None else ""))
+ ("[nsr:id={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def nsr_opdata(k=None):
return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
- ("[nsr:ns-instance-config-ref='{}']".format(k) if k is not None else ""))
+ ("[nsr:ns-instance-config-ref={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def nsr_config_status(k=None):
return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
- ("[nsr:ns-instance-config-ref='{}']/config_status".format(k) if k is not None else ""))
+ ("[nsr:ns-instance-config-ref={}]/config_status".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def cm_state(k=None):
- if k is None:
- return ("D,/rw-conman:cm-state/rw-conman:cm-nsr")
- else:
- return ("D,/rw-conman:cm-state/rw-conman:cm-nsr" +
- ("[rw-conman:id='{}']".format(k) if k is not None else ""))
+ return ("D,/rw-conman:cm-state/rw-conman:cm-nsr" +
+ ("[rw-conman:id={}]".format(quoted_key(k)) if k is not None else ""))
@staticmethod
def nsr_scale_group_instance(nsr_id=None, group_name=None, index=None):
return (("D,/nsr:ns-instance-opdata/nsr:nsr") +
- ("[nsr:ns-instance-config-ref='{}']".format(nsr_id) if nsr_id is not None else "") +
+ ("[nsr:ns-instance-config-ref={}]".format(quoted_key(nsr_id)) if nsr_id is not None else "") +
("/nsr:scaling-group-record") +
- ("[nsr:scaling-group-name-ref='{}']".format(group_name) if group_name is not None else "") +
+ ("[nsr:scaling-group-name-ref={}]".format(quoted_key(group_name)) if group_name is not None else "") +
("/nsr:instance") +
- ("[nsr:scaling-group-index-ref='{}']".format(index) if index is not None else ""))
+ ("[nsr:scaling-group-index-ref={}]".format(quoted_key(index)) if index is not None else ""))
@staticmethod
def nsr_scale_group_instance_config(nsr_id=None, group_name=None, index=None):
return (("C,/nsr:ns-instance-config/nsr:nsr") +
- ("[nsr:id='{}']".format(nsr_id) if nsr_id is not None else "") +
+ ("[nsr:id={}]".format(nsr_id) if nsr_id is not None else "") +
("/nsr:scaling-group") +
- ("[nsr:scaling-group-name-ref='{}']".format(group_name) if group_name is not None else "") +
+ ("[nsr:scaling-group-name-ref={}]".format(quoted_key(group_name)) if group_name is not None else "") +
("/nsr:instance") +
- ("[nsr:index='{}']".format(index) if index is not None else ""))
+ ("[nsr:index={}]".format(quoted_key(index)) if index is not None else ""))
+
+ @staticmethod
+ def cloud_account(k=None):
+ return ("C,/rw-cloud:cloud/rw-cloud:account" +
+ ("[rw-cloud:name={}]".format(quoted_key(k)) if k is not None else ""))
+
+ @staticmethod
+ def project(k=None):
+ return ("C,/rw-project:project" +
+ ("[rw-project:name={}]".format(quoted_key(k)) if k is not None else ""))
class ManoQuerier(object):
- def __init__(self, log, dts):
+ def __init__(self, log, dts, project):
self.log = log
self.dts = dts
+ self.project = project
+
+ def add_project(self, xpath):
+ return self.project.add_project(xpath)
@asyncio.coroutine
- def _read_query(self, xpath, do_trace=False):
- self.log.debug("Running XPATH read query: %s (trace: %s)", xpath, do_trace)
+ def _read_query(self, xpath, do_trace=False, project=True):
+ if project:
+ xp = self.add_project(xpath)
+ else:
+ xp = xpath
+ self.log.debug("Running XPATH read query: %s (trace: %s)", xp, do_trace)
flags = rwdts.XactFlag.MERGE
flags += rwdts.XactFlag.TRACE if do_trace else 0
res_iter = yield from self.dts.query_read(
- xpath, flags=flags
+ xp, flags=flags
)
results = []
return results
+ @asyncio.coroutine
+ def _delete_query(self, xpath, flags=0):
+ xp = self.add_project(xpath)
+ self.log.debug("Running XPATH delete query: %s (flags: %d)", xp, flags)
+ with self.dts.transaction() as xact:
+ yield from self.dts.query_delete(
+ xp,
+ flags
+ )
+
+ @asyncio.coroutine
+ def _update_query(self, xpath, msg, flags=0):
+ xp = self.add_project(xpath)
+ self.log.debug("Running XPATH update query: %s (flags: %d)", xp, flags)
+ with self.dts.transaction() as xact:
+ yield from self.dts.query_update(
+ xp,
+ flags,
+ msg
+ )
+
@asyncio.coroutine
def get_cm_state(self, nsr_id=None):
return (yield from self._read_query(XPaths.cm_state(nsr_id), False))
@asyncio.coroutine
def get_nsr_scale_group_instance_opdata(self, nsr_id=None, group_name=None, index=None):
return (yield from self._read_query(XPaths.nsr_scale_group_instance(nsr_id, group_name, index), False))
- #return (yield from self._read_query(XPaths.nsr_scale_group_instance(nsr_id, group_name), True))
@asyncio.coroutine
def get_nsr_configs(self, nsr_id=None):
def get_vlrs(self, vlr_id=None):
return (yield from self._read_query(XPaths.vlr(vlr_id)))
- @asyncio.coroutine
- def get_nsd_ref_counts(self, nsd_id=None):
- return (yield from self._read_query(XPaths.nsd_ref_count(nsd_id)))
-
@asyncio.coroutine
def get_vnfd_ref_counts(self, vnfd_id=None):
return (yield from self._read_query(XPaths.vnfd_ref_count(vnfd_id)))
@asyncio.coroutine
def delete_nsr(self, nsr_id):
- with self.dts.transaction() as xact:
- yield from self.dts.query_delete(
- XPaths.nsr_config(nsr_id),
- 0
- #rwdts.XactFlag.TRACE,
- #rwdts.Flag.ADVISE,
- )
+ return (yield from self._delete_query(XPaths.nsr_config(nsr_id)))
@asyncio.coroutine
def delete_nsd(self, nsd_id):
- nsd_xpath = XPaths.nsd(nsd_id)
- self.log.debug("Attempting to delete NSD with path = %s", nsd_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_delete(
- nsd_xpath,
- rwdts.XactFlag.ADVISE,
- )
+ return (yield from self._delete_query(XPaths.nsd(nsd_id),
+ rwdts.XactFlag.ADVISE))
@asyncio.coroutine
def delete_vnfd(self, vnfd_id):
- vnfd_xpath = XPaths.vnfd(vnfd_id)
- self.log.debug("Attempting to delete VNFD with path = %s", vnfd_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_delete(
- vnfd_xpath,
- rwdts.XactFlag.ADVISE,
- )
+ return (yield from self._delete_query(XPaths.vnfd(vnfd_id),
+ rwdts.XactFlag.ADVISE))
@asyncio.coroutine
def update_nsd(self, nsd_id, nsd_msg):
- nsd_xpath = XPaths.nsd(nsd_id)
- self.log.debug("Attempting to update NSD with path = %s", nsd_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_update(
- nsd_xpath,
- rwdts.XactFlag.ADVISE,
- nsd_msg,
- )
+ return (yield from self._update_query(XPaths.nsd(nsd_id), nsd_msg,
+ rwdts.XactFlag.ADVISE))
@asyncio.coroutine
def update_vnfd(self, vnfd_id, vnfd_msg):
- vnfd_xpath = XPaths.vnfd(vnfd_id)
- self.log.debug("Attempting to delete VNFD with path = %s", vnfd_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_update(
- vnfd_xpath,
- rwdts.XactFlag.ADVISE,
- vnfd_msg,
- )
+ return (yield from self._update_query(XPaths.vnfd(vnfd_id), vnfd_msg,
+ rwdts.XactFlag.ADVISE))
@asyncio.coroutine
def update_nsr_config(self, nsr_id, nsr_msg):
- nsr_xpath = XPaths.nsr_config(nsr_id)
- self.log.debug("Attempting to update NSR with path = %s", nsr_xpath)
- with self.dts.transaction() as xact:
- yield from self.dts.query_update(
- nsr_xpath,
- rwdts.XactFlag.ADVISE|rwdts.XactFlag.REPLACE,
- nsr_msg,
- )
+ return (yield from self._update_query(
+ XPaths.nsr_config(nsr_id),
+ nsr_msg,
+ rwdts.XactFlag.ADVISE|rwdts.XactFlag.REPLACE))
class ManoTestCase(rift.test.dts.AbstractDTSTest):
vnfrs = yield from self.querier.get_vnfrs()
self.assertEqual(num_vnfrs, len(vnfrs))
- @asyncio.coroutine
- def verify_nsd_ref_count(self, nsd_id, num_ref):
- nsd_ref_counts = yield from self.querier.get_nsd_ref_counts(nsd_id)
- self.assertEqual(num_ref, nsd_ref_counts[0].instance_ref_count)
+
class DescriptorPublisher(object):
- def __init__(self, log, loop, dts):
+ def __init__(self, log, loop, dts, project):
self.log = log
self.loop = loop
self.dts = dts
+ self.project = project
self._registrations = []
@asyncio.coroutine
def publish(self, w_path, path, desc):
ready_event = asyncio.Event(loop=self.loop)
+ if 'rw-project' in path:
+ w_xp = w_path
+ xp = path
+ else:
+ w_xp = self.project.add_project(w_path)
+ xp = self.project.add_project(path)
@asyncio.coroutine
def on_ready(regh, status):
self.log.debug("Create element: %s, obj-type:%s obj:%s",
- path, type(desc), desc)
+ xp, type(desc), desc)
with self.dts.transaction() as xact:
- regh.create_element(path, desc, xact.xact)
- self.log.debug("Created element: %s, obj:%s", path, desc)
+ regh.create_element(xp, desc, xact.xact)
+ self.log.debug("Created element: %s, obj:%s", xp, desc)
ready_event.set()
handler = rift.tasklets.DTS.RegistrationHandler(
on_ready=on_ready
)
- self.log.debug("Registering path: %s, obj:%s", w_path, desc)
+ self.log.debug("Registering path: %s, obj:%s", w_xp, desc)
reg = yield from self.dts.register(
- w_path,
+ w_xp,
handler,
flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ
)
self._registrations.append(reg)
- self.log.debug("Registered path : %s", w_path)
+ self.log.debug("Registered path : %s", w_xp)
yield from ready_event.wait()
return reg
reg.deregister()
+class ProjectPublisher(object):
+ XPATH = "C,/rw-project:project"
+
+ def __init__(self, log, loop, dts, project):
+ self.dts = dts
+ self.log = log
+ self.loop = loop
+ self.project = project
+ self.ref = None
+
+ self.querier = ManoQuerier(log, dts, project)
+ self.publisher = DescriptorPublisher(log, loop,
+ dts, project)
+
+ self._ready_event = asyncio.Event(loop=self.loop)
+ asyncio.ensure_future(self.register(), loop=loop)
+
+ @asyncio.coroutine
+ def register(self):
+ @asyncio.coroutine
+ def on_ready(regh, status):
+ self._ready_event.set()
+
+ self.log.debug("Registering path: %s", ProjectPublisher.XPATH)
+ self.reg = yield from self.dts.register(
+ ProjectPublisher.XPATH,
+ flags=rwdts.Flag.PUBLISHER,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_ready=on_ready,
+ ),
+ )
+
+ def deregister(self):
+ if self.reg is not None:
+ self.reg.deregister()
+
+ @asyncio.coroutine
+ def publish_project(self, config, xpath, xpath_wild):
+ # Publish project
+ self.log.debug("Publishing cloud_account path: %s - %s, type:%s, obj:%s",
+ xpath, xpath_wild, type(config), config)
+ yield from self.publisher.publish(xpath_wild, xpath, config)
+
+
+class CloudAccountPublisher(object):
+ XPATH = "C,/rw-cloud:cloud"
+
+ def __init__(self, log, loop, dts, project):
+ self.dts = dts
+ self.log = log
+ self.loop = loop
+ self.project = project
+ self.ref = None
+
+ self.querier = ManoQuerier(log, dts, project)
+ self.publisher = DescriptorPublisher(log, loop,
+ dts, project)
+
+ self.xpath = self.project.add_project(CloudAccountPublisher.XPATH)
+
+ self._ready_event = asyncio.Event(loop=self.loop)
+ asyncio.ensure_future(self.register(), loop=loop)
+
+ @asyncio.coroutine
+ def register(self):
+ @asyncio.coroutine
+ def on_ready(regh, status):
+ self._ready_event.set()
+
+ self.log.debug("Registering path: %s", self.xpath)
+ self.reg = yield from self.dts.register(
+ self.xpath,
+ flags=rwdts.Flag.PUBLISHER,
+ handler=rift.tasklets.DTS.RegistrationHandler(
+ on_ready=on_ready,
+ ),
+ )
+
+ def deregister(self):
+ if self.reg is not None:
+ self.reg.deregister()
+
+ @asyncio.coroutine
+ def publish_account(self, account, xpath, xpath_wild):
+ # Publish cloud account
+ self.log.debug("Publishing cloud_account path: %s - %s, type:%s, obj:%s",
+ xpath, xpath_wild, type(account), account)
+ yield from self.publisher.publish(xpath_wild, xpath, account)
+
+
class PingPongNsrConfigPublisher(object):
XPATH = "C,/nsr:ns-instance-config"
- def __init__(self, log, loop, dts, ping_pong, cloud_account_name):
+ def __init__(self, log, loop, dts, ping_pong, cloud_account_name, project):
self.dts = dts
self.log = log
self.loop = loop
+ self.project = project
self.ref = None
- self.querier = ManoQuerier(log, dts)
+ self.querier = ManoQuerier(log, dts, project)
+ self.xpath = self.project.add_project(PingPongNsrConfigPublisher.XPATH)
+ self.nsr_config = rwnsryang.YangData_RwProject_Project_NsInstanceConfig()
- self.nsr_config = rwnsryang.YangData_Nsr_NsInstanceConfig()
-
- nsr = rwnsryang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = rwnsryang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "ns1.{}".format(nsr.id)
- nsr.nsd = nsryang.YangData_Nsr_NsInstanceConfig_Nsr_Nsd()
+ nsr.nsd = nsryang.YangData_RwProject_Project_NsInstanceConfig_Nsr_Nsd()
nsr.nsd.from_dict(ping_pong.ping_pong_nsd.nsd.as_dict())
nsr.cloud_account = cloud_account_name
#'cloud_account':'mock_account1'
})
- inputs = nsryang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter()
- inputs.xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(ping_pong.nsd_id)
+ inputs = nsryang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter()
+ inputs.xpath = self.project.add_project(
+ "/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id={}]/project-nsd:name".format(quoted_key(ping_pong.nsd_id)))
inputs.value = "inigo montoya"
fast_cpu = {'metadata_key': 'FASTCPU', 'metadata_value': 'True'}
def on_ready(regh, status):
self._ready_event.set()
- self.log.debug("Registering path: %s", PingPongNsrConfigPublisher.XPATH)
+ self.log.debug("Registering path: %s", self.xpath)
self.reg = yield from self.dts.register(
- PingPongNsrConfigPublisher.XPATH,
+ self.xpath,
flags=rwdts.Flag.PUBLISHER,
handler=rift.tasklets.DTS.RegistrationHandler(
on_ready=on_ready,
yield from self._ready_event.wait()
with self.dts.transaction() as xact:
self.reg.create_element(
- PingPongNsrConfigPublisher.XPATH,
+ self.xpath,
self.nsr_config,
xact=xact.xact,
)
})
with self.dts.transaction() as xact:
self.reg.update_element(
- PingPongNsrConfigPublisher.XPATH,
+ self.xpath,
self.nsr_config,
xact=xact.xact,
)
"cloud_type" : cloud_type,
construct_type : construct_value,
})
-
+
def create_vnfd_placement_group_map(self,
nsr,
"cloud_type" : cloud_type,
construct_type : construct_value,
})
-
-
+
+
@asyncio.coroutine
def delete_scale_group_instance(self, group_name, index):
self.log.debug("Deleting scale group %s instance %s", group_name, index)
#del self.nsr_config.nsr[0].scaling_group[0].instance[0]
- xpath = XPaths.nsr_scale_group_instance_config(self.nsr_config.nsr[0].id, group_name, index)
+ xpath = self.project.add_project(
+ XPaths.nsr_scale_group_instance_config(self.nsr_config.nsr[0].id,
+ group_name, index))
yield from self.dts.query_delete(xpath, flags=rwdts.XactFlag.ADVISE)
- #with self.dts.transaction() as xact:
- # self.reg.update_element(
- # PingPongNsrConfigPublisher.XPATH,
- # self.nsr_config,
- # flags=rwdts.XactFlag.REPLACE,
- # xact=xact.xact,
- # )
def deregister(self):
if self.reg is not None:
def update_vnf_cloud_map(self,vnf_cloud_map):
self.log.debug("Modifying NSR to add VNF cloud account map: {}".format(vnf_cloud_map))
for vnf_index,cloud_acct in vnf_cloud_map.items():
- vnf_maps = [vnf_map for vnf_map in self.nsr_config.nsr[0].vnf_cloud_account_map if vnf_index == vnf_map.member_vnf_index_ref]
+ vnf_maps = [vnf_map for vnf_map in \
+ self.nsr_config.nsr[0].vnf_cloud_account_map \
+ if vnf_index == vnf_map.member_vnf_index_ref]
if vnf_maps:
vnf_maps[0].cloud_account = cloud_acct
- else:
+ else:
self.nsr_config.nsr[0].vnf_cloud_account_map.add().from_dict({
'member_vnf_index_ref':vnf_index,
'cloud_account':cloud_acct
class PingPongDescriptorPublisher(object):
- def __init__(self, log, loop, dts, num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1):
+ def __init__(self, log, loop, dts, project,
+ num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1):
self.log = log
self.loop = loop
self.dts = dts
+ self.project = project
- self.querier = ManoQuerier(self.log, self.dts)
- self.publisher = DescriptorPublisher(self.log, self.loop, self.dts)
+ self.querier = ManoQuerier(self.log, self.dts, self.project)
+ self.publisher = DescriptorPublisher(self.log, self.loop,
+ self.dts, self.project)
self.ping_vnfd, self.pong_vnfd, self.ping_pong_nsd = \
ping_pong_nsd.generate_ping_pong_descriptors(
pingcount=1,
internal_vlr_count=num_internal_vlrs,
num_vnf_vms=2,
mano_ut=True,
- use_scale_group=True,
+ use_scale_group=False,
use_mon_params=False,
)
-
- self.config_dir = os.path.join(os.getenv('RIFT_ARTIFACTS'),
- "launchpad/libs",
- self.ping_pong_nsd.id,
- "config")
-
@property
def nsd_id(self):
return self.ping_pong_nsd.id
)
-
-
class ManoTestCase(rift.test.dts.AbstractDTSTest):
"""
DTS GI interface unittests
@staticmethod
def get_cal_account(account_type, account_name):
"""
- Creates an object for class RwcalYang.Clo
+ Creates an object for class RwcalYang.Cloud
"""
- account = rwcloudyang.CloudAccount()
+ account = rwcloudyang.YangData_RwProject_Project_Cloud_Account()
if account_type == 'mock':
account.name = account_name
account.account_type = "mock"
return account
@asyncio.coroutine
- def configure_cloud_account(self, dts, cloud_type, cloud_name="cloud1"):
+ def configure_project(self, project=None):
+ if project is None:
+ project = self.project
+
+ proj_xpath = "C,{}/project-config".format(project.prefix)
+ self.log.info("Creating project: {} with {}".
+ format(proj_xpath, project.config.as_dict()))
+ xpath_wild = "C,/rw-project:project/project-config"
+ yield from self.project_publisher.publish_project(project.config,
+ proj_xpath,
+ xpath_wild)
+
+ @asyncio.coroutine
+ def configure_cloud_account(self, dts, cloud_type, cloud_name="cloud1", project=None):
account = self.get_cal_account(cloud_type, cloud_name)
- account_xpath = "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='{}']".format(cloud_name)
self.log.info("Configuring cloud-account: %s", account)
- yield from dts.query_create(account_xpath,
- rwdts.XactFlag.ADVISE,
- account)
+ if project is None:
+ project = self.project
+ xpath = project.add_project(XPaths.cloud_account(account.name))
+ xpath_wild = project.add_project(XPaths.cloud_account())
+
+ # account_xpath = project.add_project(
+ # "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name={}]".format(quoted_key(cloud_name)))
+ # yield from dts.query_create(account_xpath,
+ # rwdts.XactFlag.ADVISE,
+ # account)
+ yield from self.cloud_publisher.publish_account(account, xpath, xpath_wild)
@asyncio.coroutine
def wait_tasklets(self):
self.log.debug("STARTING - %s", self.id())
self.tinfo = self.new_tinfo(self.id())
self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
- self.ping_pong = PingPongDescriptorPublisher(self.log, self.loop, self.dts)
- self.querier = ManoQuerier(self.log, self.dts)
+ self.project = ManoProject(self.log,
+ name=DEFAULT_PROJECT)
+ self.project1 = ManoProject(self.log,
+ name='test-1')
+ self.ping_pong = PingPongDescriptorPublisher(self.log, self.loop,
+ self.dts, self.project)
+ self.querier = ManoQuerier(self.log, self.dts, self.project)
+ self.project_publisher = ProjectPublisher(
+ self.log,
+ loop,
+ self.dts,
+ self.project
+ )
+ self.cloud_publisher = CloudAccountPublisher(
+ self.log,
+ loop,
+ self.dts,
+ self.project
+ )
self.nsr_publisher = PingPongNsrConfigPublisher(
self.log,
loop,
self.dts,
self.ping_pong,
"mock_account",
+ self.project,
)
def test_create_nsr_record(self):
+ @asyncio.coroutine
+ def verify_projects(termination=False):
+ self.log.debug("Verifying projects = %s", XPaths.project())
+
+ accts = yield from self.querier._read_query(XPaths.project(),
+ project=False)
+ projs = []
+ for acc in accts:
+ self.log.debug("Project: {}".format(acc.as_dict()))
+ if acc.name not in projs:
+ projs.append(acc.name)
+ self.log.debug("Merged: {}".format(projs))
+ self.assertEqual(2, len(projs))
+
+ @asyncio.coroutine
+ def verify_cloud_accounts(termination=False):
+ self.log.debug("Verifying cloud accounts = %s", XPaths.cloud_account())
+
+ accts = yield from self.querier._read_query(XPaths.cloud_account())
+ self.assertEqual(2, len(accts))
+
+ accts = yield from self.querier._read_query(
+ self.project1.add_project(XPaths.cloud_account()), project=False)
+ self.assertEqual(1, len(accts))
+
+ accts = yield from self.querier._read_query(
+ "C,/rw-project:project/rw-cloud:cloud/rw-cloud:account",
+ project=False)
+ self.assertEqual(3, len(accts))
+
+ accts = yield from self.querier._read_query(
+ "C,/rw-project:project/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='mock_account']",
+ project=False)
+ self.assertEqual(2, len(accts))
+
@asyncio.coroutine
def verify_cm_state(termination=False, nsrid=None):
self.log.debug("Verifying cm_state path = %s", XPaths.cm_state(nsrid))
- #print("###>>> Verifying cm_state path:", XPaths.cm_state(nsrid))
loop_count = 10
loop_sleep = 10
nsr_config = nsr_configs[0]
self.assertEqual(
- "/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(self.ping_pong.nsd_id),
+ "/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id={}]/project-nsd:name".format(quoted_key(self.ping_pong.nsd_id)),
nsr_config.input_parameter[0].xpath,
)
nsr_opdata_l = yield from self.querier.get_nsr_opdatas(nsrid)
self.assertEqual(1, len(nsr_opdata_l))
nsr_opdata = nsr_opdata_l[0].as_dict()
+ self.log.debug("NSR opdata: {}".format(nsr_opdata))
if ("configured" == nsr_opdata['config_status']):
print("\n###>>> NSR Config Status 'configured' OK <<<###\n")
return
self.log.debug("Sleeping for 10 seconds")
yield from asyncio.sleep(10, loop=self.loop)
- @asyncio.coroutine
- def verify_nsd_ref_count(termination):
- self.log.debug("Verifying nsd ref count= %s", XPaths.nsd_ref_count())
- res_iter = yield from self.dts.query_read(XPaths.nsd_ref_count())
-
- for i in res_iter:
- result = yield from i
- self.log.debug("Got nsd ref count record %s", result)
@asyncio.coroutine
def verify_vnfd_ref_count(termination):
#yield from verify_vlr_record(termination)
yield from verify_nsr_opdata(termination)
yield from verify_nsr_config(termination)
- yield from verify_nsd_ref_count(termination)
yield from verify_vnfd_ref_count(termination)
# Config Manager
yield from verify_cm_state(termination, nsrid)
yield from verify_nsr_config_status(termination, nsrid)
+ yield from verify_cloud_account(termination)
+ yield from verify_project_record(termination)
+
@asyncio.coroutine
def verify_scale_instance(index):
self.log.debug("Verifying scale record path = %s, Termination=%d",
def run_test():
yield from self.wait_tasklets()
+ yield from self.configure_project()
+ yield from self.configure_project(project=self.project1)
cloud_type = "mock"
yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account")
yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account1")
+ yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account",
+ project=self.project1)
+
+ yield from verify_cloud_accounts()
+ yield from verify_projects()
yield from self.ping_pong.publish_desciptors()
+ return
# Attempt deleting VNFD not in use
yield from self.ping_pong.update_ping_vnfd()
ConfigManagerTasklet(),
UIServer(),
RedisServer(),
- rift.vcs.RestPortForwardTasklet(),
rift.vcs.RestconfTasklet(),
rift.vcs.RiftCli(),
rift.vcs.uAgentTasklet(),
standby_procs = [
RedisServer(),
- rift.vcs.uAgentTasklet(mode_active=False),
+ rift.vcs.uAgentTasklet()
]
restart_procs = [
# Create the prepared system from the demo
system = rift.vcs.demo.prepared_system_from_demo_and_args(demo, args,
- northbound_listing="cli_launchpad_schema_listing.txt",
+ northbound_listing=["platform_schema_listing.txt", "platform_mgmt_schema_listing.txt", "cli_launchpad_schema_listing.txt"],
netconf_trace_override=True)
confd_ip = socket.gethostbyname(socket.gethostname())
gi.require_version('RwNsrYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
gi.require_version('RwConmanYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
from gi.repository import (
- NsdYang,
+ ProjectNsdYang as NsdYang,
NsrYang,
RwBaseYang,
RwCloudYang,
RwNsrYang,
RwResourceMgrYang,
RwConmanYang,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
VldYang,
)
pass
-def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1", project="default"):
logger.info("Waiting for onboard trans_id %s to complete",
transaction_id)
start_time = time.time()
while (time.time() - start_time) < timeout_secs:
r = requests.get(
- 'http://{host}:4567/api/upload/{t_id}/state'.format(
- host=host, t_id=transaction_id
+ 'http://{host}:8008/api/operational/project/{proj}/create-jobs/job/{t_id}'.format(
+ host=host, proj=project, t_id=transaction_id
)
)
state = r.json()
raise DescriptorOnboardError(state)
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "UTM-only"
nsr.short_name = "UTM-only"
cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
def test_configure_pools(self, resource_mgr_proxy):
- pools = RwResourceMgrYang.ResourcePools.from_dict({
+ pools = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgrConfig_ResourcePools.from_dict({
"pools": [{ "name": "vm_pool_a",
"resource_type": "compute",
"pool_type" : "dynamic"},
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
- def test_configure_resource_orchestrator(self, so_proxy):
- cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
- 'ro_port' : 2022,
- 'ro_username' : 'admin',
- 'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
-
- def test_configure_service_orchestrator(self, nsm_proxy):
- cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
- 'cm_port' : 2022,
- 'cm_username' : 'admin',
- 'cm_password' : 'admin'})
- nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
-
-
def test_onboard_ktutm_vnfd(self, logger, vnfd_proxy, kt_utm_vnfd_package_file):
logger.info("Onboarding kt_utm_vnfd package: %s", kt_utm_vnfd_package_file)
trans_id = upload_descriptor(logger, kt_utm_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 1, "There should only be a single vnfd"
vnfd = vnfds[0]
trans_id = upload_descriptor(logger, utm_only_nsd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsds = catalog.nsd
assert len(nsds) == 1, "There should only be a single nsd"
nsd = nsds[0]
def test_instantiate_utm_only_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsd = catalog.nsd[0]
nsr = create_nsr_from_nsd_id(nsd.id)
- nsr_proxy.merge_config('/ns-instance-config', nsr)
+ nsr_proxy.merge_config('/rw-project:project[rw-project:name="default"]/ns-instance-config', nsr)
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
assert len(nsrs) == 1
assert nsrs[0].ns_instance_config_ref == nsr.id
#!/usr/bin/env python
"""
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
gi.require_version('RwIwpYang', '1.0')
gi.require_version('RwNsmYang', '1.0')
gi.require_version('RwNsrYang', '1.0')
-gi.require_version('RwNsrYang', '1.0')
+gi.require_version('ProjectNsdYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
gi.require_version('RwConmanYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
from gi.repository import (
- NsdYang,
+ ProjectNsdYang as NsdYang,
NsrYang,
RwBaseYang,
RwCloudYang,
RwNsrYang,
RwResourceMgrYang,
RwConmanYang,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
VldYang,
)
pass
-def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1", project="default"):
logger.info("Waiting for onboard trans_id %s to complete",
transaction_id)
start_time = time.time()
while (time.time() - start_time) < timeout_secs:
r = requests.get(
- 'http://{host}:4567/api/upload/{t_id}/state'.format(
- host=host, t_id=transaction_id
+ 'http://{host}:8008/api/operational/project/{proj}/create-jobs/job/{t_id}/'.format(
+ host=host, proj=project, t_id=transaction_id
)
)
state = r.json()
raise DescriptorOnboardError(state)
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "UTM-WIMS"
nsr.short_name = "UTM-WIMS"
cloud_account.openstack.tenant = 'demo'
cloud_account.openstack.mgmt_network = 'private'
- cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+ cloud_proxy.merge_config("/rw-project:project/rw-cloud:cloud-account", cloud_account)
def test_configure_pools(self, resource_mgr_proxy):
- pools = RwResourceMgrYang.ResourcePools.from_dict({
+ pools = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgrConfig_ResourcePools.from_dict({
"pools": [{ "name": "vm_pool_a",
"resource_type": "compute",
"pool_type" : "dynamic"},
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
-
- def test_configure_resource_orchestrator(self, so_proxy):
- cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
- 'ro_port' : 2022,
- 'ro_username' : 'admin',
- 'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
-
- def test_configure_service_orchestrator(self, nsm_proxy):
- cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
- 'cm_port' : 2022,
- 'cm_username' : 'admin',
- 'cm_password' : 'admin'})
- nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
-
def test_onboard_ktutm_vnfd(self, logger, vnfd_proxy, kt_utm_vnfd_package_file):
logger.info("Onboarding kt_utm_vnfd package: %s", kt_utm_vnfd_package_file)
trans_id = upload_descriptor(logger, kt_utm_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 1, "There should only be a single vnfd"
vnfd = vnfds[0]
trans_id = upload_descriptor(logger, kt_wims_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 2, "There should only be two vnfd"
assert "kt_wims_vnfd" in [vnfds[0].name, vnfds[1].name]
trans_id = upload_descriptor(logger, utm_wims_nsd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsds = catalog.nsd
assert len(nsds) == 1, "There should only be a single nsd"
nsd = nsds[0]
def test_instantiate_utm_wims_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsd = catalog.nsd[0]
nsr = create_nsr_from_nsd_id(nsd.id)
- nsr_proxy.merge_config('/ns-instance-config', nsr)
+ nsr_proxy.merge_config('/rw-project:project[rw-project:name="default"]/ns-instance-config', nsr)
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
assert len(nsrs) == 1
assert nsrs[0].ns_instance_config_ref == nsr.id
@brief Launchpad Module Test
"""
+import datetime
+import gi
import json
import logging
import os
import pytest
-import shlex
import requests
+import shlex
import subprocess
import time
import uuid
-import datetime
-import gi
gi.require_version('RwBaseYang', '1.0')
gi.require_version('RwCloudYang', '1.0')
-gi.require_version('RwIwpYang', '1.0')
gi.require_version('RwlogMgmtYang', '1.0')
gi.require_version('RwNsmYang', '1.0')
-gi.require_version('RwNsmYang', '1.0')
+gi.require_version('ProjectNsdYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
gi.require_version('RwConmanYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
from gi.repository import (
- NsdYang,
+ ProjectNsdYang as NsdYang,
NsrYang,
RwBaseYang,
RwCloudYang,
- RwIwpYang,
RwlogMgmtYang,
RwNsmYang,
RwNsrYang,
RwResourceMgrYang,
RwConmanYang,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
VldYang,
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
logging.basicConfig(level=logging.DEBUG)
raise PackageError("Could not find ns packages")
-@pytest.fixture(scope='module')
-def iwp_proxy(request, mgmt_session):
- return mgmt_session.proxy(RwIwpYang)
-
-
@pytest.fixture(scope='module')
def rwlog_mgmt_proxy(request, mgmt_session):
return mgmt_session.proxy(RwlogMgmtYang)
def create_nsr_from_nsd_id(nsd_id):
- nsr = RwNsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = RwNsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "pingpong_{}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
nsr.short_name = "nsr_short_name"
nsr.admin_status = "ENABLED"
nsr.cloud_account = "openstack"
- param = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter()
- param.xpath = '/nsd:nsd-catalog/nsd:nsd/nsd:vendor'
+ param = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter()
+ param.xpath = '/rw-project:project/project-nsd:nsd-catalog/project-nsd:nsd/project-nsd:vendor'
param.value = "rift-o-matic"
nsr.input_parameter.append(param)
pass
-def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1", project="default"):
logger.info("Waiting for onboard trans_id %s to complete",
transaction_id)
start_time = time.time()
while (time.time() - start_time) < timeout_secs:
r = requests.get(
- 'http://{host}:4567/api/upload/{t_id}/state'.format(
- host=host, t_id=transaction_id
+ 'http://{host}:8008/api/operational/project/{proj}/create-jobs/job/{t_id}'.format(
+ host=host, proj=project, t_id=transaction_id
)
)
state = r.json()
rwlog_mgmt_proxy.merge_config("/rwlog-mgmt:logging", logging)
def test_configure_cloud_account(self, cloud_proxy, logger):
- cloud_account = RwCloudYang.CloudAccount()
+ cloud_account = RwCloudYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
# cloud_account.name = "cloudsim_proxy"
# cloud_account.account_type = "cloudsim_proxy"
cloud_account.name = "openstack"
trans_id = upload_descriptor(logger, ping_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 1, "There should only be a single vnfd"
vnfd = vnfds[0]
trans_id = upload_descriptor(logger, pong_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 2, "There should be two vnfds"
assert "pong_vnfd" in [vnfds[0].name, vnfds[1].name]
trans_id = upload_descriptor(logger, ping_pong_nsd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsds = catalog.nsd
assert len(nsds) == 1, "There should only be a single nsd"
nsd = nsds[0]
assert nsd.name == "ping_pong_nsd"
def test_instantiate_ping_pong_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsd = catalog.nsd[0]
nsr = create_nsr_from_nsd_id(nsd.id)
- rwnsr_proxy.merge_config('/ns-instance-config', nsr)
+ rwnsr_proxy.merge_config('/rw-project:project[rw-project:name="default"]/ns-instance-config', nsr)
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
assert len(nsrs) == 1
assert nsrs[0].ns_instance_config_ref == nsr.id
# assert False, "Did not find all ping and pong component in time"
#def test_terminate_ping_pong_ns(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
- # nsr_configs = nsr_proxy.get_config('/ns-instance-config')
+ # nsr_configs = nsr_proxy.get_config('/rw-project:project[rw-project:name="default"]/ns-instance-config')
# nsr = nsr_configs.nsr[0]
# nsr_id = nsr.id
- # nsr_configs = nsr_proxy.delete_config("/ns-instance-config/nsr[id='{}']".format(nsr_id))
+ # nsr_configs = nsr_proxy.delete_config("/ns-instance-config/nsr[id={}]".format(quoted_key(nsr_id)))
#!/usr/bin/env python
"""
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
import gi
gi.require_version('RwIwpYang', '1.0')
gi.require_version('RwNsrYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
gi.require_version('RwCloudYang', '1.0')
gi.require_version('RwBaseYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
-from gi.repository import RwIwpYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang
+from gi.repository import (
+ RwIwpYang,
+ ProjectNsdYang as NsdYang,
+ NsrYang,
+ RwNsrYang,
+ VldYang,
+ RwProjectVnfdYang as RwVnfdYang,
+ RwCloudYang,
+ RwBaseYang,
+ RwResourceMgrYang,
+ RwConmanYang,
+ RwNsmYang
+)
logging.basicConfig(level=logging.DEBUG)
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "TG-2Vrouter-TS EPA"
nsr.short_name = "TG-2Vrouter-TS EPA"
pass
-def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1", project='default'):
logger.info("Waiting for onboard trans_id %s to complete",
transaction_id)
start_time = time.time()
while (time.time() - start_time) < timeout_secs:
r = requests.get(
- 'http://{host}:4567/api/upload/{t_id}/state'.format(
- host=host, t_id=transaction_id
+ 'http://{host}:8008/api/operational/project/{proj}/create-jobs/job/{t_id}'.format(
+ host=host, proj=project, t_id=transaction_id
)
)
state = r.json()
cloud_account.openstack.tenant = 'demo'
cloud_account.openstack.mgmt_network = 'private'
- cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+ cloud_proxy.merge_config("/rw-project:project/rw-cloud:cloud-account", cloud_account)
def test_configure_pools(self, resource_mgr_proxy):
- pools = RwResourceMgrYang.ResourcePools.from_dict({
+ pools = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgrConfig_ResourcePools.from_dict({
"pools": [{ "name": "vm_pool_a",
"resource_type": "compute",
"pool_type" : "dynamic"},
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
- def test_configure_resource_orchestrator(self, so_proxy):
- cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
- 'ro_port' : 2022,
- 'ro_username' : 'admin',
- 'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
-
- def test_configure_service_orchestrator(self, nsm_proxy):
- cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
- 'cm_port' : 2022,
- 'cm_username' : 'admin',
- 'cm_password' : 'admin'})
- nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
-
-
def test_onboard_tg_vnfd(self, logger, vnfd_proxy, tg_vnfd_package_file):
logger.info("Onboarding trafgen_vnfd package: %s", tg_vnfd_package_file)
trans_id = upload_descriptor(logger, tg_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 1, "There should be one vnfds"
assert "trafgen_vnfd" in [vnfds[0].name]
trans_id = upload_descriptor(logger, vrouter_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 2, "There should be two vnfds"
assert "vrouter_vnfd" in [vnfds[0].name, vnfds[1].name]
trans_id = upload_descriptor(logger, ts_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 3, "There should be three vnfds"
assert "trafsink_vnfd" in [vnfds[0].name, vnfds[1].name, vnfds[2].name]
trans_id = upload_descriptor(logger, tg_2vrouter_ts_nsd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsds = catalog.nsd
assert len(nsds) == 1, "There should only be a single nsd"
nsd = nsds[0]
assert nsd.short_name == "tg_2vrouter_ts_nsd"
def test_instantiate_tg_2vrouter_ts_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsd = catalog.nsd[0]
nsr = create_nsr_from_nsd_id(nsd.id)
- nsr_proxy.merge_config('/ns-instance-config', nsr)
+ nsr_proxy.merge_config('/rw-project:project[rw-project:name="default"]/ns-instance-config', nsr)
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
assert len(nsrs) == 1
assert nsrs[0].ns_instance_config_ref == nsr.id
#!/usr/bin/env python
"""
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
import gi
gi.require_version('RwIwpYang', '1.0')
gi.require_version('RwNsrYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
gi.require_version('RwCloudYang', '1.0')
gi.require_version('RwBaseYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
gi.require_version('RwConmanYang', '1.0')
gi.require_version('RwNsmYang', '1.0')
-
-
-from gi.repository import RwIwpYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang
+from gi.repository import (
+ RwIwpYang,
+ ProjectNsdYang as NsdYang,
+ NsrYang,
+ RwNsrYang,
+ VldYang,
+ RwProjectVnfdYang as RwVnfdYang,
+ RwCloudYang,
+ RwBaseYang,
+ RwResourceMgrYang,
+ RwConmanYang,
+ RwNsmYang
+ )
logging.basicConfig(level=logging.DEBUG)
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "TG-2Vrouter-TS EPA"
nsr.short_name = "TG-2Vrouter-TS EPA"
pass
-def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1", project="default"):
logger.info("Waiting for onboard trans_id %s to complete",
transaction_id)
start_time = time.time()
while (time.time() - start_time) < timeout_secs:
r = requests.get(
- 'http://{host}:4567/api/upload/{t_id}/state'.format(
- host=host, t_id=transaction_id
+ 'http://{host}:8008/api/operational/project/{proj}/create-jobs/job/{t_id}'.format(
+ host=host, proj=project, t_id=transaction_id
)
)
state = r.json()
cloud_account.openstack.tenant = 'demo'
cloud_account.openstack.mgmt_network = 'private'
- cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+ cloud_proxy.merge_config("/rw-project:project/rw-cloud:cloud-account", cloud_account)
def test_configure_pools(self, resource_mgr_proxy):
- pools = RwResourceMgrYang.ResourcePools.from_dict({
+ pools = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgrConfig_ResourcePools.from_dict({
"pools": [{ "name": "vm_pool_a",
"resource_type": "compute",
"pool_type" : "dynamic"},
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
-
- def test_configure_resource_orchestrator(self, so_proxy):
- cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
- 'ro_port' : 2022,
- 'ro_username' : 'admin',
- 'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
-
- def test_configure_service_orchestrator(self, nsm_proxy):
- cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
- 'cm_port' : 2022,
- 'cm_username' : 'admin',
- 'cm_password' : 'admin'})
- nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
-
def test_onboard_tg_vnfd(self, logger, vnfd_proxy, tg_vnfd_package_file):
logger.info("Onboarding trafgen_vnfd package: %s", tg_vnfd_package_file)
trans_id = upload_descriptor(logger, tg_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 1, "There should be one vnfds"
assert "trafgen_vnfd" in [vnfds[0].name]
trans_id = upload_descriptor(logger, vrouter_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 2, "There should be two vnfds"
assert "vrouter_vnfd" in [vnfds[0].name, vnfds[1].name]
trans_id = upload_descriptor(logger, ts_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 3, "There should be three vnfds"
assert "trafsink_vnfd" in [vnfds[0].name, vnfds[1].name, vnfds[2].name]
trans_id = upload_descriptor(logger, tg_2vrouter_ts_nsd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsds = catalog.nsd
assert len(nsds) == 1, "There should only be a single nsd"
nsd = nsds[0]
assert nsd.short_name == "tg_2vrouter_ts_nsd"
def test_instantiate_tg_2vrouter_ts_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsd = catalog.nsd[0]
nsr = create_nsr_from_nsd_id(nsd.id)
- nsr_proxy.merge_config('/ns-instance-config', nsr)
+ nsr_proxy.merge_config('/rw-project:project[rw-project:name="default"]/ns-instance-config', nsr)
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
assert len(nsrs) == 1
assert nsrs[0].ns_instance_config_ref == nsr.id
#!/usr/bin/env python
"""
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
import gi
gi.require_version('RwIwpYang', '1.0')
gi.require_version('RwNsrYang', '1.0')
-gi.require_version('RwVnfdYang', '1.0')
+gi.require_version('RwProjectVnfdYang', '1.0')
gi.require_version('RwCloudYang', '1.0')
gi.require_version('RwBaseYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
gi.require_version('RwNsmYang', '1.0')
-from gi.repository import RwIwpYang, NsdYang, NsrYang, RwNsrYang, VldYang, RwVnfdYang, RwCloudYang, RwBaseYang, RwResourceMgrYang, RwConmanYang, RwNsmYang
+from gi.repository import (
+ RwIwpYang,
+ ProjectNsdYang,
+ NsrYang,
+ RwNsrYang,
+ VldYang,
+ RwProjectVnfdYang as RwVnfdYang,
+ RwCloudYang,
+ RwBaseYang,
+ RwResourceMgrYang,
+ RwConmanYang,
+ RwNsmYang
+ )
logging.basicConfig(level=logging.DEBUG)
def create_nsr_from_nsd_id(nsd_id):
- nsr = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "TG-Vrouter-TS-EPA-SRIOV"
nsr.short_name = "TG-Vrouter-TS-EPA-SRIOV"
pass
-def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1"):
+def wait_unboard_transaction_finished(logger, transaction_id, timeout_secs=600, host="127.0.0.1", project="default"):
logger.info("Waiting for onboard trans_id %s to complete",
transaction_id)
start_time = time.time()
while (time.time() - start_time) < timeout_secs:
r = requests.get(
- 'http://{host}:4567/api/upload/{t_id}/state'.format(
- host=host, t_id=transaction_id
+ 'http://{host}:8008/api/operational/project/{proj}/create-jobs/job/{t_id}'.format(
+ host=host, proj=project, t_id=transaction_id
)
)
state = r.json()
cloud_account.openstack.tenant = 'demo'
cloud_account.openstack.mgmt_network = 'private'
- cloud_proxy.merge_config("/rw-cloud:cloud-account", cloud_account)
+ cloud_proxy.merge_config("/rw-project:project/rw-cloud:cloud-account", cloud_account)
def test_configure_pools(self, resource_mgr_proxy):
- pools = RwResourceMgrYang.ResourcePools.from_dict({
+ pools = RwResourceMgrYang.YangData_RwProject_Project_ResourceMgrConfig_ResourcePools.from_dict({
"pools": [{ "name": "vm_pool_a",
"resource_type": "compute",
"pool_type" : "dynamic"},
"resource_type": "network",
"pool_type" : "dynamic",}]})
- resource_mgr_proxy.merge_config('/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
-
- def test_configure_resource_orchestrator(self, so_proxy):
- cfg = RwConmanYang.RoEndpoint.from_dict({'ro_ip_address': '127.0.0.1',
- 'ro_port' : 2022,
- 'ro_username' : 'admin',
- 'ro_password' : 'admin'})
- so_proxy.merge_config('/rw-conman:cm-config', cfg)
-
- def test_configure_service_orchestrator(self, nsm_proxy):
- cfg = RwNsmYang.SoEndpoint.from_dict({'cm_ip_address': '127.0.0.1',
- 'cm_port' : 2022,
- 'cm_username' : 'admin',
- 'cm_password' : 'admin'})
- nsm_proxy.merge_config('/rw-nsm:ro-config/rw-nsm:cm-endpoint', cfg)
+ resource_mgr_proxy.merge_config('/rw-project:project/rw-resource-mgr:resource-mgr-config/rw-resource-mgr:resource-pools', pools)
-
def test_onboard_tg_vnfd(self, logger, vnfd_proxy, tg_vnfd_package_file):
logger.info("Onboarding trafgen_vnfd package: %s", tg_vnfd_package_file)
trans_id = upload_descriptor(logger, tg_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 1, "There should be one vnfds"
assert "trafgen_vnfd" in [vnfds[0].name]
trans_id = upload_descriptor(logger, vrouter_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 2, "There should be two vnfds"
assert "vrouter_vnfd" in [vnfds[0].name, vnfds[1].name]
trans_id = upload_descriptor(logger, ts_vnfd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = vnfd_proxy.get_config('/vnfd-catalog')
+ catalog = vnfd_proxy.get_config('/rw-project:project[rw-project:name="default"]/vnfd-catalog')
vnfds = catalog.vnfd
assert len(vnfds) == 3, "There should be three vnfds"
assert "trafsink_vnfd" in [vnfds[0].name, vnfds[1].name, vnfds[2].name]
trans_id = upload_descriptor(logger, tg_vrouter_ts_nsd_package_file)
wait_unboard_transaction_finished(logger, trans_id)
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsds = catalog.nsd
assert len(nsds) == 1, "There should only be a single nsd"
nsd = nsds[0]
assert nsd.name == "tg_vrouter_ts_nsd"
def test_instantiate_tg_vrouter_ts_nsr(self, logger, nsd_proxy, nsr_proxy, rwnsr_proxy, base_proxy):
- catalog = nsd_proxy.get_config('/nsd-catalog')
+ catalog = nsd_proxy.get_config('/rw-project:project[rw-project:name="default"]/nsd-catalog')
nsd = catalog.nsd[0]
nsr = create_nsr_from_nsd_id(nsd.id)
- nsr_proxy.merge_config('/ns-instance-config', nsr)
+ nsr_proxy.merge_config('/rw-project:project[rw-project:name="default"]/ns-instance-config', nsr)
- nsr_opdata = rwnsr_proxy.get('/ns-instance-opdata')
+ nsr_opdata = rwnsr_proxy.get('/rw-project:project[rw-project:name="default"]/ns-instance-opdata')
nsrs = nsr_opdata.nsr
assert len(nsrs) == 1
assert nsrs[0].ns_instance_config_ref == nsr.id
"test_description":"Test targeting launchpad recovery feature",
"run_as_root": true,
"status":"broken",
- "keywords":["nightly","smoke"],
+ "keywords":["nightly"],
"timelimit": 4800,
"networks":[],
"vms":[
import unittest
import xmlrunner
+#Setting RIFT_VAR_ROOT if not already set for unit test execution
+if "RIFT_VAR_ROOT" not in os.environ:
+ os.environ['RIFT_VAR_ROOT'] = os.path.join(os.environ['RIFT_INSTALL'], 'var/rift/unittest')
+
import rift.mano.examples.ping_pong_nsd as ping_pong_nsd
from rift.mano.utils.compare_desc import CompareDescShell
import argparse
import asyncio
+import gi
import logging
import os
import sys
import time
import unittest
import uuid
-
import xmlrunner
import gi.repository.RwDts as rwdts
import rift.tasklets
import rift.test.dts
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
import mano_ut
class NsrDtsHandler(object):
""" The network service DTS handler """
- NSR_XPATH = "C,/nsr:ns-instance-config/nsr:nsr"
- SCALE_INSTANCE_XPATH = "C,/nsr:ns-instance-config/nsr:nsr/nsr:scaling-group/nsr:instance"
+ NSR_XPATH = "C,/rw-project:project/nsr:ns-instance-config/nsr:nsr"
+ SCALE_INSTANCE_XPATH = "C,/rw-project:project/nsr:ns-instance-config/nsr:nsr/nsr:scaling-group/nsr:instance"
def __init__(self, dts, log, loop, nsm):
self._dts = dts
def get_scale_group_instances(self, nsr_id, group_name):
def nsr_id_from_keyspec(ks):
- nsr_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
+ nsr_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
nsr_id = nsr_path_entry.key00.id
return nsr_id
def group_name_from_keyspec(ks):
- group_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
+ group_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
group_name = group_path_entry.key00.scaling_group_name_ref
return group_name
""" Register for Nsr create/update/delete/read requests from dts """
def nsr_id_from_keyspec(ks):
- nsr_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
+ nsr_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr.schema().keyspec_to_entry(ks)
nsr_id = nsr_path_entry.key00.id
return nsr_id
def group_name_from_keyspec(ks):
- group_path_entry = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
+ group_path_entry = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup.schema().keyspec_to_entry(ks)
group_name = group_path_entry.key00.scaling_group_name_ref
return group_name
class XPaths(object):
@staticmethod
def nsr_config(nsr_id=None):
- return ("C,/nsr:ns-instance-config/nsr:nsr" +
- ("[nsr:id='{}']".format(nsr_id) if nsr_id is not None else ""))
+ return ("C,/rw-project:project/nsr:ns-instance-config/nsr:nsr" +
+ ("[nsr:id={}]".format(quoted_key(nsr_id)) if nsr_id is not None else ""))
def scaling_group_instance(nsr_id, group_name, instance_id):
- return ("C,/nsr:ns-instance-config/nsr:nsr" +
- "[nsr:id='{}']".format(nsr_id) +
+ return ("C,/rw-project:project/nsr:ns-instance-config/nsr:nsr" +
+ "[nsr:id={}]".format(quoted_key(nsr_id)) +
"/nsr:scaling-group" +
- "[nsr:scaling-group-name-ref='{}']".format(group_name) +
+ "[nsr:scaling-group-name-ref={}]".format(quoted_key(group_name)) +
"/nsr:instance" +
- "[nsr:id='{}']".format(instance_id)
+ "[nsr:id={}]".format(quoted_key(instance_id))
)
block = xact.block_create()
block.add_query_update(
XPaths.nsr_config(nsr1_uuid),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr(id=nsr1_uuid, name="fu"),
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr(id=nsr1_uuid, name="fu"),
flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
)
yield from block.execute(now=True)
block = xact.block_create()
block.add_query_update(
XPaths.scaling_group_instance(nsr1_uuid, "group", 1234),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=1234),
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=1234),
flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
)
yield from block.execute(now=True)
block = xact.block_create()
block.add_query_create(
XPaths.scaling_group_instance(nsr1_uuid, "group", 12345),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=12345),
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_ScalingGroup_Instance(id=12345),
flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
)
yield from block.execute(now=True)
block = xact.block_create()
block.add_query_update(
XPaths.nsr_config(nsr2_uuid),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr(id=nsr2_uuid, name="fu2"),
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr(id=nsr2_uuid, name="fu2"),
flags=rwdts.XactFlag.ADVISE | rwdts.XactFlag.TRACE,
)
yield from block.execute(now=True)
import types
import unittest
import uuid
+import os
+import xmlrunner
+
+#Setting RIFT_VAR_ROOT if not already set for unit test execution
+if "RIFT_VAR_ROOT" not in os.environ:
+ os.environ['RIFT_VAR_ROOT'] = os.path.join(os.environ['RIFT_INSTALL'], 'var/rift/unittest')
import rift.test.dts
import rift.tasklets.rwnsmtasklet.cloud as cloud
+import rift.tasklets.rwnsmtasklet.rwnsmplugin as rwnsmplugin
import rift.tasklets.rwnsmtasklet.openmano_nsm as openmano_nsm
+from rift.mano.utils.project import ManoProject
import rw_peas
import gi
-gi.require_version('RwDtsYang', '1.0')
+gi.require_version('RwDts', '1.0')
from gi.repository import (
- RwLaunchpadYang as launchpadyang,
+ RwRoAccountYang as roaccountyang,
RwDts as rwdts,
- RwVnfdYang,
+ RwProjectVnfdYang as RwVnfdYang,
RwVnfrYang,
RwNsrYang,
- RwNsdYang,
- VnfrYang
+ RwProjectNsdYang as RwNsdYang,
+ VnfrYang,
)
self.log = log
self.loop = loop
self.dts = dts
-
self._registrations = []
+ @asyncio.coroutine
+ def update(self, xpath, desc):
+ self._registrations[-1].update_element(xpath, desc)
+
+ @asyncio.coroutine
+ def delete(self, xpath):
+ self._registrations[-1].delete_element(xpath)
+
@asyncio.coroutine
def publish(self, w_path, path, desc):
ready_event = asyncio.Event(loop=self.loop)
)
self.log.debug("Registering path: %s, obj:%s", w_path, desc)
+
reg = yield from self.dts.register(
w_path,
handler,
flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ
)
+
self._registrations.append(reg)
self.log.debug("Registered path : %s", w_path)
yield from ready_event.wait()
class RoAccountDtsTestCase(rift.test.dts.AbstractDTSTest):
@classmethod
def configure_schema(cls):
- return launchpadyang.get_schema()
+ return roaccountyang.get_schema()
@classmethod
def configure_timeout(cls):
self.log.debug("STARTING - %s", test_id)
self.tinfo = self.new_tinfo(str(test_id))
self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
+ self.project = ManoProject(self.log)
self.tinfo_sub = self.new_tinfo(str(test_id) + "_sub")
self.dts_sub = rift.tasklets.DTS(self.tinfo_sub, self.schema, self.loop)
@rift.test.dts.async_test
def test_orch_account_create(self):
- orch = cloud.ROAccountPluginSelector(self.dts, self.log, self.loop, None)
-
- yield from orch.register()
-
+ ro_cfg_sub = cloud.ROAccountConfigSubscriber(self.dts, self.log, self.loop, self.project, None)
+ yield from ro_cfg_sub.register()
+
+ ro_plugin = ro_cfg_sub.get_ro_plugin(account_name=None)
# Test if we have a default plugin in case no RO is specified.
- assert type(orch.ro_plugin) is cloud.RwNsPlugin
- mock_orch_acc = launchpadyang.ResourceOrchestrator.from_dict(
- {'name': 'rift-ro', 'account_type': 'rift_ro', 'rift_ro': {'rift_ro': True}})
+ assert type(ro_plugin) is rwnsmplugin.RwNsPlugin
# Test rift-ro plugin CREATE
- w_xpath = "C,/rw-launchpad:resource-orchestrator"
- xpath = w_xpath
- yield from self.publisher.publish(w_xpath, xpath, mock_orch_acc)
- yield from asyncio.sleep(5, loop=self.loop)
-
- assert type(orch.ro_plugin) is cloud.RwNsPlugin
+ w_xpath = self.project.add_project("C,/rw-ro-account:ro-account/rw-ro-account:account")
+ xpath = w_xpath + "[rw-ro-account:name='openmano']"
# Test Openmano plugin CREATE
- mock_orch_acc = launchpadyang.ResourceOrchestrator.from_dict(
+ mock_orch_acc = roaccountyang.YangData_RwProject_Project_RoAccount_Account.from_dict(
{'name': 'openmano',
- 'account_type': 'openmano',
+ 'ro_account_type': 'openmano',
'openmano': {'tenant_id': "abc",
"port": 9999,
"host": "10.64.11.77"}})
+
yield from self.publisher.publish(w_xpath, xpath, mock_orch_acc)
yield from asyncio.sleep(5, loop=self.loop)
-
- assert type(orch.ro_plugin) is openmano_nsm.OpenmanoNsPlugin
- assert orch.ro_plugin._cli_api._port == mock_orch_acc.openmano.port
- assert orch.ro_plugin._cli_api._host == mock_orch_acc.openmano.host
+
+ ro_plugin = ro_cfg_sub.get_ro_plugin(account_name='openmano')
+ assert type(ro_plugin) is openmano_nsm.OpenmanoNsPlugin
# Test update
mock_orch_acc.openmano.port = 9789
mock_orch_acc.openmano.host = "10.64.11.78"
- yield from self.dts.query_update("C,/rw-launchpad:resource-orchestrator",
- rwdts.XactFlag.ADVISE, mock_orch_acc)
- assert orch.ro_plugin._cli_api._port == mock_orch_acc.openmano.port
- assert orch.ro_plugin._cli_api._host == mock_orch_acc.openmano.host
-
- # Test update when a live instance exists
- # Exception should be thrown
- orch.handle_nsr(None, rwdts.QueryAction.CREATE)
- mock_orch_acc.openmano.port = 9788
-
- with self.assertRaises(Exception):
- yield from self.dts.query_update("C,/rw-launchpad:resource-orchestrator",
- rwdts.XactFlag.ADVISE, mock_orch_acc)
+ yield from self.publisher.update(xpath, mock_orch_acc)
+ yield from asyncio.sleep(5, loop=self.loop)
- # Test delete
- yield from self.dts.query_delete("C,/rw-launchpad:resource-orchestrator",
- flags=rwdts.XactFlag.ADVISE)
- assert orch.ro_plugin == None
+ #Since update means delete followed by a insert get the new ro_plugin.
+ ro_plugin = ro_cfg_sub.get_ro_plugin(account_name='openmano')
+ assert ro_plugin._cli_api._port == mock_orch_acc.openmano.port
+ assert ro_plugin._cli_api._host == mock_orch_acc.openmano.host
+ # Test delete to be implemented. right now facing some dts issues.
+ # Use DescriptorPublisher delete for deletion
def main(argv=sys.argv[1:]):
# when this is called from the interpreter).
unittest.main(
argv=[__file__] + argv,
- testRunner=None#xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
+ testRunner=xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
)
if __name__ == '__main__':
- main()
\ No newline at end of file
+ main()
#!/usr/bin/env python3
-#
+#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
UnknownAccountError,
)
import rw_peas
+from rift.mano.utils.project import ManoProject, DEFAULT_PROJECT
class wait_for_pending_tasks(object):
def make_nsr(ns_instance_config_ref=str(uuid.uuid4())):
- nsr = NsrYang.YangData_Nsr_NsInstanceOpdata_Nsr()
+ nsr = NsrYang.YangData_RwProject_Project_NsInstanceOpdata_Nsr()
nsr.ns_instance_config_ref = ns_instance_config_ref
return nsr
def make_vnfr(id=str(uuid.uuid4())):
- vnfr = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
+ vnfr = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
vnfr.id = id
return vnfr
def make_vdur(id=str(uuid.uuid4()), vim_id=str(uuid.uuid4())):
- vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ vdur = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
vdur.id = id
vdur.vim_id = vim_id
return vdur
return True
def nfvi_metrics(self, account, vim_id):
- metrics = RwmonYang.NfviMetrics()
+ metrics = RwmonYang.YangData_RwProject_Project_NfviMetrics()
metrics.vcpu.utilization = 0.5
return metrics
self.loop = asyncio.new_event_loop()
self.logger = logging.getLogger('test-logger')
- self.account = RwcalYang.CloudAccount(
+ self.account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList(
name='test-cloud-account',
account_type="mock",
)
mock = self.plugin_manager.plugin(self.account.name)
mock.set_impl(TestNfviMetricsCache.Plugin())
- self.vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ self.vdur = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
self.vdur.id = "test-vdur-id"
self.vdur.vim_id = "test-vim-id"
self.vdur.vm_flavor.vcpu_count = 4
return True
def nfvi_metrics(self, account, vim_id):
- metrics = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
+ metrics = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_NfviMetrics()
metrics.vcpu.utilization = 0.5
return None, metrics
def setUp(self):
self.loop = asyncio.new_event_loop()
- self.account = RwcalYang.CloudAccount(
+ self.account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList(
name='test-cloud-account',
account_type="mock",
)
self._alarms = set()
def nfvi_metrics(self, account, vm_id):
- return rwmon.NfviMetrics()
+ return rwmon.YangData_RwProject_Project_NfviMetrics()
def nfvi_metrics_available(self, account):
return True
self.loop = asyncio.new_event_loop()
self.logger = logging.getLogger('test-logger')
- self.account = RwcalYang.CloudAccount(
+ self.account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList(
name='test-cloud-account',
account_type="mock",
)
def test_retrieve(self):
pass
+ @unittest.skip("Alarms are being disabled in monitor")
def test_alarm_create_and_destroy(self):
- alarm = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur_Alarms()
+ alarm = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur_Alarms()
alarm.name = "test-alarm"
alarm.description = "test-description"
alarm.vdur_id = "test-vdur-id"
# return a VCPU utilization of 0.5.
class MockPlugin(object):
def __init__(self):
- self.metrics = RwmonYang.NfviMetrics()
+ self.metrics = RwmonYang.YangData_RwProject_Project_NfviMetrics()
def nfvi_metrics(self, account, vim_id):
self.metrics.vcpu.utilization = 0.5
self.loop = asyncio.get_event_loop()
self.logger = logging.getLogger('test-logger')
- self.account = RwcalYang.CloudAccount(
+ self.account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList(
name='test-cloud-account',
account_type="mock",
)
def setUp(self):
self.logger = logging.getLogger('test-logger')
self.plugins = NfviMetricsPluginManager(self.logger)
- self.account = RwcalYang.CloudAccount(
+ self.account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList(
name='test-cloud-account',
account_type="mock",
)
self.loop = asyncio.get_event_loop()
self.logger = logging.getLogger('test-logger')
+ self.project = ManoProject(self.logger, name=DEFAULT_PROJECT)
self.config = InstanceConfiguration()
- self.monitor = Monitor(self.loop, self.logger, self.config)
+ self.monitor = Monitor(self.loop, self.logger, self.config, self.project)
- self.account = RwcalYang.CloudAccount(
+ self.account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList(
name='test-cloud-account',
account_type="mock",
)
self.monitor.add_cloud_account(self.account)
# Create a VNFR associated with the cloud account
- vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
- vnfr.cloud_account = self.account.name
+ vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
+ vnfr.datacenter = self.account.name
vnfr.id = 'test-vnfr-id'
# Add a VDUR to the VNFR
to retrieve the NFVI metrics associated with the VDU.
"""
# Define the VDUR to be registered
- vdur = VnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ vdur = VnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
vdur.vm_flavor.vcpu_count = 4
vdur.vm_flavor.memory_mb = 100
vdur.vm_flavor.storage_gb = 2
the VDURs contained in the VNFR are unregistered.
"""
# Define the VDUR to be registered
- vdur = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ vdur = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
vdur.vim_id = 'test-vim-id-1'
vdur.id = 'test-vdur-id-1'
- vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
- vnfr.cloud_account = self.account.name
+ vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
+ vnfr.datacenter = self.account.name
vnfr.id = 'test-vnfr-id'
vnfr.vdur.append(vdur)
# Add another VDUR to the VNFR and update the monitor. Both VDURs
# should now be registered
- vdur = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr_Vdur()
+ vdur = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr_Vdur()
vdur.vim_id = 'test-vim-id-2'
vdur.id = 'test-vdur-id-2'
Monitor.
"""
# Create the VNFR
- vnfr = RwVnfrYang.YangData_Vnfr_VnfrCatalog_Vnfr()
- vnfr.cloud_account = self.account.name
+ vnfr = RwVnfrYang.YangData_RwProject_Project_VnfrCatalog_Vnfr()
+ vnfr.datacenter = self.account.name
vnfr.id = 'test-vnfr-id'
# Create 2 VDURs
class MockPlugin(object):
def __init__(self):
self._metrics = dict()
- self._metrics['test-vim-id-1'] = RwmonYang.NfviMetrics()
- self._metrics['test-vim-id-2'] = RwmonYang.NfviMetrics()
+ self._metrics['test-vim-id-1'] = RwmonYang.YangData_RwProject_Project_NfviMetrics()
+ self._metrics['test-vim-id-2'] = RwmonYang.YangData_RwProject_Project_NfviMetrics()
def nfvi_metrics(self, account, vim_id):
metrics = self._metrics[vim_id]
#!/usr/bin/env python3
-#
-# Copyright 2016 RIFT.IO Inc
+#
+# Copyright 2016-17 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
import uuid
import xmlrunner
+import gi
+gi.require_version('ProjectNsdYang', '1.0')
+gi.require_version('NsrYang', '1.0')
+
+#Setting RIFT_VAR_ROOT if not already set for unit test execution
+if "RIFT_VAR_ROOT" not in os.environ:
+ os.environ['RIFT_VAR_ROOT'] = os.path.join(os.environ['RIFT_INSTALL'], 'var/rift/unittest')
+
from gi.repository import (
- NsdYang,
- NsrYang,
- )
+ ProjectNsdYang,
+ NsrYang,
+)
+
logger = logging.getLogger('test-rwnsmtasklet')
import rift.tasklets.rwnsmtasklet.rwnsmtasklet as rwnsmtasklet
import rift.tasklets.rwnsmtasklet.xpath as rwxpath
+from rift.mano.utils.project import ManoProject
+
+
+def prefix_project(xpath):
+ return "/rw-project:project" + xpath
class TestGiXpath(unittest.TestCase):
def setUp(self):
"""
# Create the initial NSD catalog
- nsd_catalog = NsdYang.YangData_Nsd_NsdCatalog()
+ nsd_catalog = ProjectNsdYang.YangData_RwProject_Project_NsdCatalog()
# Create an NSD, set its 'id', and add it to the catalog
nsd_id = str(uuid.uuid4())
nsd_catalog.nsd.append(
- NsdYang.YangData_Nsd_NsdCatalog_Nsd(
+ ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd(
id=nsd_id,
)
)
# Retrieve the NSD using and xpath expression
- xpath = '/nsd:nsd-catalog/nsd:nsd[nsd:id={}]'.format(nsd_id)
+ xpath = prefix_project('/project-nsd:nsd-catalog/project-nsd:nsd[project-nsd:id={}]'.
+ format(nsd_id))
nsd = rwxpath.getxattr(nsd_catalog, xpath)
self.assertEqual(nsd_id, nsd.id)
# Modified the name of the NSD using an xpath expression
- rwxpath.setxattr(nsd_catalog, xpath + "/nsd:name", "test-name")
+ rwxpath.setxattr(nsd_catalog, xpath + "/project-nsd:name", "test-name")
- name = rwxpath.getxattr(nsd_catalog, xpath + "/nsd:name")
+ name = rwxpath.getxattr(nsd_catalog, xpath + "/project-nsd:name")
self.assertEqual("test-name", name)
def test_nsd_scalar_fields(self):
"""
# Define a simple NSD
- nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ nsd = ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
+
+ xpath = prefix_project('/project-nsd:nsd-catalog/project-nsd:nsd')
# Check that the unset fields are in fact set to None
- self.assertEqual(None, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name"))
- self.assertEqual(None, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name"))
+ self.assertEqual(None, rwxpath.getxattr(nsd, xpath + "/project-nsd:name"))
+ self.assertEqual(None, rwxpath.getxattr(nsd, xpath + "/project-nsd:short-name"))
# Set the values of the 'name' and 'short-name' fields
- rwxpath.setxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name", "test-name")
- rwxpath.setxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name", "test-short-name")
+ rwxpath.setxattr(nsd, xpath + "/project-nsd:name", "test-name")
+ rwxpath.setxattr(nsd, xpath + "/project-nsd:short-name", "test-short-name")
# Check that the 'name' and 'short-name' fields are correctly set
- self.assertEqual(nsd.name, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:name"))
- self.assertEqual(nsd.short_name, rwxpath.getxattr(nsd, "/nsd:nsd-catalog/nsd:nsd/nsd:short-name"))
+ self.assertEqual(nsd.name, rwxpath.getxattr(nsd, xpath + "/project-nsd:name"))
+ self.assertEqual(nsd.short_name, rwxpath.getxattr(nsd, xpath + "/project-nsd:short-name"))
class TestInputParameterSubstitution(unittest.TestCase):
def setUp(self):
- self.substitute_input_parameters = rwnsmtasklet.InputParameterSubstitution(logger)
+ project = ManoProject(logger)
+ self.substitute_input_parameters = rwnsmtasklet.InputParameterSubstitution(logger, project)
def test_null_arguments(self):
"""
config, no exception should be raised.
"""
- nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
- nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsd = ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
+ nsr_config = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
self.substitute_input_parameters(None, None)
self.substitute_input_parameters(nsd, None)
"""
# Define the original NSD
- nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
+ nsd = ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
nsd.name = "robert"
nsd.short_name = "bob"
# Define which parameters may be modified
nsd.input_parameter_xpath.append(
- NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_InputParameterXpath(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
label="NSD Name",
)
)
# Define the input parameters that are intended to be modified
- nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr_config = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr_config.input_parameter.extend([
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
value="alice",
),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
value="alice",
),
"""
# Define the original NSD
- nsd = NsdYang.YangData_Nsd_NsdCatalog_Nsd()
- nsd.name = "robert"
- nsd.short_name = "bob"
+ nsd = ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd()
+ # nsd.name = "robert"
+ # nsd.short_name = "bob"
# Define which parameters may be modified
nsd.input_parameter_xpath.extend([
- NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_InputParameterXpath(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
label="NSD Name",
),
- NsdYang.YangData_Nsd_NsdCatalog_Nsd_InputParameterXpath(
+ ProjectNsdYang.YangData_RwProject_Project_NsdCatalog_Nsd_InputParameterXpath(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
label="NSD Short Name",
),
])
# Define the input parameters that are intended to be modified
- nsr_config = NsrYang.YangData_Nsr_NsInstanceConfig_Nsr()
+ nsr_config = NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr()
nsr_config.input_parameter.extend([
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:name",
value="robert",
),
- NsrYang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter(
+ NsrYang.YangData_RwProject_Project_NsInstanceConfig_Nsr_InputParameter(
xpath="/nsd:nsd-catalog/nsd:nsd/nsd:short-name",
value="bob",
),
# limitations under the License.
#
-
+import argparse
import asyncio
+import gi
+import logging
import os
import sys
+import time
+import types
import unittest
import uuid
import xmlrunner
-import argparse
-import logging
-import time
-import types
-import gi
gi.require_version('RwCloudYang', '1.0')
gi.require_version('RwDts', '1.0')
gi.require_version('RwNsmYang', '1.0')
RwConfigAgentYang as rwcfg_agent,
RwlogMgmtYang
)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
from gi.repository.RwTypes import RwStatus
import rift.mano.examples.ping_pong_nsd as ping_pong_nsd
"""
Creates an object for class RwcalYang.Clo
"""
- account = rwcloudyang.CloudAccount()
+ account = rwcloudyang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
if account_type == 'mock':
account.name = account_name
account.account_type = "mock"
@asyncio.coroutine
def configure_cloud_account(self, dts, cloud_type, cloud_name="cloud1"):
account = self.get_cal_account(cloud_type, cloud_name)
- account_xpath = "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='{}']".format(cloud_name)
+ account_xpath = "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name={}]".format(quoted_key(cloud_name))
self.log.info("Configuring cloud-account: %s", account)
yield from dts.query_create(account_xpath,
rwdts.XactFlag.ADVISE,
cmake_minimum_required(VERSION 2.8)
-set(PKG_NAME rwmon)
-set(PKG_VERSION 1.0)
-set(PKG_RELEASE 1)
-set(PKG_LONG_NAME ${PKG_NAME}-${PKG_VERSION})
-
set(subdirs plugins test)
rift_add_subdirs(SUBDIR_LIST ${subdirs})
#
-# Copyright 2016 RIFT.IO Inc
+# Copyright 2016-2017 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
VALA_PACKAGES
rw_types-1.0 rw_yang-1.0 rw_keyspec-1.0 rw_yang_pb-1.0 rw_schema_proto-1.0
rw_log_yang-1.0 rw_base_yang-1.0 rwmon_yang-1.0 rw_manifest_yang-1.0 protobuf_c-1.0 ietf_netconf_yang-1.0
- rw_log-1.0 rwcal_yang-1.0
+ rw_log-1.0 rwcal_yang-1.0 rw_project_yang-1.0 rw_user_yang-1.0 rw_rbac_base_yang-1.0
VAPI_DIRS ${RIFT_SUBMODULE_BINARY_ROOT}/rwmon/plugins/yang
${RIFT_SUBMODULE_BINARY_ROOT}/rwcal/plugins/yang
${RIFT_SUBMODULE_BINARY_ROOT}/models/plugins/yang
GENERATE_VAPI_FILE ${VALA_LONG_NAME}.vapi
GENERATE_GIR_FILE ${VALA_TYPELIB_PREFIX}.gir
GENERATE_TYPELIB_FILE ${VALA_TYPELIB_PREFIX}.typelib
- DEPENDS rwmon_yang rwcal_yang rwlog_gi rwschema_yang
+ DEPENDS rwmon_yang rwcal_yang
)
rift_install_vala_artifacts(
VAPI_FILES ${VALA_LONG_NAME}.vapi
GIR_FILES ${VALA_TYPELIB_PREFIX}.gir
TYPELIB_FILES ${VALA_TYPELIB_PREFIX}.typelib
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
DEST_PREFIX .
)
* @return RwStatus
*/
public abstract RwTypes.RwStatus nfvi_metrics(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string vm_id,
- out Rwmon.NfviMetrics metrics);
+ out Rwmon.YangData_RwProject_Project_NfviMetrics metrics);
/**
* nfvi_vcpu_metrics
* @return RwStatus
*/
public abstract RwTypes.RwStatus nfvi_vcpu_metrics(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string vm_id,
- out Rwmon.NfviMetrics_Vcpu metrics);
+ out Rwmon.YangData_RwProject_Project_NfviMetrics_Vcpu metrics);
/**
* nfvi_memory_metrics
* @return RwStatus
*/
public abstract RwTypes.RwStatus nfvi_memory_metrics(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string vm_id,
- out Rwmon.NfviMetrics_Memory metrics);
+ out Rwmon.YangData_RwProject_Project_NfviMetrics_Memory metrics);
/**
* nfvi_storage_metrics
* @return RwStatus
*/
public abstract RwTypes.RwStatus nfvi_storage_metrics(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string vm_id,
- out Rwmon.NfviMetrics_Storage metrics);
+ out Rwmon.YangData_RwProject_Project_NfviMetrics_Storage metrics);
/**
* nfvi_metrics_available
* @return RwStatus
*/
public abstract RwTypes.RwStatus nfvi_metrics_available(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
out bool present);
/**
* @return RwStatus
*/
public abstract RwTypes.RwStatus alarm_create(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string vim_id,
- ref Rwmon.Alarm alarm);
+ ref Rwmon.YangData_RwProject_Project_Alarm alarm);
/**
* alarm_update
* @return RwStatus
*/
public abstract RwTypes.RwStatus alarm_update(
- Rwcal.CloudAccount account,
- Rwmon.Alarm alarm);
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ Rwmon.YangData_RwProject_Project_Alarm alarm);
/**
* alarm_delete
* @return RwStatus
*/
public abstract RwTypes.RwStatus alarm_delete(
- Rwcal.CloudAccount account,
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
string alarm_id);
/**
* @return RwStatus
*/
public abstract RwTypes.RwStatus alarm_list(
- Rwcal.CloudAccount account,
- out Rwmon.Alarm[] alarms);
+ Rwcal.YangData_RwProject_Project_CloudAccounts_CloudAccountList account,
+ out Rwmon.YangData_RwProject_Project_Alarm[] alarms);
}
}
include(rift_plugin)
-rift_install_python_plugin(rwmon_ceilometer rwmon_ceilometer.py)
+rift_install_gobject_python_plugin(rwmon_ceilometer rwmon_ceilometer.py COMPONENT ${INSTALL_COMPONENT})
try:
samples = self._get_driver(account).ceilo_nfvi_metrics(vm_id)
- metrics = RwmonYang.NfviMetrics()
+ metrics = RwmonYang.YangData_RwProject_Project_NfviMetrics()
vcpu = samples.get("cpu_util", {})
memory = samples.get("memory_usage", {})
try:
samples = self._get_driver(account).ceilo_nfvi_metrics(vm_id)
- metrics = RwmonYang.NfviMetrics_Vcpu()
+ metrics = RwmonYang.YangData_RwProject_Project_NfviMetrics_Vcpu()
metrics.utilization = samples.get("cpu_util", 0)
return metrics
try:
samples = self._get_driver(account).ceilo_nfvi_metrics(vm_id)
- metrics = RwmonYang.NfviMetrics_Memory()
+ metrics = RwmonYang.YangData_RwProject_Project_NfviMetrics_Memory()
metrics.used = samples.get("memory_usage", 0)
return metrics
try:
samples = self._get_driver(account).ceilo_nfvi_metrics(vm_id)
- metrics = RwmonYang.NfviMetrics_Storage()
+ metrics = RwmonYang.YangData_RwProject_Project_NfviMetrics_Storage()
metrics.used = samples.get("disk_usage", 0)
return metrics
"""Create an OpenstackAuthTokenV2 using account information
Arguments:
- account - an RwcalYang.CloudAccount object
+ account - an RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList object
Returns:
an openstack token
"""Create an OpenstackAuthTokenV3 using account information
Arguments:
- account - an RwcalYang.CloudAccount object
+ account - an RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList object
Returns:
an openstack token
include(rift_plugin)
-rift_install_python_plugin(rwmon_mock rwmon_mock.py)
+rift_install_gobject_python_plugin(rwmon_mock rwmon_mock.py COMPONENT ${INSTALL_COMPONENT})
class NullImpl(object):
def nfvi_metrics(self, account, vm_id):
- return rwmon.NfviMetrics()
+ return rwmon.YangData_RwProject_Project_NfviMetrics()
def nfvi_vcpu_metrics(self, account, vm_id):
- return rwmon.NfviMetrics_Vcpu()
+ return rwmon.YangData_RwProject_Project_NfviMetrics_Vcpu()
def nfvi_memory_metrics(self, account, vm_id):
- return rwmon.NfviMetrics_Memory()
+ return rwmon.YangData_RwProject_Project_NfviMetrics_Memory()
def nfvi_storage_metrics(self, account, vm_id):
- return rwmon.NfviMetrics_Storage()
+ return rwmon.YangData_RwProject_Project_NfviMetrics_Storage()
def nfvi_metrics_available(self, account):
return True
rift_add_yang_target(
TARGET rwmon_yang
YANG_FILES ${source_yang_files}
- COMPONENT ${PKG_LONG_NAME}
+ COMPONENT ${INSTALL_COMPONENT}
DEPENDS
mano-types_yang
LIBRARIES
/*
*
- * Copyright 2016 RIFT.IO Inc
+ * Copyright 2016-2017 RIFT.IO Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
prefix rwbase;
}
- import rw-pb-ext {
- prefix "rwpb";
- }
-
import rw-yang-types {
prefix "rwt";
}
prefix "manotypes";
}
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ revision 2017-02-08 {
+ description
+ "Update model to support projects.";
+ }
+
revision 2015-10-28 {
description
"Initial revision.";
"RIFT monitoring";
}
- container nfvi-metrics {
- rwpb:msg-new NfviMetrics;
-
- leaf timestamp {
- description
+ augment "/rw-project:project" {
+ container nfvi-metrics {
+ leaf timestamp {
+ description
"This is the time when the metric was captured. The timestamp is
represented as the number of seconds since the beginning of the Unix
epoch.";
- type decimal64 {
- fraction-digits 3;
+ type decimal64 {
+ fraction-digits 3;
+ }
}
- }
-
- uses manotypes:nfvi-metrics;
- }
- container alarm {
- rwpb:msg-new Alarm;
+ uses manotypes:nfvi-metrics;
+ }
- uses manotypes:alarm;
+ container alarm {
+ uses manotypes:alarm;
+ }
}
}
plugin = rw_peas.PeasPlugin("rwmon_mock", 'RwMon-1.0')
self.plugin = plugin.get_interface("Monitoring")
- self.account = RwcalYang.CloudAccount()
+ self.account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
self.vim_id = "test-vim-id"
def test_null_data_source(self):
"""
status, metrics = self.plugin.nfvi_metrics(self.account, self.vim_id)
self.assertEqual(status, RwTypes.RwStatus.SUCCESS)
- self.assertEqual(metrics, RwmonYang.NfviMetrics())
+ self.assertEqual(metrics, RwmonYang.YangData_RwProject_Project_NfviMetrics())
status, metrics = self.plugin.nfvi_vcpu_metrics(self.account, self.vim_id)
self.assertEqual(status, RwTypes.RwStatus.SUCCESS)
- self.assertEqual(metrics, RwmonYang.NfviMetrics_Vcpu())
+ self.assertEqual(metrics, RwmonYang.YangData_RwProject_Project_NfviMetrics_Vcpu())
status, metrics = self.plugin.nfvi_memory_metrics(self.account, self.vim_id)
self.assertEqual(status, RwTypes.RwStatus.SUCCESS)
- self.assertEqual(metrics, RwmonYang.NfviMetrics_Memory())
+ self.assertEqual(metrics, RwmonYang.YangData_RwProject_Project_NfviMetrics_Memory())
status, metrics = self.plugin.nfvi_storage_metrics(self.account, self.vim_id)
self.assertEqual(status, RwTypes.RwStatus.SUCCESS)
- self.assertEqual(metrics, RwmonYang.NfviMetrics_Storage())
+ self.assertEqual(metrics, RwmonYang.YangData_RwProject_Project_NfviMetrics_Storage())
status, result = self.plugin.nfvi_metrics_available(self.account)
self.assertEqual(status, RwTypes.RwStatus.SUCCESS)
self.plugin = plugin.get_interface("Monitoring")
self.plugin.set_impl(MockDataSource())
- self.account = RwcalYang.CloudAccount()
+ self.account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
self.vim_id = "test-vim-id"
def test_mock_data_source(self):
are indeed returned.
"""
- expected_vcpu_metrics = RwmonYang.NfviMetrics_Vcpu()
+ expected_vcpu_metrics = RwmonYang.YangData_RwProject_Project_NfviMetrics_Vcpu()
expected_vcpu_metrics.utilization = 50.0
expected_vcpu_metrics.total = 100
self.assertEqual(metrics.total, expected_vcpu_metrics.total)
self.assertEqual(metrics.utilization, expected_vcpu_metrics.utilization)
- expected_memory_metrics = RwmonYang.NfviMetrics_Memory()
+ expected_memory_metrics = RwmonYang.YangData_RwProject_Project_NfviMetrics_Memory()
expected_memory_metrics.used = 90
expected_memory_metrics.total = 100
expected_memory_metrics.utilization = 90/100
self.assertEqual(metrics.total, expected_memory_metrics.total)
self.assertEqual(metrics.utilization, expected_memory_metrics.utilization)
- expected_storage_metrics = RwmonYang.NfviMetrics_Storage()
+ expected_storage_metrics = RwmonYang.YangData_RwProject_Project_NfviMetrics_Storage()
expected_storage_metrics.used = 300
expected_storage_metrics.total = 500
expected_storage_metrics.utilization = 300/500
self.plugin = plugin.get_interface("Monitoring")
self.plugin.set_impl(self.mock)
- self.account = RwcalYang.CloudAccount()
- self.alarm = RwmonYang.Alarm(name='test-alarm')
+ self.account = RwcalYang.YangData_RwProject_Project_CloudAccounts_CloudAccountList()
+ self.alarm = RwmonYang.YangData_RwProject_Project_Alarm(name='test-alarm')
self.vim_id = 'test-vim-id'
def test(self):
self.assertEqual(0, len(alarms))
# Create two alarms
- self.plugin.do_alarm_create(self.account, self.vim_id, RwmonYang.Alarm())
- self.plugin.do_alarm_create(self.account, self.vim_id, RwmonYang.Alarm())
+ self.plugin.do_alarm_create(self.account, self.vim_id, RwmonYang.YangData_RwProject_Project_Alarm())
+ self.plugin.do_alarm_create(self.account, self.vim_id, RwmonYang.YangData_RwProject_Project_Alarm())
_, alarms = self.plugin.do_alarm_list(self.account)
self.assertEqual(2, len(alarms))
"""
def nfvi_metrics(self, account, vm_id):
- metrics = RwmonYang.NfviMetrics()
+ metrics = RwmonYang.YangData_RwProject_Project_NfviMetrics()
metrics.vcpu = self.nfvi_vcpu_metrics(account, vm_id)
metrics.memory = self.nfvi_memory_metrics(account, vm_id)
metrics.storage = self.nfvi_storage_metrics(account, vm_id)
return metrics
def nfvi_vcpu_metrics(self, account, vm_id):
- metrics = RwmonYang.NfviMetrics_Vcpu()
+ metrics = RwmonYang.YangData_RwProject_Project_NfviMetrics_Vcpu()
metrics.total = 100
metrics.utilization = 50.0
return metrics
def nfvi_memory_metrics(self, account, vm_id):
- metrics = RwmonYang.NfviMetrics_Memory()
+ metrics = RwmonYang.YangData_RwProject_Project_NfviMetrics_Memory()
metrics.used = 90
metrics.total = 100
metrics.utilization = 90/100
return metrics
def nfvi_storage_metrics(self, account, vm_id):
- metrics = RwmonYang.NfviMetrics_Storage()
+ metrics = RwmonYang.YangData_RwProject_Project_NfviMetrics_Storage()
metrics.used = 300
metrics.total = 500
metrics.utilization = 300/500
--- /dev/null
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+
+cmake_minimum_required(VERSION 2.8)
+
+set(subdirs
+ plugins
+ )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
--- /dev/null
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+
+cmake_minimum_required(VERSION 2.8)
+
+set(subdirs
+ yang
+ rwprojectmano
+ )
+rift_add_subdirs(SUBDIR_LIST ${subdirs})
--- /dev/null
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+cmake_minimum_required(VERSION 2.8)
+
+include(rift_plugin)
+
+set(TASKLET_NAME rwprojectmano)
+
+##
+# This function creates an install target for the plugin artifacts
+##
+rift_install_gobject_python_plugin(
+ ${TASKLET_NAME} ${TASKLET_NAME}.py
+ COMPONENT ${INSTALL_COMPONENT}
+ )
+
+# Workaround RIFT-6485 - rpmbuild defaults to python2 for
+# anything not in a site-packages directory so we have to
+# install the plugin implementation in site-packages and then
+# import it from the actual plugin.
+rift_python_install_tree(
+ FILES
+ rift/tasklets/${TASKLET_NAME}/__init__.py
+ rift/tasklets/${TASKLET_NAME}/tasklet.py
+ rift/tasklets/${TASKLET_NAME}/projectmano.py
+ rift/tasklets/${TASKLET_NAME}/rolesmano.py
+ COMPONENT ${INSTALL_COMPONENT}
+ PYTHON3_ONLY)
--- /dev/null
+from .tasklet import ProjectMgrManoTasklet
--- /dev/null
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Project Manager tasklet is responsible for managing the Projects
+configurations required for Role Based Access Control feature.
+"""
+
+import asyncio
+import gi
+
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwProjectManoYang', '1.0')
+from gi.repository import (
+ RwDts as rwdts,
+ ProtobufC,
+ RwTypes,
+ RwProjectManoYang,
+)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+import rift.tasklets
+from rift.mano.utils.project import (
+ NS_PROJECT,
+ get_add_delete_update_cfgs,
+ ProjectConfigCallbacks,
+)
+
+
+MANO_PROJECT_ROLES = [
+ { 'mano-role':"rw-project-mano:catalog-oper",
+ 'description':("The catalog-oper Role has read permission to nsd-catalog "
+ "and vnfd-catalog under specific Projects, "
+ "as identified by /rw-project:project/rw-project:name. The "
+ "catatlog-oper Role may also have execute permission to specific "
+ "non-mutating RPCs. This Role is intended for read-only access to "
+ "catalogs under a specific project.") },
+
+ { 'mano-role':"rw-project-mano:catalog-admin",
+ 'description':("The catalog-admin Role has full CRUDX permissions to vnfd and nsd "
+ "catalogs under specific Projects, as identified by "
+ "/rw-project:project/rw-project:name.") },
+
+ { 'mano-role':"rw-project-mano:lcm-oper",
+ 'description':("The lcm-oper Role has read permission to the VL, VNF and NS "
+ "records within a Project. The lcm-oper Role may also have "
+ "execute permission to specific non-mutating RPCs.") },
+
+ { 'mano-role':"rw-project-mano:lcm-admin",
+ 'description':("The lcm-admin Role has full CRUDX permissions to the VL, VNF "
+ "and NS records within a Project. The lcm-admin Role does "
+ "not provide general CRUDX permissions to the Project as a whole, "
+ "nor to the RIFT.ware platform in general.") },
+
+ { 'mano-role':"rw-project-mano:account-admin",
+ 'description':("The account-admin Role has full CRUDX permissions to the VIM, SDN, VCA "
+ "and RO accounts within a Project. The account-admin Role does "
+ "not provide general CRUDX permissions to the Project as a whole, "
+ "nor to the RIFT.ware platform in general.") },
+
+ { 'mano-role':"rw-project-mano:account-oper",
+ 'description':("The account-oper Role has read permission to the VIM, SDN, VCA "
+ "and RO accounts within a Project. The account-oper Role may also have "
+ "execute permission to specific non-mutating RPCs.") },
+]
+
+
+class ProjectDtsHandler(object):
+ XPATH = "C,/{}".format(NS_PROJECT)
+
+ def __init__(self, dts, log, callbacks):
+ self._dts = dts
+ self._log = log
+ self._callbacks = callbacks
+
+ self.reg = None
+ self.projects = []
+
+ @property
+ def log(self):
+ return self._log
+
+ @property
+ def dts(self):
+ return self._dts
+
+ def get_reg_flags(self):
+ return (rwdts.Flag.SUBSCRIBER |
+ rwdts.Flag.DELTA_READY |
+ rwdts.Flag.CACHE |
+ rwdts.Flag.DATASTORE)
+
+ def add_project(self, cfg):
+ name = cfg.name
+ self._log.info("Adding project: {}".format(name))
+
+ if name not in self.projects:
+ self._callbacks.on_add_apply(name, cfg)
+ self.projects.append(name)
+ else:
+ self._log.error("Project already present: {}".
+ format(name))
+
+ def delete_project(self, name):
+ self._log.info("Deleting project: {}".format(name))
+ if name in self.projects:
+ self._callbacks.on_delete_apply(name)
+ self.projects.remove(name)
+ else:
+ self._log.error("Unrecognized project: {}".
+ format(name))
+
+ def update_project(self, cfg):
+ """ Update an existing project
+
+ Currently, we do not take any action on MANO for this,
+ so no callbacks are defined
+
+ Arguments:
+ msg - The project config message
+ """
+ name = cfg.name
+ self._log.info("Updating project: {}".format(name))
+ if name in self.projects:
+ pass
+ else:
+ self._log.error("Unrecognized project: {}".
+ format(name))
+
+ def register(self):
+ @asyncio.coroutine
+ def apply_config(dts, acg, xact, action, scratch):
+ self._log.debug("Got project apply config (xact: %s) (action: %s)", xact, action)
+
+ if xact.xact is None:
+ if action == rwdts.AppconfAction.INSTALL:
+ curr_cfg = self._reg.elements
+ for cfg in curr_cfg:
+ self._log.info("Project {} being re-added after restart.".
+ format(cfg.name))
+ self.add_project(cfg)
+ else:
+ self._log.debug("No xact handle. Skipping apply config")
+
+ return
+
+ add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
+ dts_member_reg=self._reg,
+ xact=xact,
+ key_name="name",
+ )
+
+ # Handle Deletes
+ for cfg in delete_cfgs:
+ self.delete_project(cfg.name)
+
+ # Handle Adds
+ for cfg in add_cfgs:
+ self.add_project(cfg)
+
+ # Handle Updates
+ for cfg in update_cfgs:
+ self.update_project(cfg)
+
+ return RwTypes.RwStatus.SUCCESS
+
+ @asyncio.coroutine
+ def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+ """ Prepare callback from DTS for Project """
+
+ action = xact_info.query_action
+ name = msg.name
+
+ self._log.debug("Project %s on_prepare config received (action: %s): %s",
+ name, xact_info.query_action, msg)
+
+ if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+ if name in self.projects:
+ self._log.debug("Project {} already exists. Ignore request".
+ format(name))
+
+ else:
+ self._log.debug("Project {}: Invoking on_prepare add request".
+ format(name))
+ rc, err_msg = yield from self._callbacks.on_add_prepare(name, msg)
+ if rc is False:
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+ ProjectDtsHandler.XPATH,
+ err_msg)
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
+
+ elif action == rwdts.QueryAction.DELETE:
+ # Check if the entire project got deleted
+ fref = ProtobufC.FieldReference.alloc()
+ fref.goto_whole_message(msg.to_pbcm())
+ if fref.is_field_deleted():
+ if name in self.projects:
+ rc, delete_msg = yield from self._callbacks.on_delete_prepare(name)
+ if not rc:
+ self._log.error("Project {} should not be deleted. Reason : {}".
+ format(name, delete_msg))
+
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+ ProjectDtsHandler.XPATH,
+ delete_msg)
+
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
+ else:
+ self._log.warning("Delete on unknown project: {}".
+ format(name))
+
+ else:
+ self._log.error("Action (%s) NOT SUPPORTED", action)
+ xact_info.respond_xpath(rwdts.XactRspCode.NA)
+ return
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ self._log.debug("Registering for project config using xpath: %s",
+ ProjectDtsHandler.XPATH)
+
+ acg_handler = rift.tasklets.AppConfGroup.Handler(
+ on_apply=apply_config,
+ )
+
+ with self._dts.appconf_group_create(acg_handler) as acg:
+ self._reg = acg.register(
+ xpath=ProjectDtsHandler.XPATH,
+ flags=self.get_reg_flags(),
+ on_prepare=on_prepare,
+ )
+
+
+class ProjectHandler(object):
+ def __init__(self, tasklet, project_class):
+ self._tasklet = tasklet
+ self._log = tasklet.log
+ self._log_hdl = tasklet.log_hdl
+ self._dts = tasklet.dts
+ self._loop = tasklet.loop
+ self._class = project_class
+
+ self.mano_roles = [role['mano-role'] for role in MANO_PROJECT_ROLES]
+
+ self._log.debug("Creating project config handler")
+ self.project_cfg_handler = ProjectDtsHandler(
+ self._dts, self._log,
+ ProjectConfigCallbacks(
+ on_add_apply=self.on_project_added,
+ on_add_prepare=self.on_add_prepare,
+ on_delete_apply=self.on_project_deleted,
+ on_delete_prepare=self.on_delete_prepare,
+ )
+ )
+
+ def _get_tasklet_name(self):
+ return self._tasklet.tasklet_info.instance_name
+
+ def _get_project(self, name):
+ try:
+ proj = self._tasklet.projects[name]
+ except Exception as e:
+ self._log.exception("Project {} ({})not found for tasklet {}: {}".
+ format(name, list(self._tasklet.projects.keys()),
+ self._get_tasklet_name(), e))
+ raise e
+
+ return proj
+
+ def on_project_deleted(self, name):
+ self._log.debug("Project {} deleted".format(name))
+ try:
+ self._get_project(name).deregister()
+ except Exception as e:
+ self._log.exception("Project {} deregister for {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+
+ try:
+ proj = self._tasklet.projects.pop(name)
+ del proj
+ except Exception as e:
+ self._log.exception("Project {} delete for {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+
+ def on_project_added(self, name, cfg):
+ if name not in self._tasklet.projects:
+ try:
+ self._tasklet.projects[name] = \
+ self._class(name, self._tasklet)
+ self._loop.create_task(self._get_project(name).register())
+
+ except Exception as e:
+ self._log.exception("Project {} create for {} failed: {}".
+ format(name, self._get_tasklet_name(), e))
+ raise e
+
+ self._log.debug("Project {} added to tasklet {}".
+ format(name, self._get_tasklet_name()))
+ self._get_project(name)._apply = True
+
+ @asyncio.coroutine
+ def on_add_prepare(self, name, msg):
+ self._log.debug("Project {} to be added to {}".
+ format(name, self._get_tasklet_name()))
+
+ if name in self._tasklet.projects:
+ err_msg = ("Project already exists: {}".
+ format(name))
+ self._log.error(err_msg)
+ return False, err_msg
+
+ # Validate mano-roles, if present
+ try:
+ cfg = msg.project_config
+ users = cfg.user
+ for user in users:
+ for role in user.mano_role:
+ if role.role not in self.mano_roles:
+ err_msg = ("Invalid role {} for user {} in project {}".
+ format(role.role, user.user_name, name))
+ self._log.error(err_msg)
+ return False, err_msg
+
+ except AttributeError as e:
+ # If the user or mano role is not present, ignore
+ self._log.debug("Project {}: {}".format(name, e))
+
+ return True, ""
+
+ @asyncio.coroutine
+ def on_delete_prepare(self, name):
+ self._log.error("Project {} being deleted for tasklet {}".
+ format(name, self._get_tasklet_name()))
+ rc, delete_msg = yield from self._get_project(name).delete_prepare()
+ return (rc, delete_msg)
+
+ def register(self):
+ self.project_cfg_handler.register()
+
+
+class ProjectStateRolePublisher(rift.tasklets.DtsConfigPublisher):
+
+ def __init__(self, tasklet):
+ super().__init__(tasklet)
+ self.proj_state = RwProjectManoYang.YangData_RwProject_Project_ProjectState()
+ self.projects = set()
+ self.roles = MANO_PROJECT_ROLES
+
+ def get_xpath(self):
+ return "D,/rw-project:project/rw-project:project-state/rw-project-mano:mano-role"
+
+ def get_reg_flags(self):
+ return super().get_reg_flags() | rwdts.Flag.DATASTORE
+
+ def role_xpath(self, project, role):
+ return "/rw-project:project[rw-project:name={}]".format(quoted_key(project)) + \
+ "/rw-project:project-state/rw-project-mano:mano-role" + \
+ "[rw-project-mano:role={}]".format(quoted_key(role['mano-role']))
+
+ def pb_role(self, role):
+ pbRole = self.proj_state.create_mano_role()
+ pbRole.role = role['mano-role']
+ pbRole.description = role['description']
+ return pbRole
+
+ def publish_roles(self, project):
+ if not project in self.projects:
+ self.projects.add(project)
+ for role in self.roles:
+ xpath = self.role_xpath(project, role)
+ pb_role = self.pb_role(role)
+ self.log.debug("publishing xpath:{}".format(xpath))
+ self._regh.update_element(xpath, pb_role)
+
+ def unpublish_roles(self, project):
+ if project in self.projects:
+ self.projects.remove(project)
+ for role in self.roles:
+ xpath = self.role_xpath(project, role)
+ self.log.debug("unpublishing xpath:{}".format(xpath))
+ self._regh.delete_element(xpath)
+
--- /dev/null
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Project Manager tasklet is responsible for managing the Projects
+configurations required for Role Based Access Control feature.
+"""
+
+import asyncio
+import gi
+
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwRbacInternalYang', '1.0')
+gi.require_version('RwProjectManoYang', '1.0')
+from gi.repository import (
+ RwDts as rwdts,
+ ProtobufC,
+ RwTypes,
+ RwRbacInternalYang,
+ RwProjectManoYang,
+)
+gi.require_version('RwKeyspec', '1.0')
+from gi.repository.RwKeyspec import quoted_key
+
+import rift.tasklets
+from rift.tasklets.rwidmgr.rbac import (
+ StateMachine,
+ User,
+ UserState,
+ RoleKeys,
+ RoleKeysUsers,
+ encode_role_instance_key,
+)
+from rift.mano.utils.project import (
+ NS_PROJECT,
+ get_add_delete_update_cfgs,
+)
+
+
+from .projectmano import MANO_PROJECT_ROLES
+
+
+class ProjectConfigSubscriber(object):
+ """Config subscriber for rw-user config"""
+
+ def __init__(self, project):
+ self.project_name = project.name
+ self._log = project.log
+ self._dts = project.dts
+
+ self.users = {}
+ self.pub = ProjectMgrManoRoleConfigPublisher(project)
+ self.proj_roles = [role['mano-role'] for role in MANO_PROJECT_ROLES]
+
+ def get_xpath(self):
+ return "C,/{}[name={}]/project-config/user".format(NS_PROJECT, quoted_key(self.project_name))
+
+ def get_reg_flags(self):
+ return (rwdts.Flag.SUBSCRIBER |
+ rwdts.Flag.DELTA_READY |
+ rwdts.Flag.CACHE |
+ rwdts.Flag.DATASTORE)
+
+ def role_inst(self, role, keys=None):
+ if not keys:
+ keys = encode_role_instance_key(self.project_name)
+
+ r = RoleKeys()
+ r.role = role.role
+ r.keys = keys
+ return r
+
+ def delete_user(self, cfg):
+ user = User().pb(cfg)
+ self._log.info("Delete user {} for project {}".
+ format(user.key, self.project_name))
+ if user.key in self.users:
+ roles = self.users[user.key]
+ for role_key in list(roles):
+ self.delete_role(user, role_key)
+ self.users.pop(user.key)
+
+ def update_user(self, cfg):
+ user = User().pb(cfg)
+ self._log.debug("Update user {} for project {} cfg {}".
+ format(user.key, self.project_name, cfg))
+ cfg_roles = {}
+ for cfg_role in cfg.mano_role:
+ r = self.role_inst(cfg_role)
+ cfg_roles[r.key] = r
+
+ if not user.key in self.users:
+ self.users[user.key] = set()
+ else:
+ #Check if any roles are deleted for the user
+ for role_key in list(self.users[user.key]):
+ if role_key not in cfg_roles:
+ self.delete_role(user, role_key)
+
+ for role_key in cfg_roles.keys():
+ if role_key not in self.users[user.key]:
+ self.update_role(user, cfg_roles[role_key])
+
+ def delete_role(self, user, role_key):
+ self._log.info("Delete role {} for user {}".
+ format(role_key, user.key))
+ user_key = user.key
+
+ try:
+ roles = self.users[user_key]
+ except KeyError:
+ roles = set()
+ self.users[user.key] = roles
+
+ if role_key in roles:
+ roles.remove(role_key)
+ self.pub.delete_role(role_key, user_key)
+
+ def update_role(self, user, role):
+ self._log.debug("Update role {} for user {}".
+ format(role.role, user.key))
+
+ user_key = user.key
+
+ try:
+ roles = self.users[user.key]
+ except KeyError:
+ roles = set()
+ self.users[user_key] = roles
+
+ role_key = role.key
+
+ if not role_key in roles:
+ roles.add(role_key)
+ self.pub.add_update_role(role_key, user_key)
+
+ def delete_project(self):
+ # Clean up rw-rbac-intenal
+ self._log.error("Project {} delete".format(self.project_name))
+ for user_key, roles in self.users.items():
+ for role_key in roles:
+ self._log.error("delete role {} for user {}".format(role_key, user_key))
+ self.pub.delete_role(user_key, role_key)
+
+ @asyncio.coroutine
+ def register(self):
+ @asyncio.coroutine
+ def apply_config(dts, acg, xact, action, scratch):
+ self._log.debug("Got user apply config (xact: %s) (action: %s)",
+ xact, action)
+
+ if xact.xact is None:
+ if action == rwdts.AppconfAction.INSTALL:
+ curr_cfg = self._reg.elements
+ for cfg in curr_cfg:
+ self._log.info("Project {} user being restored: {}.".
+ format(self.project_name, cfg.as_dict()))
+ self.update_user(cfg)
+ else:
+ # When RIFT first comes up, an INSTALL is called with the current config
+ # Since confd doesn't actally persist data this never has any data so
+ # skip this for now.
+ self._log.debug("No xact handle. Skipping apply config")
+
+ return
+
+ # TODO: There is user-name and user-domain as keys. Need to fix
+ # this
+ add_cfgs, delete_cfgs, update_cfgs = get_add_delete_update_cfgs(
+ dts_member_reg=self._reg,
+ xact=xact,
+ key_name="user_name",
+ )
+
+ self._log.debug("Added: {}, Deleted: {}, Modified: {}".
+ format(add_cfgs, delete_cfgs, update_cfgs))
+ # Handle Deletes
+ for cfg in delete_cfgs:
+ self.delete_user(cfg)
+
+ # Handle Adds
+ for cfg in add_cfgs:
+ self.update_user(cfg)
+
+ # Handle Updates
+ for cfg in update_cfgs:
+ self.update_user(cfg)
+
+ return RwTypes.RwStatus.SUCCESS
+
+ @asyncio.coroutine
+ def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):
+ """ Prepare callback from DTS for Project """
+
+ action = xact_info.query_action
+
+ self._log.debug("Project %s on_prepare config received (action: %s): %s",
+ self.project_name, xact_info.query_action, msg)
+
+ user = User().pb(msg)
+ if action in [rwdts.QueryAction.CREATE, rwdts.QueryAction.UPDATE]:
+ if user.key in self.users:
+ self._log.debug("User {} update request".
+ format(user.key))
+
+ else:
+ self._log.debug("User {}: on_prepare add request".
+ format(user.key))
+
+ for role in msg.mano_role:
+ if role.role not in self.proj_roles:
+ errmsg = "Invalid MANO role {} for user {}". \
+ format(role.role, user.key)
+ self._log.error(errmsg)
+ xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,
+ self.get_xpath(),
+ errmsg)
+ xact_info.respond_xpath(rwdts.XactRspCode.NACK)
+ return
+
+ elif action == rwdts.QueryAction.DELETE:
+ # Check if the user got deleted
+ fref = ProtobufC.FieldReference.alloc()
+ fref.goto_whole_message(msg.to_pbcm())
+ if fref.is_field_deleted():
+ if user.key in self.users:
+ self._log.debug("User {} being deleted".format(user.key))
+ else:
+ self._log.warning("Delete on unknown user: {}".
+ format(user.key))
+
+ try:
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ except rift.tasklets.dts.ResponseError as e:
+ xpath = ks_path.to_xpath(RwProjectManoYang.get_schema())
+ self._log.debug("Exception sending response for {}: {}".
+ format(xpath, e))
+ return
+
+ else:
+ self._log.error("Action (%s) NOT SUPPORTED", action)
+ xact_info.respond_xpath(rwdts.XactRspCode.NA)
+ return
+
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+
+ xpath = self.get_xpath()
+ self._log.debug("Registering for project config using xpath: %s",
+ xpath,
+ )
+
+ acg_handler = rift.tasklets.AppConfGroup.Handler(
+ on_apply=apply_config,
+ )
+
+ with self._dts.appconf_group_create(acg_handler) as acg:
+ self._reg = acg.register(
+ xpath=xpath,
+ flags=self.get_reg_flags(),
+ on_prepare=on_prepare,
+ )
+
+ yield from self.pub.register()
+ self.pub.create_project_roles()
+
+ def deregister(self):
+ self._log.debug("De-registering DTS handler for project {}".
+ format(self.project_name))
+
+ if self._reg:
+ self._reg.deregister()
+ self._reg = None
+
+ self.pub.delete_project_roles()
+ self.pub.deregister()
+
+class ProjectMgrManoRoleConfigPublisher(rift.tasklets.DtsConfigPublisher):
+
+ def __init__(self, project):
+ super().__init__(project._tasklet)
+ self.project_name = project.name
+ self.notify = project._tasklet.notify
+ self.rbac_int = RwRbacInternalYang.YangData_RwRbacInternal_RwRbacInternal()
+ self.roles = {}
+ self.proj_roles = [role['mano-role'] for role in MANO_PROJECT_ROLES]
+ self.proj_roles_published = False
+
+ def get_xpath(self):
+ return "D,/rw-rbac-internal:rw-rbac-internal/rw-rbac-internal:role"
+
+ def get_reg_flags(self):
+ return super().get_reg_flags() | rwdts.Flag.DATASTORE
+
+ def role_xpath(self, role_key):
+ return "D,/rw-rbac-internal:rw-rbac-internal/rw-rbac-internal:role" + \
+ "[rw-rbac-internal:role={}]".format(quoted_key(role_key[0])) + \
+ "[rw-rbac-internal:keys={}]".format(quoted_key(role_key[1]))
+
+ def role_user_xpath(self, role_key, user_key):
+ return self.role_xpath(role_key) + \
+ "/rw-rbac-internal:user" + \
+ "[rw-rbac-internal:user-name={}]".format(quoted_key(user_key[0])) + \
+ "[rw-rbac-internal:user-domain={}]".format(quoted_key(user_key[1]))
+
+ def pb_role(self, role, user):
+ pbRole = self.rbac_int.create_role()
+ pbRole.role = role.role
+ pbRole.keys = role.keys
+ pbRole.state_machine.state = role.state.name
+
+ pbUser = pbRole.create_user()
+ pbUser.user_name = user.user_name
+ pbUser.user_domain = user.user_domain
+ pbUser.state_machine.state = user.state.name
+
+ pbRole.user.append(pbUser)
+
+ return pbRole
+
+ def pb_project_role(self, role):
+ pbRole = self.rbac_int.create_role()
+ pbRole.role = role.role
+ pbRole.keys = role.keys
+ pbRole.state_machine.state = role.state.name
+ return pbRole
+
+ def add_update_role(self, role_key, user_key):
+ try:
+ role = self.roles[role_key]
+ except KeyError:
+ role = RoleKeysUsers(role_key)
+ self.roles[role_key] = role
+
+ try:
+ user = role.user(user_key)
+ except KeyError:
+ user = UserState(user_key)
+ role.add_user(user)
+
+ user.state = StateMachine.new
+
+ xpath = self.role_xpath(role_key)
+ self.log.debug("add/update role: {} user: {} ".format(role_key, user_key))
+
+ pb_role = self.pb_role(role, user)
+ self._regh.update_element(xpath, pb_role)
+
+ event_desc = "Role '{}' with key '{}' assigned to user '{}' in domain '{}'". \
+ format(role.role, role.keys, user.user_name, user.user_domain)
+ self.notify.send_event("role-assigned", event_desc)
+
+ def delete_role(self, role_key, user_key):
+ try:
+ role = self.roles[role_key]
+ user = role.user(user_key)
+ except KeyError:
+ self.log.error("delete_role: invalid role/user {}/{}".format(role_key, user_key))
+ return
+
+ user.state = StateMachine.delete
+ xpath = self.role_xpath(role_key)
+ self.log.debug("deleting role: {} user: {}".format(role_key, user_key))
+
+ pb_role = self.pb_role(role, user)
+ self._regh.update_element(xpath, pb_role)
+
+ event_desc = "Role '{}' with key '{}' unassigned from user '{}' in domain '{}'". \
+ format(role.role, role.keys, user.user_name, user.user_domain)
+ self.notify.send_event("role-unassigned", event_desc)
+
+ def create_project_roles(self):
+ for name in self.proj_roles:
+ role = RoleKeys()
+ role.role = name
+ role.keys = encode_role_instance_key(self.project_name)
+ self.create_project_role(role)
+
+ def create_project_role(self, role):
+ role_key = role.key
+ try:
+ role = self.roles[role_key]
+ # already exist
+ return
+ except KeyError:
+ role = RoleKeysUsers(role_key)
+ self.roles[role_key] = role
+
+ xpath = self.role_xpath(role.key)
+
+ pb_role = self.pb_project_role(role)
+
+ # print("create_project_role path:{} role:{}".format(xpath, pb_role))
+ self._regh.update_element(xpath, pb_role)
+
+ def delete_project_roles(self):
+ for name in self.proj_roles:
+ role = RoleKeys()
+ role.role = name
+ role.keys = encode_role_instance_key(self.project_name)
+ self.delete_project_role(role)
+
+ def delete_project_role(self, role):
+ xpath = self.role_xpath(role.key)
+
+ self._regh.delete_element(xpath)
+
+ def do_prepare(self, xact_info, action, ks_path, msg):
+ """Handle on_prepare. To be overridden by Concreate Publisher Handler
+ """
+ role_key = tuple([msg.role, msg.keys])
+ try:
+ role = self.roles[role_key]
+ except KeyError:
+ xact_info.respond_xpath(rwdts.XactRspCode.NA)
+ return
+
+ self.log.debug("do_prepare (MANO-ROLES): action: {}, path: {}, msg: {}".format(action, ks_path, msg))
+ xact_info.respond_xpath(rwdts.XactRspCode.ACK)
+ xpath = self.role_xpath(role_key)
+
+ if msg.state_machine.state == 'init_done':
+ msg.state_machine.state = 'active'
+ role.state = StateMachine.active
+ self._regh.update_element(xpath, msg)
+ elif msg.state_machine.state == 'delete_done':
+ self._regh.delete_element(xpath)
+ del self.roles[role_key]
+ # deleted at role level, skip processing users under it
+ return
+
+ if msg.user:
+ for pbUser in msg.user:
+ user_key = tuple([pbUser.user_name, pbUser.user_domain])
+ try:
+ user = role.user(user_key)
+ except KeyError:
+ self._log.debug("**** User {} not found".format(user_key))
+ continue
+ user_xpath = self.role_user_xpath(role_key, user_key)
+ state = pbUser.state_machine.state
+ if state == 'init_done':
+ pbUser.state_machine.state = 'active'
+ user.state = StateMachine.active
+ self._regh.update_element(xpath, msg)
+ elif state == 'delete_done':
+ role.delete_user(user)
+ self._regh.delete_element(user_xpath)
+
+ def deregister(self):
+ if self.reg:
+ self.delete_project_roles()
+ super().deregister()
--- /dev/null
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Mano Project Manager tasklet is responsible for managing the Projects
+configurations required for Role Based Access Control feature.
+"""
+
+import asyncio
+
+import gi
+gi.require_version('RwDts', '1.0')
+gi.require_version('RwLog', '1.0')
+gi.require_version('RwProjectYang', '1.0')
+gi.require_version('RwProjectManoYang', '1.0')
+from gi.repository import (
+ RwDts as rwdts,
+ RwLog as rwlog,
+ RwProjectYang,
+ RwProjectManoYang,
+)
+
+import rift.tasklets
+
+from rift.tasklets.rwidmgr.rbac import (
+ RbacNotification,
+)
+
+from rift.mano.utils.project import (
+ ManoProject,
+ )
+
+from .projectmano import (
+ ProjectHandler,
+ ProjectStateRolePublisher,
+)
+
+from .rolesmano import (
+ ProjectMgrManoRoleConfigPublisher,
+ ProjectConfigSubscriber,
+)
+
+
+class ProjectMgrManoProject(ManoProject):
+
+ def __init__(self, name, tasklet):
+ super(ProjectMgrManoProject, self).__init__(tasklet.log, name)
+ self.update(tasklet)
+
+ self.project_sub = ProjectConfigSubscriber(self)
+
+ @asyncio.coroutine
+ def register (self):
+ self._log.info("Initializing the ProjectMgrMano for %s", self.name)
+ yield from self.project_sub.register()
+ self.tasklet.project_state_role_pub.publish_roles(self.name)
+
+ def deregister(self):
+ self._log.info("De-register project %s", self.name)
+ self.tasklet.project_state_role_pub.unpublish_roles(self.name)
+ self.project_sub.deregister()
+
+
+class ProjectMgrManoTasklet(rift.tasklets.Tasklet):
+ """Tasklet that manages the Project config
+ """
+ def __init__(self, *args, **kwargs):
+ """Constructs a ProjectManager tasklet"""
+ try:
+ super().__init__(*args, **kwargs)
+ self.rwlog.set_category("rw-mano-log")
+ self.notify = RbacNotification(self)
+
+ self.projects = {}
+
+ except Exception as e:
+ self.log.exception(e)
+
+
+ def start(self):
+ """Callback that gets invoked when a Tasklet is started"""
+ super().start()
+ self.log.info("Starting Mano Project Manager Tasklet")
+
+ self.log.debug("Registering with dts")
+ self.dts = rift.tasklets.DTS(
+ self.tasklet_info,
+ RwProjectManoYang.get_schema(),
+ self.loop,
+ self.on_dts_state_change
+ )
+
+ self.log.debug("Created DTS Api Object: %s", self.dts)
+
+ def stop(self):
+ """Callback that gets invoked when Tasklet is stopped"""
+ try:
+ self.dts.deinit()
+ except Exception as e:
+ self.log.exception(e)
+
+ @asyncio.coroutine
+ def init(self):
+ """DTS Init state handler"""
+ try:
+ self.log.info("Registering for Project Config")
+ self.project_handler = ProjectHandler(self, ProjectMgrManoProject)
+ self.project_handler.register()
+
+ self.project_state_role_pub = ProjectStateRolePublisher(self)
+ yield from self.project_state_role_pub.register()
+
+ except Exception as e:
+ self.log.exception("Registering for project failed: {}".format(e))
+
+ @asyncio.coroutine
+ def run(self):
+ """DTS run state handler"""
+ pass
+
+ @asyncio.coroutine
+ def on_dts_state_change(self, state):
+ """Handle DTS state change
+
+ Take action according to current DTS state to transition application
+ into the corresponding application state
+
+ Arguments
+ state - current dts state
+
+ """
+ switch = {
+ rwdts.State.INIT: rwdts.State.REGN_COMPLETE,
+ rwdts.State.CONFIG: rwdts.State.RUN,
+ }
+
+ handlers = {
+ rwdts.State.INIT: self.init,
+ rwdts.State.RUN: self.run,
+ }
+
+ # Transition application to next state
+ handler = handlers.get(state, None)
+ if handler is not None:
+ yield from handler()
+
+ # Transition dts to next state
+ next_state = switch.get(state, None)
+ if next_state is not None:
+ self.dts.handle.set_state(next_state)
+
+ def config_ready(self):
+ """Subscription is complete and ready to start publishing."""
+ self.log.debug("Configuration Ready")
+
+
+# vim: ts=4 sw=4 et
--- /dev/null
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import rift.tasklets.rwprojectmano
+
+class Tasklet(rift.tasklets.rwprojectmano.ProjectMgrManoTasklet):
+ pass
+
+# vim: sw=4
--- /dev/null
+#
+# Copyright 2017 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+
+rift_add_yang_target(
+ TARGET rwprojectmano_yang
+ YANG_FILES
+ rw-project-mano.yang
+ GIR_PATHS ${CMAKE_CURRENT_BINARY_DIR}
+ COMPONENT ${INSTALL_COMPONENT}
+ LIBRARIES
+ rw_project_yang_gen
+ )
--- /dev/null
+#
+# Copyright 2016 RIFT.IO Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Author(s): Tim Mortsolf
+# Creation Date: 11/25/2013
+#
+
+##
+# Define a Makefile function: find_upwards(filename)
+#
+# Searches for a file of the given name in the directory ., .., ../.., ../../.., etc.,
+# until the file is found or the root directory is reached
+##
+find_upward = $(word 1, $(shell while [ `pwd` != / ] ; do find `pwd` -maxdepth 1 -name $1 ; cd .. ; done))
+
+##
+# Call find_upward("Makefile.top") to find the nearest upwards adjacent Makefile.top
+##
+makefile.top := $(call find_upward, "Makefile.top")
+
+##
+# If Makefile.top was found, then include it
+##
+include $(makefile.top)
--- /dev/null
+/*
+ *
+ * Copyright 2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+module rw-project-mano-tailf
+{
+ namespace "http://riftio.com/ns/riftware-1.0/rw-project-mano-tailf";
+ prefix "rw-project-mano-tailf";
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import tailf-common {
+ prefix tailf;
+ }
+
+ import rw-project-mano {
+ prefix "rw-project-mano";
+ }
+
+ revision 2017-04-04 {
+ description
+ "Initial revision.";
+ }
+
+ tailf:annotate "/rw-project:project/rw-project:project-state/rw-project-mano:mano-role" {
+ tailf:callpoint rw_callpoint;
+ }
+}
--- /dev/null
+/*
+ *
+ * Copyright 2017 RIFT.IO Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+module rw-project-mano
+{
+ namespace "http://riftio.com/ns/riftware-1.0/rw-project-mano";
+ prefix "rw-project-mano";
+
+ import rw-rbac-base {
+ prefix "rw-rbac-base";
+ }
+
+ import rw-project {
+ prefix "rw-project";
+ }
+
+ import rw-rbac-internal {
+ prefix "rw-rbac-internal";
+ }
+
+ revision 2017-03-08 {
+ description
+ "Initial revision. This YANG file defines the
+ MANO extentions for project based tenancy";
+ reference
+ "Derived from earlier versions of base YANG files";
+ }
+
+ identity catalog-oper {
+ base rw-project:project-role;
+ description
+ "The catalog-oper Role has read permission to the VNFD and NSD
+ catalogs within a Project. The catalog-oper Role may also have
+ execute permission to specific non-mutating RPCs.";
+ }
+
+ identity catalog-admin {
+ base rw-project:project-role;
+ description
+ "The catalog-admin Role has full CRUDX permissions to the VNFD
+ and NSD catalogs within a Project. The catalog-admin Role does
+ not provide general CRUDX permissions to the Project as a whole,
+ nor to the RIFT.ware platform in general.";
+ }
+
+ identity lcm-oper {
+ base rw-project:project-role;
+ description
+ "The lcm-oper Role has read permission to the VL, VNF and NS
+ records within a Project. The lcm-oper Role may also have
+ execute permission to specific non-mutating RPCs.";
+ }
+
+ identity lcm-admin {
+ base rw-project:project-role;
+ description
+ "The lcm-admin Role has full CRUDX permissions to the VL, VNF
+ and NS records within a Project. The lcm-admin Role does
+ not provide general CRUDX permissions to the Project as a whole,
+ nor to the RIFT.ware platform in general.";
+ }
+
+ identity account-oper {
+ base rw-project:project-role;
+ description
+ "The account-oper Role has read permission to the VIM, SDN, VCA
+ and RO accounts within a Project. The account-oper Role may also have
+ execute permission to specific non-mutating RPCs.";
+ }
+
+ identity account-admin {
+ base rw-project:project-role;
+ description
+ "The account-admin Role has full CRUDX permissions to the VIM, SDN, VCA
+ and RO accounts within a Project. The account-admin Role does
+ not provide general CRUDX permissions to the Project as a whole,
+ nor to the RIFT.ware platform in general.";
+ }
+
+ augment /rw-project:project/rw-project:project-config/rw-project:user {
+ description
+ "Configuration for MANO application-specific Roles.";
+
+ list mano-role {
+ description
+ "The list of MANO application-specific Roles the User has been
+ assigned, within the enclosing Project.";
+
+ key "role";
+ uses rw-rbac-base:simple-role;
+ }
+ }
+
+ augment /rw-project:project/rw-project:project-state/rw-project:user {
+ description
+ "The state for MANO application-specific Roles.";
+
+ list mano-role {
+ description
+ "The state of the MANO application-specific Role the User has
+ been assigned.";
+
+ key "role";
+ uses rw-rbac-base:simple-role;
+
+ leaf state {
+ description
+ "The assignment of a User to a Role may be an asynchronous
+ operation. This value indicates whether the Role
+ assignment is active. If the value is 'active', then the
+ assignment is complete and active. Any other value
+ indicates that Role assignment is in a transitional or
+ failed state, as described in the value.";
+ type string;
+ }
+ }
+ }
+
+ augment /rw-project:project/rw-project:project-state {
+ description
+ "State for MANO application-specific Roles.";
+
+ list mano-role {
+ description
+ "The set of Roles that may be configured into
+ /rw-project:project/rw-project:project-config/rw-project:user/
+ rw-project-mano:mano-role/rw-project-mano:role.";
+
+ key "role";
+ uses rw-rbac-base:simple-role;
+
+ leaf description {
+ description
+ "A description of the Role.";
+ type string;
+ }
+ }
+ }
+}
-rw-base
-rw-mgmtagt
-rw-manifest
-rw-vcs
-rwlog-mgmt
-rw-dts
-rwmsg-data
-rw-dtsperf
-rwshell-mgmt
-rw-debug
-rw-dtsperfmgr
-rw-memlog
-mano-base
-rw-sorch
-rw-restportforward
-mano-types
-rw-yang-types
-rw-log
-rwvcs-types
-rw-netconf
-rwcal
-rw-pb-ext
-rw-notify-ext
-rw-mgmt-schema
-rw-cli-ext
-ietf-inet-types
-ietf-yang-types
vnfr
nsr
-ietf-restconf-monitoring
-ietf-netconf-notifications
prefix rwbase;
}
- import rw-pb-ext {
- prefix "rwpb";
- }
-
import rw-yang-types {
prefix "rwt";
}
* Generic Logger Log Events - ID space 160000 - 160099
*/
notification debug {
- rwpb:msg-new Debug;
rwnotify:log-event-id 160000;
description
"Generic Debug Log";
}
notification info {
- rwpb:msg-new Info;
rwnotify:log-event-id 160001;
description
"Generic Info Log";
}
notification warn {
- rwpb:msg-new Warn;
rwnotify:log-event-id 160002;
description
"Generic Warning Log";
}
notification error {
- rwpb:msg-new Error;
rwnotify:log-event-id 160003;
description
"Generic Warning Log";
}
notification critical {
- rwpb:msg-new Critical;
rwnotify:log-event-id 160004;
description
"Generic Critical Log";